-rw-r--r--api-ref/source/baremetal-api-v1-indicators.inc154
-rw-r--r--api-ref/source/baremetal-api-v1-nodes.inc4
-rw-r--r--api-ref/source/conf.py8
-rw-r--r--api-ref/source/index.rst1
-rw-r--r--api-ref/source/parameters.yaml65
-rw-r--r--api-ref/source/samples/node-create-response.json1
-rw-r--r--api-ref/source/samples/node-indicators-component-list-response.json45
-rw-r--r--api-ref/source/samples/node-indicators-get-state-response.json3
-rw-r--r--api-ref/source/samples/node-indicators-list-response.json34
-rw-r--r--api-ref/source/samples/node-indicators-set-state.json3
-rw-r--r--api-ref/source/samples/node-show-response.json1
-rw-r--r--api-ref/source/samples/node-update-driver-info-response.json1
-rw-r--r--api-ref/source/samples/nodes-list-details-response.json2
-rw-r--r--babel.cfg2
-rw-r--r--bindep.txt4
-rw-r--r--devstack/common_settings26
-rw-r--r--devstack/files/debs/ironic32
-rw-r--r--devstack/lib/ironic327
-rw-r--r--devstack/plugin.sh1
-rwxr-xr-xdevstack/tools/ironic/scripts/configure-vm.py2
-rw-r--r--devstack/tools/ironic/templates/vm.xml9
-rw-r--r--doc/requirements.txt8
-rw-r--r--doc/source/_exts/automated_steps.py12
-rw-r--r--doc/source/admin/agent-power.rst76
-rw-r--r--doc/source/admin/agent-token.rst2
-rw-r--r--doc/source/admin/boot-from-volume.rst2
-rw-r--r--doc/source/admin/drivers.rst35
-rw-r--r--doc/source/admin/drivers/ibmc.rst235
-rw-r--r--doc/source/admin/drivers/idrac.rst43
-rw-r--r--doc/source/admin/drivers/ipmitool.rst25
-rw-r--r--doc/source/admin/drivers/redfish.rst23
-rw-r--r--doc/source/admin/fast-track.rst50
-rw-r--r--doc/source/admin/index.rst21
-rw-r--r--doc/source/admin/inspection.rst4
-rw-r--r--doc/source/admin/node-deployment.rst52
-rw-r--r--doc/source/admin/raid.rst13
-rw-r--r--doc/source/admin/report.txt1
-rw-r--r--doc/source/admin/troubleshooting.rst303
-rw-r--r--doc/source/conf.py11
-rw-r--r--doc/source/contributor/bugs.rst128
-rw-r--r--doc/source/contributor/contributing.rst4
-rw-r--r--doc/source/contributor/index.rst1
-rw-r--r--doc/source/contributor/jobs-description.rst2
-rw-r--r--doc/source/contributor/releasing.rst90
-rw-r--r--doc/source/contributor/webapi-version-history.rst16
-rw-r--r--doc/source/index.rst2
-rw-r--r--doc/source/install/configure-pxe.rst59
-rw-r--r--doc/source/install/enrollment.rst7
-rw-r--r--doc/source/install/standalone.rst178
-rw-r--r--driver-requirements.txt4
-rw-r--r--ironic/api/app.py17
-rw-r--r--ironic/api/args.py381
-rw-r--r--ironic/api/controllers/link.py39
-rw-r--r--ironic/api/controllers/root.py73
-rw-r--r--ironic/api/controllers/v1/__init__.py364
-rw-r--r--ironic/api/controllers/v1/allocation.py11
-rw-r--r--ironic/api/controllers/v1/bios.py12
-rw-r--r--ironic/api/controllers/v1/chassis.py39
-rw-r--r--ironic/api/controllers/v1/collection.py54
-rw-r--r--ironic/api/controllers/v1/conductor.py12
-rw-r--r--ironic/api/controllers/v1/deploy_template.py15
-rw-r--r--ironic/api/controllers/v1/driver.py32
-rw-r--r--ironic/api/controllers/v1/network-data-schema.json580
-rw-r--r--ironic/api/controllers/v1/node.py120
-rw-r--r--ironic/api/controllers/v1/port.py62
-rw-r--r--ironic/api/controllers/v1/portgroup.py25
-rw-r--r--ironic/api/controllers/v1/ramdisk.py1
-rw-r--r--ironic/api/controllers/v1/state.py3
-rw-r--r--ironic/api/controllers/v1/utils.py14
-rw-r--r--ironic/api/controllers/v1/versions.py6
-rw-r--r--ironic/api/controllers/v1/volume.py25
-rw-r--r--ironic/api/controllers/v1/volume_connector.py19
-rw-r--r--ironic/api/controllers/v1/volume_target.py19
-rw-r--r--ironic/api/controllers/version.py57
-rw-r--r--ironic/api/expose.py208
-rw-r--r--ironic/api/functions.py182
-rw-r--r--ironic/api/hooks.py2
-rw-r--r--ironic/api/method.py95
-rw-r--r--ironic/api/middleware/__init__.py6
-rw-r--r--ironic/api/middleware/auth_public_routes.py (renamed from ironic/api/middleware/auth_token.py)13
-rw-r--r--ironic/api/types.py728
-rw-r--r--ironic/cmd/__init__.py11
-rw-r--r--ironic/common/cinder.py8
-rw-r--r--ironic/common/exception.py94
-rw-r--r--ironic/common/hash_ring.py3
-rw-r--r--ironic/common/images.py70
-rw-r--r--ironic/common/json_rpc/__init__.py4
-rw-r--r--ironic/common/json_rpc/client.py33
-rw-r--r--ironic/common/json_rpc/server.py14
-rw-r--r--ironic/common/neutron.py159
-rw-r--r--ironic/common/policy.py6
-rw-r--r--ironic/common/pxe_utils.py59
-rw-r--r--ironic/common/raid.py3
-rw-r--r--ironic/common/release_mappings.py23
-rw-r--r--ironic/common/rpc_service.py2
-rw-r--r--ironic/common/states.py4
-rw-r--r--ironic/common/utils.py12
-rw-r--r--ironic/conductor/base_manager.py34
-rw-r--r--ironic/conductor/cleaning.py29
-rw-r--r--ironic/conductor/deployments.py21
-rw-r--r--ironic/conductor/manager.py91
-rw-r--r--ironic/conductor/rpcapi.py2
-rw-r--r--ironic/conductor/task_manager.py17
-rw-r--r--ironic/conductor/utils.py126
-rw-r--r--ironic/conf/agent.py24
-rw-r--r--ironic/conf/api.py4
-rw-r--r--ironic/conf/cinder.py8
-rw-r--r--ironic/conf/conductor.py21
-rw-r--r--ironic/conf/console.py4
-rw-r--r--ironic/conf/default.py33
-rw-r--r--ironic/conf/deploy.py24
-rw-r--r--ironic/conf/ilo.py5
-rw-r--r--ironic/conf/ipmi.py14
-rw-r--r--ironic/conf/iscsi.py3
-rw-r--r--ironic/conf/json_rpc.py19
-rw-r--r--ironic/conf/neutron.py13
-rw-r--r--ironic/conf/nova.py1
-rw-r--r--ironic/conf/opts.py2
-rw-r--r--ironic/conf/pxe.py39
-rw-r--r--ironic/conf/redfish.py12
-rw-r--r--ironic/db/sqlalchemy/alembic/versions/cf1a80fdb352_add_node_network_data_field.py30
-rw-r--r--ironic/db/sqlalchemy/api.py45
-rw-r--r--ironic/db/sqlalchemy/models.py1
-rw-r--r--ironic/dhcp/neutron.py19
-rw-r--r--ironic/drivers/base.py32
-rw-r--r--ironic/drivers/drac.py8
-rw-r--r--ironic/drivers/generic.py3
-rw-r--r--ironic/drivers/hardware_type.py12
-rw-r--r--ironic/drivers/ibmc.py6
-rw-r--r--ironic/drivers/ilo.py8
-rw-r--r--ironic/drivers/modules/agent.py317
-rw-r--r--ironic/drivers/modules/agent_base.py290
-rw-r--r--ironic/drivers/modules/agent_client.py198
-rw-r--r--ironic/drivers/modules/agent_power.py220
-rw-r--r--ironic/drivers/modules/ansible/deploy.py58
-rw-r--r--ironic/drivers/modules/console_utils.py27
-rw-r--r--ironic/drivers/modules/deploy_utils.py88
-rw-r--r--ironic/drivers/modules/drac/bios.py2
-rw-r--r--ironic/drivers/modules/drac/raid.py166
-rw-r--r--ironic/drivers/modules/ibmc/management.py2
-rw-r--r--ironic/drivers/modules/ibmc/power.py2
-rw-r--r--ironic/drivers/modules/ibmc/raid.py199
-rw-r--r--ironic/drivers/modules/ibmc/utils.py2
-rw-r--r--ironic/drivers/modules/ibmc/vendor.py23
-rw-r--r--ironic/drivers/modules/ilo/common.py27
-rw-r--r--ironic/drivers/modules/ilo/management.py116
-rw-r--r--ironic/drivers/modules/image_cache.py7
-rw-r--r--ironic/drivers/modules/inspector.py5
-rw-r--r--ironic/drivers/modules/ipmitool.py58
-rw-r--r--ironic/drivers/modules/ipxe_config.template5
-rw-r--r--ironic/drivers/modules/irmc/boot.py2
-rw-r--r--ironic/drivers/modules/iscsi_deploy.py162
-rw-r--r--ironic/drivers/modules/network/common.py125
-rw-r--r--ironic/drivers/modules/pxe.py7
-rw-r--r--ironic/drivers/modules/pxe_base.py37
-rw-r--r--ironic/drivers/modules/redfish/boot.py1078
-rw-r--r--ironic/drivers/modules/redfish/management.py90
-rw-r--r--ironic/drivers/modules/redfish/power.py57
-rw-r--r--ironic/drivers/modules/snmp.py9
-rw-r--r--ironic/drivers/redfish.py3
-rw-r--r--ironic/drivers/utils.py10
-rw-r--r--ironic/objects/fields.py7
-rw-r--r--ironic/objects/node.py33
-rw-r--r--ironic/objects/port.py37
-rw-r--r--ironic/tests/base.py25
-rw-r--r--ironic/tests/json_samples/network_data.json113
-rw-r--r--ironic/tests/unit/api/base.py5
-rw-r--r--ironic/tests/unit/api/controllers/test_base.py2
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_allocation.py12
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_chassis.py13
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_conductor.py12
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_deploy_template.py40
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_driver.py9
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_event.py3
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_expose.py230
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_node.py115
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_notification_utils.py3
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_port.py52
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_portgroup.py12
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_ramdisk.py2
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_root.py129
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_types.py27
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_utils.py31
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_versions.py3
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_volume.py6
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_volume_connector.py12
-rw-r--r--ironic/tests/unit/api/controllers/v1/test_volume_target.py12
-rw-r--r--ironic/tests/unit/api/test_acl.py2
-rw-r--r--ironic/tests/unit/api/test_args.py506
-rw-r--r--ironic/tests/unit/api/test_audit.py9
-rw-r--r--ironic/tests/unit/api/test_functions.py88
-rw-r--r--ironic/tests/unit/api/test_healthcheck.py7
-rw-r--r--ironic/tests/unit/api/test_hooks.py12
-rw-r--r--ironic/tests/unit/api/test_middleware.py39
-rw-r--r--ironic/tests/unit/api/test_ospmiddleware.py7
-rw-r--r--ironic/tests/unit/api/test_root.py9
-rw-r--r--ironic/tests/unit/api/test_types.py566
-rw-r--r--ironic/tests/unit/cmd/test_conductor.py3
-rw-r--r--ironic/tests/unit/cmd/test_dbsync.py2
-rw-r--r--ironic/tests/unit/cmd/test_status.py3
-rw-r--r--ironic/tests/unit/common/json_samples/neutron_network_show.json33
-rw-r--r--ironic/tests/unit/common/json_samples/neutron_network_show_ipv6.json33
-rw-r--r--ironic/tests/unit/common/json_samples/neutron_port_show.json59
-rw-r--r--ironic/tests/unit/common/json_samples/neutron_port_show_ipv6.json59
-rw-r--r--ironic/tests/unit/common/json_samples/neutron_subnet_show.json32
-rw-r--r--ironic/tests/unit/common/json_samples/neutron_subnet_show_ipv6.json32
-rw-r--r--ironic/tests/unit/common/test_cinder.py17
-rw-r--r--ironic/tests/unit/common/test_context.py5
-rw-r--r--ironic/tests/unit/common/test_driver_factory.py3
-rw-r--r--ironic/tests/unit/common/test_glance_service.py2
-rw-r--r--ironic/tests/unit/common/test_image_service.py2
-rw-r--r--ironic/tests/unit/common/test_images.py23
-rw-r--r--ironic/tests/unit/common/test_json_rpc.py145
-rw-r--r--ironic/tests/unit/common/test_keystone.py3
-rw-r--r--ironic/tests/unit/common/test_network.py3
-rw-r--r--ironic/tests/unit/common/test_neutron.py125
-rw-r--r--ironic/tests/unit/common/test_nova.py3
-rw-r--r--ironic/tests/unit/common/test_policy.py2
-rw-r--r--ironic/tests/unit/common/test_pxe_utils.py96
-rw-r--r--ironic/tests/unit/common/test_raid.py15
-rw-r--r--ironic/tests/unit/common/test_release_mappings.py3
-rw-r--r--ironic/tests/unit/common/test_rpc.py3
-rw-r--r--ironic/tests/unit/common/test_rpc_service.py7
-rw-r--r--ironic/tests/unit/common/test_swift.py2
-rw-r--r--ironic/tests/unit/common/test_utils.py2
-rw-r--r--ironic/tests/unit/common/test_wsgi_service.py3
-rw-r--r--ironic/tests/unit/conductor/mgr_utils.py4
-rw-r--r--ironic/tests/unit/conductor/test_allocations.py3
-rw-r--r--ironic/tests/unit/conductor/test_base_manager.py67
-rw-r--r--ironic/tests/unit/conductor/test_cleaning.py97
-rw-r--r--ironic/tests/unit/conductor/test_deployments.py163
-rw-r--r--ironic/tests/unit/conductor/test_manager.py916
-rw-r--r--ironic/tests/unit/conductor/test_notification_utils.py22
-rw-r--r--ironic/tests/unit/conductor/test_rpcapi.py24
-rw-r--r--ironic/tests/unit/conductor/test_steps.py48
-rw-r--r--ironic/tests/unit/conductor/test_task_manager.py21
-rw-r--r--ironic/tests/unit/conductor/test_utils.py158
-rw-r--r--ironic/tests/unit/db/sqlalchemy/test_migrations.py29
-rw-r--r--ironic/tests/unit/db/test_api.py2
-rw-r--r--ironic/tests/unit/db/test_bios_settings.py6
-rw-r--r--ironic/tests/unit/db/test_conductor.py2
-rw-r--r--ironic/tests/unit/db/test_node_tags.py6
-rw-r--r--ironic/tests/unit/db/test_node_traits.py8
-rw-r--r--ironic/tests/unit/db/test_nodes.py24
-rw-r--r--ironic/tests/unit/db/test_ports.py58
-rw-r--r--ironic/tests/unit/db/utils.py1
-rw-r--r--ironic/tests/unit/dhcp/test_factory.py6
-rw-r--r--ironic/tests/unit/dhcp/test_neutron.py3
-rw-r--r--ironic/tests/unit/drivers/ipxe_config_boot_from_iso.template39
-rw-r--r--ironic/tests/unit/drivers/modules/ansible/test_deploy.py167
-rw-r--r--ironic/tests/unit/drivers/modules/drac/test_bios.py3
-rw-r--r--ironic/tests/unit/drivers/modules/drac/test_boot.py3
-rw-r--r--ironic/tests/unit/drivers/modules/drac/test_common.py3
-rw-r--r--ironic/tests/unit/drivers/modules/drac/test_inspect.py3
-rw-r--r--ironic/tests/unit/drivers/modules/drac/test_job.py3
-rw-r--r--ironic/tests/unit/drivers/modules/drac/test_management.py7
-rw-r--r--ironic/tests/unit/drivers/modules/drac/test_periodic_task.py26
-rw-r--r--ironic/tests/unit/drivers/modules/drac/test_power.py17
-rw-r--r--ironic/tests/unit/drivers/modules/drac/test_raid.py192
-rw-r--r--ironic/tests/unit/drivers/modules/drac/utils.py5
-rw-r--r--ironic/tests/unit/drivers/modules/ibmc/base.py5
-rw-r--r--ironic/tests/unit/drivers/modules/ibmc/test_management.py2
-rw-r--r--ironic/tests/unit/drivers/modules/ibmc/test_power.py3
-rw-r--r--ironic/tests/unit/drivers/modules/ibmc/test_raid.py167
-rw-r--r--ironic/tests/unit/drivers/modules/ibmc/test_utils.py6
-rw-r--r--ironic/tests/unit/drivers/modules/ibmc/test_vendor.py22
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_bios.py3
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_boot.py2
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_common.py25
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_console.py2
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_firmware_processor.py3
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_inspect.py2
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_management.py453
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_power.py3
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_raid.py3
-rw-r--r--ironic/tests/unit/drivers/modules/ilo/test_vendor.py2
-rw-r--r--ironic/tests/unit/drivers/modules/intel_ipmi/test_management.py2
-rw-r--r--ironic/tests/unit/drivers/modules/irmc/test_bios.py2
-rw-r--r--ironic/tests/unit/drivers/modules/irmc/test_boot.py303
-rw-r--r--ironic/tests/unit/drivers/modules/irmc/test_common.py3
-rw-r--r--ironic/tests/unit/drivers/modules/irmc/test_inspect.py2
-rw-r--r--ironic/tests/unit/drivers/modules/irmc/test_management.py3
-rw-r--r--ironic/tests/unit/drivers/modules/irmc/test_periodic_task.py3
-rw-r--r--ironic/tests/unit/drivers/modules/irmc/test_power.py2
-rw-r--r--ironic/tests/unit/drivers/modules/irmc/test_raid.py2
-rw-r--r--ironic/tests/unit/drivers/modules/network/json_samples/network_data.json113
-rw-r--r--ironic/tests/unit/drivers/modules/network/test_common.py219
-rw-r--r--ironic/tests/unit/drivers/modules/network/test_flat.py99
-rw-r--r--ironic/tests/unit/drivers/modules/network/test_neutron.py151
-rw-r--r--ironic/tests/unit/drivers/modules/network/test_noop.py6
-rw-r--r--ironic/tests/unit/drivers/modules/redfish/test_bios.py17
-rw-r--r--ironic/tests/unit/drivers/modules/redfish/test_boot.py485
-rw-r--r--ironic/tests/unit/drivers/modules/redfish/test_inspect.py3
-rw-r--r--ironic/tests/unit/drivers/modules/redfish/test_management.py116
-rw-r--r--ironic/tests/unit/drivers/modules/redfish/test_power.py128
-rw-r--r--ironic/tests/unit/drivers/modules/redfish/test_utils.py2
-rw-r--r--ironic/tests/unit/drivers/modules/storage/test_cinder.py10
-rw-r--r--ironic/tests/unit/drivers/modules/storage/test_external.py2
-rw-r--r--ironic/tests/unit/drivers/modules/test_agent.py766
-rw-r--r--ironic/tests/unit/drivers/modules/test_agent_base.py503
-rw-r--r--ironic/tests/unit/drivers/modules/test_agent_client.py192
-rw-r--r--ironic/tests/unit/drivers/modules/test_agent_power.py127
-rw-r--r--ironic/tests/unit/drivers/modules/test_boot_mode_utils.py8
-rw-r--r--ironic/tests/unit/drivers/modules/test_console_utils.py49
-rw-r--r--ironic/tests/unit/drivers/modules/test_deploy_utils.py117
-rw-r--r--ironic/tests/unit/drivers/modules/test_image_cache.py27
-rw-r--r--ironic/tests/unit/drivers/modules/test_inspect_utils.py3
-rw-r--r--ironic/tests/unit/drivers/modules/test_inspector.py36
-rw-r--r--ironic/tests/unit/drivers/modules/test_ipmitool.py112
-rw-r--r--ironic/tests/unit/drivers/modules/test_ipxe.py131
-rw-r--r--ironic/tests/unit/drivers/modules/test_iscsi_deploy.py251
-rw-r--r--ironic/tests/unit/drivers/modules/test_noop.py3
-rw-r--r--ironic/tests/unit/drivers/modules/test_noop_mgmt.py2
-rw-r--r--ironic/tests/unit/drivers/modules/test_pxe.py11
-rw-r--r--ironic/tests/unit/drivers/modules/test_snmp.py13
-rw-r--r--ironic/tests/unit/drivers/modules/xclarity/test_common.py3
-rw-r--r--ironic/tests/unit/drivers/modules/xclarity/test_management.py9
-rw-r--r--ironic/tests/unit/drivers/modules/xclarity/test_power.py28
-rw-r--r--ironic/tests/unit/drivers/test_base.py3
-rw-r--r--ironic/tests/unit/drivers/test_drac.py17
-rw-r--r--ironic/tests/unit/drivers/test_generic.py5
-rw-r--r--ironic/tests/unit/drivers/test_ibmc.py11
-rw-r--r--ironic/tests/unit/drivers/test_ilo.py25
-rw-r--r--ironic/tests/unit/drivers/test_irmc.py24
-rw-r--r--ironic/tests/unit/drivers/test_snmp.py2
-rw-r--r--ironic/tests/unit/drivers/test_utils.py15
-rw-r--r--ironic/tests/unit/drivers/third_party_driver_mocks.py9
-rw-r--r--ironic/tests/unit/objects/test_allocation.py2
-rw-r--r--ironic/tests/unit/objects/test_bios.py3
-rw-r--r--ironic/tests/unit/objects/test_chassis.py2
-rw-r--r--ironic/tests/unit/objects/test_conductor.py2
-rw-r--r--ironic/tests/unit/objects/test_deploy_template.py2
-rw-r--r--ironic/tests/unit/objects/test_fields.py9
-rw-r--r--ironic/tests/unit/objects/test_node.py4
-rw-r--r--ironic/tests/unit/objects/test_notification.py2
-rw-r--r--ironic/tests/unit/objects/test_objects.py4
-rw-r--r--ironic/tests/unit/objects/test_port.py20
-rw-r--r--ironic/tests/unit/objects/test_portgroup.py2
-rw-r--r--ironic/tests/unit/objects/test_trait.py2
-rw-r--r--ironic/tests/unit/objects/test_volume_connector.py2
-rw-r--r--ironic/tests/unit/objects/test_volume_target.py2
-rw-r--r--ironic/tests/unit/test_base.py12
-rw-r--r--lower-constraints.txt106
-rw-r--r--playbooks/ci-workarounds/get_tftpd.yaml5
-rw-r--r--playbooks/legacy/grenade-dsvm-ironic/run.yaml121
-rw-r--r--playbooks/legacy/ironic-dsvm-base/post.yaml15
-rw-r--r--playbooks/legacy/ironic-dsvm-base/pre.yaml22
-rw-r--r--releasenotes/notes/add-ibmc-raid-interface-0c13826e134fb4ce.yaml5
-rw-r--r--releasenotes/notes/add-ilo-inband-deploy-step-update-firmware-using-sum-cfee84a19120dd3c.yaml11
-rw-r--r--releasenotes/notes/add-ipxe-boot-iso-support-6ae2f5cc2250be3e.yaml12
-rw-r--r--releasenotes/notes/add-redfish-boot_iso-pass-through-8a6f4d0c98ada1d5.yaml8
-rw-r--r--releasenotes/notes/agent-client-poll-ce16fd589e88c95a.yaml7
-rw-r--r--releasenotes/notes/agent-power-a000fdf37cb870e4.yaml6
-rw-r--r--releasenotes/notes/agent-power-off-2115fcfaac030bd0.yaml5
-rw-r--r--releasenotes/notes/agent-raid-647acfd599e83476.yaml5
-rw-r--r--releasenotes/notes/agent-raid-validate-f7348ac034606b83.yaml6
-rw-r--r--releasenotes/notes/agent-token-817a03776bd46d5b.yaml6
-rw-r--r--releasenotes/notes/agent-verify-ca-ddbfbb0f27198d82.yaml6
-rw-r--r--releasenotes/notes/allocation-delete-26c7c2f1651759f5.yaml6
-rw-r--r--releasenotes/notes/bug-2007963-idrac-wsman-raid-apply-configuration-792ccf195057016b.yaml6
-rw-r--r--releasenotes/notes/change_default_use_ipmitool_retries-2529ce032eae7d1b.yaml9
-rw-r--r--releasenotes/notes/cleaning-logs-dc115b0926ae3982.yaml5
-rw-r--r--releasenotes/notes/del-api-url-eb2ea29aa63a2cb5.yaml6
-rw-r--r--releasenotes/notes/del-cinder-url-cf43cd0336c22878.yaml3
-rw-r--r--releasenotes/notes/del-fatal_exception_format_errors-f63b15c8aa460dff.yaml5
-rw-r--r--releasenotes/notes/deleting-dcdb9cf0d2a6a1a6.yaml4
-rw-r--r--releasenotes/notes/deploy-step-validate-76b2aa97e02ba669.yaml5
-rw-r--r--releasenotes/notes/destroy-broken-8b13de8382199aca.yaml5
-rw-r--r--releasenotes/notes/direct-deploy-steps-36486987156017d7.yaml17
-rw-r--r--releasenotes/notes/direct-fast-track-d0f43850b6e80751.yaml5
-rw-r--r--releasenotes/notes/disable_periodic_task-590a91c0a5235cfb.yaml7
-rw-r--r--releasenotes/notes/dont-cleanup-cache-twice-0395a50ad723bca8.yaml5
-rw-r--r--releasenotes/notes/driver-maintenance-0945c2939fa4e917.yaml5
-rw-r--r--releasenotes/notes/explicit_ipxe_config_options-d7bf9a743a13f523.yaml17
-rw-r--r--releasenotes/notes/fast-track-with-cleaning-438225116a11662d.yaml7
-rw-r--r--releasenotes/notes/fix-json-rpc-client-ssl-2438a731beb3d5f9.yaml5
-rw-r--r--releasenotes/notes/fix-redfish-sadness-workaround-ed02cb310ff369f4.yaml11
-rw-r--r--releasenotes/notes/hash-ring-algo-4337c18117b33070.yaml7
-rw-r--r--releasenotes/notes/http-basic-auth-f8c0536eba989918.yaml34
-rw-r--r--releasenotes/notes/ibmcclient-fix-8c6cb49be0aef5f2.yaml4
-rw-r--r--releasenotes/notes/idrac-add-ehba-support-10b90c92b8865364.yaml15
-rw-r--r--releasenotes/notes/ilo-support-boot-mode-management-apis-8173002daf79894c.yaml5
-rw-r--r--releasenotes/notes/in-band-steps-e4a1fe759029fea5.yaml23
-rw-r--r--releasenotes/notes/inspection-fast-track-ab5165e11d3e9522.yaml5
-rw-r--r--releasenotes/notes/ipa-erase_devices-skip-read-only-9f8cd9278c35a84e.yaml12
-rw-r--r--releasenotes/notes/ipmi-retries-min-command-interval-070cd7eff5eb74dd.yaml6
-rw-r--r--releasenotes/notes/ipmitool-use_ipmitool_retries-b55b2b8ed5cab603.yaml16
-rw-r--r--releasenotes/notes/iscsi-ansible-steps-817b52269d2455b0.yaml28
-rw-r--r--releasenotes/notes/json-rpc-timeout-ac30eea164b3a294.yaml7
-rw-r--r--releasenotes/notes/json_rpc_http_basic-42dfc6ca2471a30e.yaml6
-rw-r--r--releasenotes/notes/missing-sw-raid-b7fdc9259612970d.yaml5
-rw-r--r--releasenotes/notes/netboot-fallback-b208b2c3b40a0d01.yaml12
-rw-r--r--releasenotes/notes/no-power-on-842b21d55b07a632.yaml9
-rw-r--r--releasenotes/notes/node-network-data-6f998aaa57020f4b.yaml7
-rw-r--r--releasenotes/notes/port-list-by-project-8cfaf3b2cf0dd627.yaml5
-rw-r--r--releasenotes/notes/prevent-ports-with-vif-deletion-3edac3df5aa1becf.yaml7
-rw-r--r--releasenotes/notes/raid-max-c0920cc44b9779ee.yaml5
-rw-r--r--releasenotes/notes/redfish-noop-mgmt-b61d02b77b1c9d6b.yaml6
-rw-r--r--releasenotes/notes/redfish-power-87062756bce8b047.yaml6
-rw-r--r--releasenotes/notes/redfish-sadness-6e2a37b3f45ef1aa.yaml18
-rw-r--r--releasenotes/notes/redfish-virtual-media-permission-fix-1909b9cdbbbf9fd1.yaml15
-rw-r--r--releasenotes/notes/reloadable-301ec2aa421abf66.yaml48
-rw-r--r--releasenotes/notes/remove-locks-first-d12ac27106f800f8.yaml9
-rw-r--r--releasenotes/notes/skip-power-sync-for-adoptfail-d2498f1a2e997ed7.yaml5
-rw-r--r--releasenotes/notes/socat-console-port-alloc-ipv6-26760f53f86209d0.yaml5
-rw-r--r--releasenotes/notes/token-reboot-b48b5981a58a30ae.yaml5
-rw-r--r--releasenotes/notes/unrescue-token-ae664a17343e0610.yaml5
-rw-r--r--releasenotes/notes/unsave-power-state-on-adopt-failed-09194c8269c779de.yaml7
-rw-r--r--releasenotes/notes/use-image-format-for-memory-check-25b1f06701ccdc47.yaml6
-rw-r--r--releasenotes/notes/vif-port-attach-17a9993bf5c21d69.yaml8
-rw-r--r--releasenotes/source/conf.py9
-rw-r--r--releasenotes/source/index.rst1
-rw-r--r--releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po60
-rw-r--r--releasenotes/source/ussuri.rst6
-rw-r--r--requirements.txt11
-rw-r--r--setup.cfg19
-rw-r--r--test-requirements.txt7
-rw-r--r--tools/bandit.yml1
-rwxr-xr-xtools/test-setup.sh4
-rw-r--r--tox.ini21
-rw-r--r--zuul.d/ironic-jobs.yaml322
-rw-r--r--zuul.d/legacy-ironic-jobs.yaml45
-rw-r--r--zuul.d/project.yaml20
423 files changed, 16368 insertions, 4945 deletions
diff --git a/api-ref/source/baremetal-api-v1-indicators.inc b/api-ref/source/baremetal-api-v1-indicators.inc
new file mode 100644
index 000000000..0c4b11885
--- /dev/null
+++ b/api-ref/source/baremetal-api-v1-indicators.inc
@@ -0,0 +1,154 @@
+.. -*- rst -*-
+
+=====================
+Indicators Management
+=====================
+
+Indicator management is an extension of the node REST API endpoint that
+allows reading and toggling the indicators (e.g. LEDs) on the hardware
+units.
+
+List Indicators
+===============
+
+.. rest_method:: GET /v1/nodes/{node_ident}/management/indicators
+
+.. versionadded:: 1.63
+
+List all available indicator names for each of the hardware components.
+The components the ``redfish`` driver may expose are ``system``,
+``chassis`` and ``drive``. The actual list depends on what the underlying
+hardware supports.
+
+Normal response code: 200
+
+Error response codes: 404 (if node not found)
+
+Request
+-------
+
+.. rest_parameters:: parameters.yaml
+
+ - node_ident: node_ident
+
+Response Parameters
+-------------------
+
+.. rest_parameters:: parameters.yaml
+
+ - components: n_components
+ - name: component_name
+ - links: links
+
+**Example list of indicators for the node:**
+
+.. literalinclude:: samples/node-indicators-list-response.json
+ :language: javascript
+
+
+List Indicators for hardware component
+======================================
+
+.. rest_method:: GET /v1/nodes/{node_ident}/management/indicators/{component}
+
+.. versionadded:: 1.63
+
+Retrieves indicators for a given hardware component along with their attributes.
+The components the ``redfish`` driver may expose are ``system``,
+``chassis`` and ``drive``. The actual list depends on what the underlying
+hardware supports.
+
+Normal response code: 200
+
+Error response codes: 404 (if node or component is not found)
+
+Request
+-------
+
+.. rest_parameters:: parameters.yaml
+
+ - node_ident: node_ident
+ - component: component
+
+Response Parameters
+-------------------
+
+.. rest_parameters:: parameters.yaml
+
+ - indicators: n_indicators
+ - name: indicator_name
+ - readonly: indicator_readonly
+ - states: indicator_states
+ - links: links
+
+**Example list of indicators for a given component of the node:**
+
+.. literalinclude:: samples/node-indicators-component-list-response.json
+ :language: javascript
+
+
+Get Indicator State
+===================
+
+.. rest_method:: GET /v1/nodes/{node_ident}/management/indicators/{component}/{ind_ident}
+
+.. versionadded:: 1.63
+
+Retrieves the state of a chosen indicator for a given component of the node.
+The ``state`` field of the response object holds the indicator state, which
+can be one of ``OFF``, ``ON``, ``BLINKING`` or ``UNKNOWN``.
+
+Normal response code: 200
+
+Error response codes: 404 (if node, component or indicator is not found)
+
+Request
+-------
+
+.. rest_parameters:: parameters.yaml
+
+ - node_ident: node_ident
+ - component: component
+ - ind_ident: ind_ident
+
+Response Parameters
+-------------------
+
+.. rest_parameters:: parameters.yaml
+
+ - state: n_ind_state
+
+**Example of an indicator state for a given component of the node:**
+
+.. literalinclude:: samples/node-indicators-get-state-response.json
+ :language: javascript
+
+
+Set Indicator State
+===================
+
+.. rest_method:: PUT /v1/nodes/{node_ident}/management/indicators/{component}/{ind_ident}
+
+.. versionadded:: 1.63
+
+Set the state of the desired indicator of the component.
+
+Normal response code: 204 (No content)
+
+Error codes:
+ - 400 (if state is not an accepted value)
+ - 404 (if node, component or indicator is not found)
+
+Request
+-------
+
+.. rest_parameters:: parameters.yaml
+
+ - node_ident: node_ident
+ - component: component
+ - ind_ident: ind_ident
+ - state: n_ind_state
+
+**Set the State of an Indicator**
+
+.. literalinclude:: samples/node-indicators-set-state.json
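
Taken together, the new endpoints can be driven with plain HTTP. A minimal
sketch with curl — assuming a standalone ironic API at http://127.0.0.1:6385
with authentication disabled, and the node name ``Compute0`` from the samples
above (both assumptions, not part of the change itself)::

    # List components exposing indicators (API microversion 1.63+)
    curl -H "X-OpenStack-Ironic-API-Version: 1.63" \
        http://127.0.0.1:6385/v1/nodes/Compute0/management/indicators

    # Read the state of the "power" indicator of the "system" component
    curl -H "X-OpenStack-Ironic-API-Version: 1.63" \
        http://127.0.0.1:6385/v1/nodes/Compute0/management/indicators/system/power

    # Set the "alert" indicator blinking; a 204 (No Content) is expected
    curl -X PUT -H "X-OpenStack-Ironic-API-Version: 1.63" \
        -H "Content-Type: application/json" \
        -d '{"state": "BLINKING"}' \
        http://127.0.0.1:6385/v1/nodes/Compute0/management/indicators/system/alert
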
diff --git a/api-ref/source/baremetal-api-v1-nodes.inc b/api-ref/source/baremetal-api-v1-nodes.inc
index 49d5cf436..526f6e0d2 100644
--- a/api-ref/source/baremetal-api-v1-nodes.inc
+++ b/api-ref/source/baremetal-api-v1-nodes.inc
@@ -442,6 +442,7 @@ Response
- allocation_uuid: allocation_uuid
- retired: retired
- retired_reason: retired_reason
+ - network_data: network_data
**Example detailed list of Nodes:**
@@ -491,6 +492,9 @@ only the specified set.
.. versionadded:: 1.65
Introduced the ``lessee`` field.
+.. versionadded:: 1.66
+ Introduced the ``network_data`` field.
+
Normal response codes: 200
Error codes: 400,403,404,406
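
The new ``network_data`` field follows the usual node read/update patterns; a
hedged sketch with curl, assuming the same standalone endpoint and node name
as in the indicator samples (ironic node updates use JSON PATCH)::

    # Read network_data at microversion 1.66 (an empty object until set)
    curl -H "X-OpenStack-Ironic-API-Version: 1.66" \
        http://127.0.0.1:6385/v1/nodes/Compute0 | jq .network_data

    # Store a minimal network_data.json-style document on the node
    curl -X PATCH -H "X-OpenStack-Ironic-API-Version: 1.66" \
        -H "Content-Type: application/json" \
        -d '[{"op": "add", "path": "/network_data",
              "value": {"links": [], "networks": [], "services": []}}]' \
        http://127.0.0.1:6385/v1/nodes/Compute0
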
diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py
index aea4cf4dd..0e3898f3d 100644
--- a/api-ref/source/conf.py
+++ b/api-ref/source/conf.py
@@ -37,9 +37,9 @@ extensions = [
'openstackdocstheme'
]
-repository_name = 'openstack/ironic'
-use_storyboard = True
-
+openstackdocs_repo_name = 'openstack/ironic'
+openstackdocs_use_storyboard = True
+openstackdocs_auto_name = False
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
@@ -98,7 +98,7 @@ add_module_names = False
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
# -- Options for man page output ----------------------------------------------
diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst
index 60c3faff0..b8f19267a 100644
--- a/api-ref/source/index.rst
+++ b/api-ref/source/index.rst
@@ -12,6 +12,7 @@
.. include:: baremetal-api-v1-node-passthru.inc
.. include:: baremetal-api-v1-nodes-traits.inc
.. include:: baremetal-api-v1-nodes-vifs.inc
+.. include:: baremetal-api-v1-indicators.inc
.. include:: baremetal-api-v1-portgroups.inc
.. include:: baremetal-api-v1-nodes-portgroups.inc
.. include:: baremetal-api-v1-ports.inc
diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
index 59567b488..86a380ff0 100644
--- a/api-ref/source/parameters.yaml
+++ b/api-ref/source/parameters.yaml
@@ -56,6 +56,12 @@ chassis_ident:
in: path
required: true
type: string
+component:
+ description: |
+ The Bare Metal node component.
+ in: path
+ required: true
+ type: string
deploy_template_ident:
description: |
The UUID or name of the deploy template.
@@ -74,6 +80,12 @@ hostname_ident:
in: path
required: true
type: string
+ind_ident:
+ description: |
+ The identifier of an indicator on a Bare Metal node component.
+ in: path
+ required: true
+ type: string
node_id:
description: |
The UUID of the node.
@@ -538,6 +550,12 @@ clean_steps:
in: body
required: false
type: array
+component_name:
+ description: |
+ The name of the component available for the node.
+ in: body
+ required: true
+ type: string
conductor:
description: |
The conductor currently servicing a node. This field is read-only.
@@ -855,6 +873,25 @@ id:
in: body
required: true
type: string
+indicator_name:
+ description: |
+ The name of the indicator.
+ in: body
+ required: true
+ type: string
+indicator_readonly:
+ description: |
+ Indicates whether the indicator is readonly.
+ in: body
+ required: true
+ type: boolean
+indicator_states:
+ description: |
+ The possible states for a given indicator. The only values that can be
+ returned are ``ON``, ``OFF``, ``BLINKING`` and ``UNKNOWN``.
+ in: body
+ required: true
+ type: array
inspect_interface:
description: |
The interface used for node inspection, e.g. "no-inspect".
@@ -956,12 +993,33 @@ management_interface:
in: body
required: true
type: string
+n_components:
+ description: |
+ A list of all available indicator names for each of the hardware
+ components of this node.
+ in: body
+ required: true
+ type: array
n_description:
description: |
Informational text about this node.
in: body
required: true
type: string
+n_ind_state:
+ description: |
+ The state of an indicator of the component of the node. Possible values
+ are: ``OFF``, ``ON``, ``BLINKING`` or ``UNKNOWN``.
+ in: body
+ required: true
+ type: string
+n_indicators:
+ description: |
+ The list of indicators for a given hardware component of this node,
+ along with their attributes.
+ in: body
+ required: true
+ type: array
n_portgroups:
description: |
Links to the collection of portgroups on this node.
@@ -1012,6 +1070,13 @@ name:
in: body
required: true
type: string
+network_data:
+ description: |
+ Static network configuration for the node, to eventually be passed to the
+ node's operating system.
+ in: body
+ required: false
+ type: JSON
network_interface:
description: |
Which Network Interface provider to use when plumbing the network
diff --git a/api-ref/source/samples/node-create-response.json b/api-ref/source/samples/node-create-response.json
index 214305d0e..273d0709a 100644
--- a/api-ref/source/samples/node-create-response.json
+++ b/api-ref/source/samples/node-create-response.json
@@ -38,6 +38,7 @@
"maintenance_reason": null,
"management_interface": null,
"name": "test_node_classic",
+ "network_data": {},
"network_interface": "flat",
"owner": null,
"portgroups": [
diff --git a/api-ref/source/samples/node-indicators-component-list-response.json b/api-ref/source/samples/node-indicators-component-list-response.json
new file mode 100644
index 000000000..51a3b4b16
--- /dev/null
+++ b/api-ref/source/samples/node-indicators-component-list-response.json
@@ -0,0 +1,45 @@
+{
+ "indicators": [
+ {
+ "name": "power",
+ "readonly": true,
+ "states": [
+ "OFF",
+ "ON"
+ ],
+ "links": [
+ {
+ "href": "http://127.0.0.1:6385/v1/nodes/Compute0/
+ management/indicators/system/power",
+ "rel": "self"
+ },
+ {
+ "href": "http://127.0.0.1:6385/nodes/Compute0/
+ management/indicators/system/power",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ {
+ "name": "alert",
+ "readonly": false,
+ "states": [
+ "OFF",
+ "BLINKING",
+ "UNKNOWN"
+ ],
+ "links": [
+ {
+ "href": "http://127.0.0.1:6385/v1/nodes/Compute0/
+ management/indicators/system/alert",
+ "rel": "self"
+ },
+ {
+ "href": "http://127.0.0.1:6385/nodes/Compute0/
+ management/indicators/system/alert",
+ "rel": "bookmark"
+ }
+ ]
+ }
+ ]
+}
diff --git a/api-ref/source/samples/node-indicators-get-state-response.json b/api-ref/source/samples/node-indicators-get-state-response.json
new file mode 100644
index 000000000..f4741def6
--- /dev/null
+++ b/api-ref/source/samples/node-indicators-get-state-response.json
@@ -0,0 +1,3 @@
+{
+ "state": "ON"
+}
diff --git a/api-ref/source/samples/node-indicators-list-response.json b/api-ref/source/samples/node-indicators-list-response.json
new file mode 100644
index 000000000..a813c26ab
--- /dev/null
+++ b/api-ref/source/samples/node-indicators-list-response.json
@@ -0,0 +1,34 @@
+{
+ "components": [
+ {
+ "name": "system",
+ "links": [
+ {
+ "href": "http://127.0.0.1:6385/v1/nodes/Compute0/
+ management/indicators/system",
+ "rel": "self"
+ },
+ {
+ "href": "http://127.0.0.1:6385/nodes/Compute0/
+ management/indicators/system",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ {
+ "name": "chassis",
+ "links": [
+ {
+ "href": "http://127.0.0.1:6385/v1/nodes/Compute0/
+ management/indicators/chassis",
+ "rel": "self"
+ },
+ {
+ "href": "http://127.0.0.1:6385/nodes/Compute0/
+ management/indicators/chassis",
+ "rel": "bookmark"
+ }
+ ]
+ }
+ ]
+}
diff --git a/api-ref/source/samples/node-indicators-set-state.json b/api-ref/source/samples/node-indicators-set-state.json
new file mode 100644
index 000000000..26063ffe2
--- /dev/null
+++ b/api-ref/source/samples/node-indicators-set-state.json
@@ -0,0 +1,3 @@
+{
+ "state": "BLINKING"
+}
diff --git a/api-ref/source/samples/node-show-response.json b/api-ref/source/samples/node-show-response.json
index 9cb1931b0..68b7eacb9 100644
--- a/api-ref/source/samples/node-show-response.json
+++ b/api-ref/source/samples/node-show-response.json
@@ -41,6 +41,7 @@
"maintenance_reason": null,
"management_interface": null,
"name": "test_node_classic",
+ "network_data": {},
"network_interface": "flat",
"owner": null,
"portgroups": [
diff --git a/api-ref/source/samples/node-update-driver-info-response.json b/api-ref/source/samples/node-update-driver-info-response.json
index f7d2d88ee..3655243ce 100644
--- a/api-ref/source/samples/node-update-driver-info-response.json
+++ b/api-ref/source/samples/node-update-driver-info-response.json
@@ -42,6 +42,7 @@
"maintenance_reason": "Replacing the hard drive",
"management_interface": null,
"name": "test_node_classic",
+ "network_data": {},
"network_interface": "flat",
"owner": null,
"portgroups": [
diff --git a/api-ref/source/samples/nodes-list-details-response.json b/api-ref/source/samples/nodes-list-details-response.json
index 90dc72a2a..98c22aa9a 100644
--- a/api-ref/source/samples/nodes-list-details-response.json
+++ b/api-ref/source/samples/nodes-list-details-response.json
@@ -43,6 +43,7 @@
"maintenance_reason": null,
"management_interface": null,
"name": "test_node_classic",
+ "network_data": {},
"network_interface": "flat",
"owner": "john doe",
"portgroups": [
@@ -148,6 +149,7 @@
"maintenance_reason": null,
"management_interface": "ipmitool",
"name": "test_node_dynamic",
+ "network_data": {},
"network_interface": "flat",
"owner": "43e61ec9-8e42-4dcb-bc45-30d66aa93e5b",
"portgroups": [
diff --git a/babel.cfg b/babel.cfg
deleted file mode 100644
index 15cd6cb76..000000000
--- a/babel.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-[python: **.py]
-
diff --git a/bindep.txt b/bindep.txt
index 80de701b3..36b4ce35d 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -34,9 +34,11 @@ libvirt-devel [platform:rpm devstack]
qemu [platform:dpkg devstack build-image-dib]
qemu-kvm [platform:dpkg devstack]
qemu-utils [platform:dpkg devstack build-image-dib]
-sgabios [devstack]
+qemu-system-data [platform:dpkg devstack]
+sgabios [platform:rpm devstack]
ipxe-qemu [platform:dpkg devstack]
edk2-ovmf [platform:rpm devstack]
+ovmf [platform:dpkg devstack]
ipxe-roms-qemu [platform:rpm devstack]
openvswitch [platform:rpm devstack]
iptables [devstack]
diff --git a/devstack/common_settings b/devstack/common_settings
index 0a5fe2387..59d12eb52 100644
--- a/devstack/common_settings
+++ b/devstack/common_settings
@@ -4,19 +4,12 @@ if [[ -f $TOP_DIR/../../old/devstack/.localrc.auto ]]; then
source <(cat $TOP_DIR/../../old/devstack/.localrc.auto | grep -v 'enable_plugin')
fi
-CIRROS_VERSION=0.4.0
-
# Whether configure the nodes to boot in Legacy BIOS or UEFI mode. Accepted
# values are: "bios" or "uefi", defaults to "bios".
-#
-# WARNING: UEFI is EXPERIMENTAL. The CirrOS images uploaded by DevStack by
-# default WILL NOT WORK with UEFI.
IRONIC_BOOT_MODE=${IRONIC_BOOT_MODE:-bios}
+CIRROS_VERSION=${CIRROS_VERSION:-"0.5.1"}
IRONIC_DEFAULT_IMAGE_NAME=cirros-${CIRROS_VERSION}-x86_64-uec
-if [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then
- IRONIC_DEFAULT_IMAGE_NAME=cirros-d160722-x86_64-uec
-fi
IRONIC_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-$IRONIC_DEFAULT_IMAGE_NAME}
@@ -33,17 +26,12 @@ function add_image_link {
fi
}
-if [[ "$IRONIC_BOOT_MODE" == "uefi" ]]; then
- add_image_link http://download.cirros-cloud.net/daily/20160722/cirros-d160722-x86_64-uec.tar.gz
- add_image_link http://download.cirros-cloud.net/daily/20160722/cirros-d160722-x86_64-disk.img
-else
- # NOTE (vsaienko) We are going to test mixed drivers/partitions in single setup.
- # Do not restrict downloading image only for specific case. Download both disk and uec images.
- # NOTE (vdrok): Here the images are actually pre-cached by devstack, in
- # the files folder, so they won't be downloaded again.
- add_image_link http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz
- add_image_link http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-disk.img
-fi
+# NOTE (vsaienko) We are going to test mixed drivers/partitions in single setup.
+# Do not restrict downloading image only for specific case. Download both disk and uec images.
+# NOTE (vdrok): Here the images are actually pre-cached by devstack, in
+# the files folder, so they won't be downloaded again.
+add_image_link http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz
+add_image_link http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-disk.img
export IRONIC_WHOLEDISK_IMAGE_NAME=${IRONIC_WHOLEDISK_IMAGE_NAME:-${IRONIC_IMAGE_NAME/-uec/-disk}}
export IRONIC_PARTITIONED_IMAGE_NAME=${IRONIC_PARTITIONED_IMAGE_NAME:-${IRONIC_IMAGE_NAME/-disk/-uec}}
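
Since CIRROS_VERSION is now an overridable default rather than a constant, a
deployment can pin a different image from local.conf; a minimal sketch (the
version and boot mode values below are illustrative only)::

    [[local|localrc]]
    enable_plugin ironic https://opendev.org/openstack/ironic
    CIRROS_VERSION=0.5.1
    IRONIC_BOOT_MODE=uefi
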
diff --git a/devstack/files/debs/ironic b/devstack/files/debs/ironic
index e9907b0be..49b0689d7 100644
--- a/devstack/files/debs/ironic
+++ b/devstack/files/debs/ironic
@@ -6,36 +6,36 @@
# but only recommends it in Jessie/Xenial.
# Make sure syslinux-common is installed for those distros as it provides
# *.c32 modules for syslinux
-# TODO remove distro pinning when Wheezy / Trusty are EOLed (May 2019)
-# or DevStack stops supporting those.
# In the meantime, new Debian-based release codenames will have to be added
# as distros can not be pinned with 'if-later-than' specified.
apparmor
docker.io
+gnupg
ipmitool
iptables
ipxe
+ipxe-qemu
isolinux
-gnupg
-libguestfs0
+jq
libguestfs-tools
-libvirt-bin # dist:xenial,bionic NOPRIME
+libguestfs0
+libvirt-bin # dist:bionic
+libvirt-daemon-system # dist:focal
+libvirt-dev
open-iscsi
openssh-client
-# TODO (etingof) pinning to older version in devstack/lib/ironic
-#ovmf
-pxelinux # dist:xenial,bionic
-python-libguestfs
+ovmf
+pxelinux
+python-libguestfs # dist:bionic
+python3-guestfs # dist:focal
qemu
qemu-kvm
qemu-utils
-sgabios
+qemu-system-data # dist:focal
+sgabios # dist:bionic
shellinabox
-syslinux-common # dist:xenial,bionic
+socat
+squashfs-tools
+syslinux-common
tftpd-hpa
xinetd
-squashfs-tools
-libvirt-dev
-socat
-ipxe-qemu
-jq
diff --git a/devstack/lib/ironic b/devstack/lib/ironic
index bc28bb39e..475f14e74 100644
--- a/devstack/lib/ironic
+++ b/devstack/lib/ironic
@@ -66,7 +66,7 @@ IRONIC_PYTHON_AGENT_REPO=${IRONIC_PYTHON_AGENT_REPO:-${GIT_BASE}/openstack/ironi
IRONIC_PYTHON_AGENT_BRANCH=${IRONIC_PYTHON_AGENT_BRANCH:-$TARGET_BRANCH}
IRONIC_PYTHON_AGENT_DIR=$DEST/ironic-python-agent
IRONIC_PYTHON_AGENT_BUILDER_REPO=${IRONIC_PYTHON_AGENT_BUILDER_REPO:-${GIT_BASE}/openstack/ironic-python-agent-builder.git}
-IRONIC_PYTHON_AGENT_BUILDER_BRANCH=${IRONIC_PYTHON_AGENT_BUILDER_BRANCH:-$TARGET_BRANCH}
+IRONIC_PYTHON_AGENT_BUILDER_BRANCH=${IRONIC_PYTHON_AGENT_BUILDER_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
IRONIC_PYTHON_AGENT_BUILDER_DIR=$DEST/ironic-python-agent-builder
IRONIC_DIB_BINDEP_FILE=https://opendev.org/openstack/diskimage-builder/raw/branch/master/bindep.txt
IRONIC_DATA_DIR=$DATA_DIR/ironic
@@ -143,7 +143,7 @@ IRONIC_TFTPSERVER_IP=${IRONIC_TFTPSERVER_IP:-$HOST_IP}
IRONIC_TFTP_BLOCKSIZE=${IRONIC_TFTP_BLOCKSIZE:-$((PUBLIC_BRIDGE_MTU-50))}
IRONIC_VM_COUNT=${IRONIC_VM_COUNT:-1}
IRONIC_VM_SPECS_CPU=${IRONIC_VM_SPECS_CPU:-1}
-IRONIC_VM_SPECS_RAM=${IRONIC_VM_SPECS_RAM:-1280}
+IRONIC_VM_SPECS_RAM=${IRONIC_VM_SPECS_RAM:-3072}
IRONIC_VM_SPECS_CPU_ARCH=${IRONIC_VM_SPECS_CPU_ARCH:-'x86_64'}
IRONIC_VM_SPECS_DISK=${IRONIC_VM_SPECS_DISK:-10}
IRONIC_VM_SPECS_DISK_FORMAT=${IRONIC_VM_SPECS_DISK_FORMAT:-qcow2}
@@ -164,6 +164,24 @@ if [[ -n "$BUILD_TIMEOUT" ]]; then
echo "WARNING: BUILD_TIMEOUT variable is renamed to IRONIC_TEMPEST_BUILD_TIMEOUT and will be deprecated in Pike."
fi
+hostdomain=$(hostname)
+if [[ "$hostdomain" =~ "rax" ]]; then
+ echo "WARNING: Auto-increasing the requested build timeout by 1.5 as the detected hostname suggests a cloud host where VMs are software emulated."
+ # NOTE(TheJulia): Rax hosts are entirely qemu emulated, not CPU enabled
+ # virtualization. As such, the ramdisk decompression is known to take an
+ # eceptional amount of time and we need to afford a little more time to
+ # these hosts for jobs to complete without issues.
+ new_timeout=$(echo "$IRONIC_TEMPEST_BUILD_TIMEOUT * 1.5 / 1" | bc)
+ IRONIC_TEMPEST_BUILD_TIMEOUT=$new_timeout
+
+ if [ -n "$IRONIC_PXE_BOOT_RETRY_TIMEOUT" ]; then
+ new_timeout=$(echo "$IRONIC_PXE_BOOT_RETRY_TIMEOUT * 1.5 / 1" | bc)
+ IRONIC_PXE_BOOT_RETRY_TIMEOUT=$new_timeout
+ fi
+ # TODO(TheJulia): If we have to do magically extend timeouts again,
+ # we should make a helper method...
+fi
+
IRONIC_DEFAULT_API_VERSION=${IRONIC_DEFAULT_API_VERSION:-}
IRONIC_CMD="openstack baremetal"
if [[ -n "$IRONIC_DEFAULT_API_VERSION" ]]; then
@@ -245,6 +263,14 @@ if [[ ! "$IRONIC_RAMDISK_TYPE" =~ $IRONIC_SUPPORTED_RAMDISK_TYPES_RE ]]; then
die $LINENO "Unrecognized IRONIC_RAMDISK_TYPE: $IRONIC_RAMDISK_TYPE. Expected 'tinyipa' or 'dib'"
fi
+# Prevent a case that will likely result in a failure.
+if [[ "$hostdomain" =~ "rax" ]]; then
+ if [[ "$IRONIC_RAMDISK_TYPE" == "dib" ]] && [[ "$IRONIC_BUILD_DEPLOY_RAMDISK" == "False" ]]; then
+ echo "** WARNING ** - DIB based IPA images have been defined, however we are running devstack on RAX VM. Due to virtualization constraints, we are automatically falling back to TinyIPA to ensure CI job passage."
+ IRONIC_RAMDISK_TYPE="tinyipa"
+ fi
+fi
+
# If present, these files are used as deploy ramdisk/kernel.
# (The value must be an absolute path)
IRONIC_DEPLOY_RAMDISK=${IRONIC_DEPLOY_RAMDISK:-$TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER.initramfs}
@@ -325,6 +351,9 @@ IRONIC_BIN_DIR=$(get_python_exec_prefix)
IRONIC_UWSGI_CONF=$IRONIC_CONF_DIR/ironic-uwsgi.ini
IRONIC_UWSGI=$IRONIC_BIN_DIR/ironic-api-wsgi
+# Lets support IPv6 testing!
+IRONIC_IP_VERSION=${IRONIC_IP_VERSION:-${IP_VERSION:-4}}
+
# Ironic connection info. Note the port must be specified.
if is_service_enabled tls-proxy; then
IRONIC_SERVICE_PROTOCOL=https
@@ -350,6 +379,11 @@ IRONIC_HTTP_PORT=${IRONIC_HTTP_PORT:-3928}
IRONIC_RPC_TRANSPORT=${IRONIC_RPC_TRANSPORT:-oslo}
IRONIC_JSON_RPC_PORT=${IRONIC_JSON_RPC_PORT:-8089}
+# The authentication strategy used by json-rpc. Valid values are:
+# keystone, http_basic, noauth, or no value to inherit from ironic-api
+# auth strategy.
+IRONIC_JSON_RPC_AUTH_STRATEGY=${IRONIC_JSON_RPC_AUTH_STRATEGY:-}
+
# The first port in the range to bind the Virtual BMCs. The number of
# ports that will be used depends on $IRONIC_VM_COUNT variable, e.g if
# $IRONIC_VM_COUNT=3 the ports 6230, 6231 and 6232 will be used for the
@@ -384,7 +418,7 @@ LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"}
LIBVIRT_STORAGE_POOL_PATH=${LIBVIRT_STORAGE_POOL_PATH:-/var/lib/libvirt/images}
# The authentication strategy used by ironic-api. Valid values are:
-# keystone and noauth.
+# keystone, http_basic, noauth.
IRONIC_AUTH_STRATEGY=${IRONIC_AUTH_STRATEGY:-keystone}
# By default, terminal SSL certificate is disabled.
@@ -416,10 +450,43 @@ IRONIC_PROVISION_PROVIDER_NETWORK_TYPE=${IRONIC_PROVISION_PROVIDER_NETWORK_TYPE:
# This is only used if IRONIC_PROVISION_NETWORK_NAME has been set.
IRONIC_PROVISION_SEGMENTATION_ID=${IRONIC_PROVISION_SEGMENTATION_ID:-}
-# Allocation network pool for provision network
-# Example: IRONIC_PROVISION_ALLOCATION_POOL=start=10.0.5.10,end=10.0.5.100
-# This is only used if IRONIC_PROVISION_NETWORK_NAME has been set.
-IRONIC_PROVISION_ALLOCATION_POOL=${IRONIC_PROVISION_ALLOCATION_POOL:-'start=10.0.5.10,end=10.0.5.100'}
+if [[ "$IRONIC_IP_VERSION" != '6' ]]; then
+ # NOTE(TheJulia): Let's not try to support mixed mode since the conductor
+ # can't support mixed mode operation. We are either IPv4 OR IPv6.
+ IRONIC_IP_VERSION='4'
+ # Allocation network pool for provision network
+ # Example: IRONIC_PROVISION_ALLOCATION_POOL=start=10.0.5.10,end=10.0.5.100
+ # This is only used if IRONIC_PROVISION_NETWORK_NAME has been set.
+ IRONIC_PROVISION_ALLOCATION_POOL=${IRONIC_PROVISION_ALLOCATION_POOL:-'start=10.0.5.10,end=10.0.5.100'}
+
+ # With multinode case all ironic-conductors should have IP from provisioning network.
+ # IRONIC_PROVISION_SUBNET_GATEWAY - is configured on primary node.
+ # Ironic provision subnet gateway.
+ IRONIC_PROVISION_SUBNET_GATEWAY=${IRONIC_PROVISION_SUBNET_GATEWAY:-'10.0.5.1'}
+ IRONIC_PROVISION_SUBNET_SUBNODE_IP=${IRONIC_PROVISION_SUBNET_SUBNODE_IP:-'10.0.5.2'}
+
+ # Ironic provision subnet prefix
+ # Example: IRONIC_PROVISION_SUBNET_PREFIX=10.0.5.0/24
+ IRONIC_PROVISION_SUBNET_PREFIX=${IRONIC_PROVISION_SUBNET_PREFIX:-'10.0.5.0/24'}
+else
+ IRONIC_IP_VERSION='6'
+ # NOTE(TheJulia): The IPv6 address devstack has identified is the
+ # local loopback. This does not really serve our purposes very
+ # well, so we need to set up something that will work.
+ if [[ "$HOST_IPV6" == '::1' ]] || [[ ! $HOST_IPV6 =~ "::" ]]; then
+ # We set up an address elsewhere because the service address of
+ # loopback cannot be used for v6 testing.
+ IRONIC_HOST_IPV6='fc00::1'
+ else
+ IRONIC_HOST_IPV6=$SERVICE_HOST
+ fi
+ IRONIC_PROVISION_SUBNET_GATEWAY=${IRONIC_PROVISION_SUBNET_GATEWAY:-'fc01::1'}
+ IRONIC_PROVISION_SUBNET_SUBNODE_IP=${IRONIC_PROVISION_SUBNET_SUBNODE_IP:-'fc01::2'}
+ IRONIC_PROVISION_SUBNET_PREFIX=${IRONIC_PROVISION_SUBNET_PREFIX:-'fc01::/64'}
+ IRONIC_TFTPSERVER_IP=$IRONIC_HOST_IPV6
+fi
+
+IRONIC_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
# Ironic provision subnet name.
# This is only used if IRONIC_PROVISION_NETWORK_NAME has been set.
@@ -446,6 +513,8 @@ IRONIC_PROVISION_SUBNET_SUBNODE_IP=${IRONIC_PROVISION_SUBNET_SUBNODE_IP:-'10.0.5
IRONIC_PROVISION_SUBNET_PREFIX=${IRONIC_PROVISION_SUBNET_PREFIX:-'10.0.5.0/24'}
if [[ "$HOST_TOPOLOGY_ROLE" == "primary" ]]; then
+ # Some CI jobs get triggered without a HOST_TOPOLOGY_ROLE.
+ # If so, none of this logic is, or needs to be, executed.
IRONIC_TFTPSERVER_IP=$IRONIC_PROVISION_SUBNET_GATEWAY
IRONIC_HTTP_SERVER=$IRONIC_PROVISION_SUBNET_GATEWAY
fi
@@ -454,6 +523,8 @@ if [[ "$HOST_TOPOLOGY_ROLE" == "subnode" ]]; then
IRONIC_HTTP_SERVER=$IRONIC_PROVISION_SUBNET_SUBNODE_IP
fi
+# NOTE(TheJulia): Last catch for this being set or not;
+# should only apply to v4.
IRONIC_HTTP_SERVER=${IRONIC_HTTP_SERVER:-$IRONIC_TFTPSERVER_IP}
# Port that must be permitted for iSCSI connections to be
@@ -488,10 +559,19 @@ TEMPEST_BAREMETAL_MIN_MICROVERSION=${TEMPEST_BAREMETAL_MIN_MICROVERSION:-}
# Define baremetal max_microversion in tempest config. No default value means that it is picked from tempest.
TEMPEST_BAREMETAL_MAX_MICROVERSION=${TEMPEST_BAREMETAL_MAX_MICROVERSION:-}
+# TODO(TheJulia): This PHYSICAL_NETWORK needs to be refactored in
+# our devstack plugin. It is used by the neutron-legacy integration,
+# however they want to name the new variable for the current neutron
+# plugin NEUTRON_PHYSICAL_NETWORK. For now we'll do some magic and
+# change it later once we migrate our jobs.
+
+PHYSICAL_NETWORK=${NEUTRON_PHYSICAL_NETWORK:-${PHYSICAL_NETWORK:-}}
+
# get_pxe_boot_file() - Get the PXE/iPXE boot file path
function get_pxe_boot_file {
local pxe_boot_file
if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
+ # TODO(TheJulia): This is not UEFI safe.
if is_ubuntu; then
pxe_boot_file=/usr/lib/ipxe/undionly.kpxe
elif is_fedora || is_suse; then
@@ -914,13 +994,11 @@ function cleanup_redfish {
function install_redfish {
# TODO(lucasagomes): Use Apache WSGI instead of gunicorn
- gunicorn=gunicorn
if is_ubuntu; then
- if python3_enabled; then
- gunicorn=${gunicorn}3
- fi
- install_package $gunicorn
+ install_package gunicorn3
+ elif is_fedora; then
+ install_package python3-gunicorn
else
pip_install_gr "gunicorn"
fi
@@ -934,11 +1012,14 @@ function install_redfish {
local cmd
- cmd=$(which $gunicorn)
+ cmd=$(which gunicorn3)
cmd+=" sushy_tools.emulator.main:app"
cmd+=" --bind ${HOST_IP}:${IRONIC_REDFISH_EMULATOR_PORT}"
cmd+=" --env FLASK_DEBUG=1"
cmd+=" --env SUSHY_EMULATOR_CONFIG=${IRONIC_REDFISH_EMULATOR_CONFIG}"
+ # NOTE(dtantsur): handling virtual media ISO can take time, so increase
+ # both concurrency and the worker timeout.
+ cmd+=" --workers 2 --threads 2 --timeout 90"
write_user_unit_file $IRONIC_REDFISH_EMULATOR_SYSTEMD_SERVICE "$cmd" "" "$STACK_USER"
@@ -1008,28 +1089,7 @@ function install_ironic {
# Replace the default virtio PXE ROM in QEMU with an EFI capable
# one. The EFI ROM should work on with both boot modes, Legacy
# BIOS and UEFI.
- if is_ubuntu; then
- # (rpittau) in bionic the UEFI in the ovmf 0~20180205.c0d9813c-2
- # package is broken: EFI v2.70 by EDK II
- # As a workaround, here we download and install the old working
- # version from the multiverse repository: EFI v2.60 by EDK II
- # Bug reference:
- # https://bugs.launchpad.net/ubuntu/+source/edk2/+bug/1821729
- local temp_deb
- temp_deb="$(mktemp)"
- wget http://archive.ubuntu.com/ubuntu/pool/multiverse/e/edk2/ovmf_0~20160408.ffea0a2c-2_all.deb -O "$temp_deb"
- sudo dpkg -i "$temp_deb"
- rm -f "$temp_deb"
-
- # NOTE(TheJulia): This no longer seems required as the ovmf images
- # DO correctly network boot. The effect of this is making the
- # default boot loader iPXE, which is not always desired nor
- # realistic for hardware in the field.
- # If it is after Train, we should likely just delete the lines
- # below and consider the same for Fedora.
- # sudo rm /usr/share/qemu/pxe-virtio.rom
- # sudo ln -s /usr/lib/ipxe/qemu/efi-virtio.rom /usr/share/qemu/pxe-virtio.rom
- elif is_fedora; then
+ if is_fedora; then
sudo rm /usr/share/qemu/pxe-virtio.rom
sudo ln -s /usr/share/ipxe.efi/1af41000.rom /usr/share/qemu/pxe-virtio.rom
fi
@@ -1200,6 +1260,33 @@ function configure_ironic_rescue_network {
}
function configure_ironic_provision_network {
+ if [[ "$IP_VERSION" == "6" ]]; then
+ # NOTE(TheJulia): Ideally we should let this happen
+ # with our global address, but iPXE seems to have
+ # inconsistent behavior in this configuration with devstack,
+ # so we will set up a dummy interface and use that.
+ sudo ip link add magicv6 type dummy
+ sudo ip link set dev magicv6 up
+ sudo ip -6 addr add $IRONIC_HOST_IPV6/64 dev magicv6
+ fi
+ if is_service_enabled neutron-api; then
+ if [[ "$IRONIC_IP_VERSION" == "6" ]]; then
+ sudo sysctl -w net.ipv6.conf.all.proxy_ndp=1
+ configure_neutron_l3_lower_v6_ra
+ fi
+ # Neutron agent needs to be pre-configured before proceeding down the
+ # path of configuring the provision network. This was done for us in
+ # the legacy neutron code.
+ neutron_plugin_configure_plugin_agent
+ # This prior step updates configuration related to physnet mappings,
+ # and we must restart neutron as a result
+ stop_neutron
+ sleep 15
+ # By default, upon start, neutron tries to create the networks...
+ NEUTRON_CREATE_INITIAL_NETWORKS=False
+ start_neutron_api
+ start_neutron
+ fi
# This is only called if IRONIC_PROVISION_NETWORK_NAME has been set and
# means we are using multi-tenant networking.
local net_id
@@ -1225,12 +1312,28 @@ function configure_ironic_provision_network {
fi
local subnet_id
- subnet_id="$(openstack subnet create --ip-version 4 \
- ${IRONIC_PROVISION_ALLOCATION_POOL:+--allocation-pool $IRONIC_PROVISION_ALLOCATION_POOL} \
- ${net_segment_id:+--network-segment $net_segment_id} \
- $IRONIC_PROVISION_PROVIDER_SUBNET_NAME \
- --gateway $IRONIC_PROVISION_SUBNET_GATEWAY --network $net_id \
- --subnet-range $IRONIC_PROVISION_SUBNET_PREFIX -f value -c id)"
+ if [[ "$IRONIC_IP_VERSION" == '4' ]]; then
+ subnet_id="$(openstack subnet create --ip-version 4 \
+ ${IRONIC_PROVISION_ALLOCATION_POOL:+--allocation-pool $IRONIC_PROVISION_ALLOCATION_POOL} \
+ ${net_segment_id:+--network-segment $net_segment_id} \
+ $IRONIC_PROVISION_PROVIDER_SUBNET_NAME \
+ --gateway $IRONIC_PROVISION_SUBNET_GATEWAY --network $net_id \
+ --subnet-range $IRONIC_PROVISION_SUBNET_PREFIX -f value -c id)"
+ else
+ subnet_id="$(openstack subnet create --ip-version 6 \
+ --ipv6-address-mode dhcpv6-stateful \
+ --ipv6-ra-mode dhcpv6-stateful \
+ --dns-nameserver 2001:4860:4860::8888 \
+ ${net_segment_id:+--network-segment $net_segment_id} \
+ $IRONIC_PROVISION_PROVIDER_SUBNET_NAME \
+ --gateway $IRONIC_PROVISION_SUBNET_GATEWAY --network $net_id \
+ --subnet-range $IRONIC_PROVISION_SUBNET_PREFIX -f value -c id)"
+ # NOTE(TheJulia): router must be attached to the subnet for RAs.
+ openstack router add subnet $IRONIC_ROUTER_NAME $subnet_id
+
+ # We're going to be using this router for public access to tenant networks.
+ PUBLIC_ROUTER_ID=$(openstack router show -c id -f value $IRONIC_ROUTER_NAME)
+ fi
die_if_not_set $LINENO subnet_id "Failure creating SUBNET_ID for $IRONIC_PROVISION_NETWORK_NAME"
@@ -1246,14 +1349,22 @@ function configure_ironic_provision_network {
# Set provision network GW on physical interface
# Add vlan on br interface in case of IRONIC_PROVISION_PROVIDER_NETWORK_TYPE==vlan
# otherwise assign ip to br interface directly.
- if [[ "$IRONIC_PROVISION_PROVIDER_NETWORK_TYPE" == "vlan" ]]; then
- sudo ip link add link $OVS_PHYSICAL_BRIDGE name $OVS_PHYSICAL_BRIDGE.$IRONIC_PROVISION_SEGMENTATION_ID type vlan id $IRONIC_PROVISION_SEGMENTATION_ID
- sudo ip link set dev $OVS_PHYSICAL_BRIDGE up
- sudo ip link set dev $OVS_PHYSICAL_BRIDGE.$IRONIC_PROVISION_SEGMENTATION_ID up
- sudo ip addr add dev $OVS_PHYSICAL_BRIDGE.$IRONIC_PROVISION_SEGMENTATION_ID $ironic_provision_network_ip/$provision_net_prefix
+ sudo ip link set dev $OVS_PHYSICAL_BRIDGE up
+ if [[ "$IRONIC_IP_VERSION" == "4" ]]; then
+ if [[ "$IRONIC_PROVISION_PROVIDER_NETWORK_TYPE" == "vlan" ]]; then
+ sudo ip link add link $OVS_PHYSICAL_BRIDGE name $OVS_PHYSICAL_BRIDGE.$IRONIC_PROVISION_SEGMENTATION_ID type vlan id $IRONIC_PROVISION_SEGMENTATION_ID
+ sudo ip link set dev $OVS_PHYSICAL_BRIDGE.$IRONIC_PROVISION_SEGMENTATION_ID up
+ sudo ip -$IRONIC_IP_VERSION addr add dev $OVS_PHYSICAL_BRIDGE.$IRONIC_PROVISION_SEGMENTATION_ID $ironic_provision_network_ip/$provision_net_prefix
+ else
+ sudo ip -$IRONIC_IP_VERSION addr add dev $OVS_PHYSICAL_BRIDGE $ironic_provision_network_ip/$provision_net_prefix
+ fi
else
- sudo ip link set dev $OVS_PHYSICAL_BRIDGE up
- sudo ip addr add dev $OVS_PHYSICAL_BRIDGE $ironic_provision_network_ip/$provision_net_prefix
+ # Turn on the external/integration bridges, for IPV6.
+ sudo ip link set dev br-ex up
+ sudo ip link set dev br-int up
+
+ sudo ip6tables -I FORWARD -i brbm -j LOG || true
+ sudo ip6tables -I FORWARD -i br-ex -j LOG || true
fi
iniset $IRONIC_CONF_FILE neutron provisioning_network $IRONIC_PROVISION_NETWORK_NAME
@@ -1269,6 +1380,10 @@ function cleanup_ironic_provision_network {
done
}
+function configure_neutron_l3_lower_v6_ra {
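+    # Send router advertisements more frequently so booting nodes pick up
+    # their IPv6 default route quickly.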
+ iniset $Q_L3_CONF_FILE DEFAULT min_rtr_adv_interval 5
+}
+
# configure_ironic() - Set config files, create data dirs, etc
function configure_ironic {
configure_ironic_dirs
@@ -1292,6 +1407,18 @@ function configure_ironic {
# Configure JSON RPC backend
iniset $IRONIC_CONF_FILE DEFAULT rpc_transport $IRONIC_RPC_TRANSPORT
iniset $IRONIC_CONF_FILE json_rpc port $IRONIC_JSON_RPC_PORT
+ if [[ "$IRONIC_JSON_RPC_AUTH_STRATEGY" != "" ]]; then
+ iniset $IRONIC_CONF_FILE json_rpc auth_strategy $IRONIC_JSON_RPC_AUTH_STRATEGY
+ fi
+ if [[ "$IRONIC_JSON_RPC_AUTH_STRATEGY" == "http_basic" ]]; then
+ iniset $IRONIC_CONF_FILE json_rpc username myName
+ iniset $IRONIC_CONF_FILE json_rpc password myPassword
+ # json-rpc auth file with bcrypt hash of myPassword
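+        # (an equivalent entry can be regenerated with: htpasswd -nbB myName myPassword)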
+ echo 'myName:$2y$05$lE3eGtyj41jZwrzS87KTqe6.JETVCWBkc32C63UP2aYrGoYOEpbJm' > /etc/ironic/htpasswd-json-rpc
+ fi
+ if [[ "$IRONIC_JSON_RPC_AUTH_STRATEGY" == "" ]] || [[ "$IRONIC_JSON_RPC_AUTH_STRATEGY" == "keystone" ]]; then
+ configure_client_for json_rpc
+ fi
# Set fast track options
iniset $IRONIC_CONF_FILE deploy fast_track $IRONIC_DEPLOY_FAST_TRACK
@@ -1303,6 +1430,9 @@ function configure_ironic {
iniset_rpc_backend ironic $IRONIC_CONF_FILE
fi
+ # Set IP version
+ iniset $IRONIC_CONF_FILE pxe ip_version $IRONIC_IP_VERSION
+
# Configure Ironic conductor, if it was enabled.
if is_service_enabled ir-cond; then
configure_ironic_conductor
@@ -1383,7 +1513,7 @@ EOF
# API specific configuration.
function configure_ironic_api {
iniset $IRONIC_CONF_FILE DEFAULT auth_strategy $IRONIC_AUTH_STRATEGY
- configure_auth_token_middleware $IRONIC_CONF_FILE ironic $IRONIC_AUTH_CACHE_DIR/api
+ configure_keystone_authtoken_middleware $IRONIC_CONF_FILE ironic
if [[ "$IRONIC_USE_WSGI" == "True" ]]; then
iniset $IRONIC_CONF_FILE oslo_middleware enable_proxy_headers_parsing True
@@ -1422,7 +1552,7 @@ function configure_ironic_conductor {
# NOTE(pas-ha) service_catalog section is used to discover
# ironic API endpoint from keystone catalog
- local client_sections="neutron swift glance inspector cinder service_catalog json_rpc nova"
+ local client_sections="neutron swift glance inspector cinder service_catalog nova"
for conf_section in $client_sections; do
configure_client_for $conf_section
done
@@ -1435,7 +1565,7 @@ function configure_ironic_conductor {
ironic_lib_prefix=${GITDIR["ironic-lib"]}
else
# pip uses default python 'data' path
- ironic_lib_prefix=$(python -c "import sysconfig; \
+ ironic_lib_prefix=$(python3 -c "import sysconfig; \
print(sysconfig.get_path('data'))")
# on Centos7 the data is installed to /usr/local
@@ -1480,6 +1610,7 @@ function configure_ironic_conductor {
# specific driver interfaces in DevStack
iniset $IRONIC_CONF_FILE DEFAULT enabled_power_interfaces "snmp"
iniset $IRONIC_CONF_FILE DEFAULT enabled_management_interfaces "noop"
+ iniset $IRONIC_CONF_FILE pxe enable_netboot_fallback True
fi
if is_ansible_deploy_enabled; then
@@ -1512,11 +1643,15 @@ function configure_ironic_conductor {
fi
iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF
- iniset $IRONIC_CONF_FILE conductor api_url $IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT
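+    # Bracket the host when it contains a colon, i.e. an IPv6 address literal.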
+ iniset $IRONIC_CONF_FILE service_catalog endpoint_override "$IRONIC_SERVICE_PROTOCOL://$([[ $IRONIC_HTTP_SERVER =~ : ]] && echo "[$IRONIC_HTTP_SERVER]" || echo $IRONIC_HTTP_SERVER)/baremetal"
if [[ -n "$IRONIC_CALLBACK_TIMEOUT" ]]; then
iniset $IRONIC_CONF_FILE conductor deploy_callback_timeout $IRONIC_CALLBACK_TIMEOUT
fi
- iniset $IRONIC_CONF_FILE pxe tftp_server $IRONIC_TFTPSERVER_IP
+ if [[ "$IRONIC_IP_VERSION" == "6" ]]; then
+ iniset $IRONIC_CONF_FILE pxe tftp_server $IRONIC_HOST_IPV6
+ else
+ iniset $IRONIC_CONF_FILE pxe tftp_server $IRONIC_TFTPSERVER_IP
+ fi
iniset $IRONIC_CONF_FILE pxe tftp_root $IRONIC_TFTPBOOT_DIR
iniset $IRONIC_CONF_FILE pxe tftp_master_path $IRONIC_TFTPBOOT_DIR/master_images
if [[ -n "$IRONIC_PXE_BOOT_RETRY_TIMEOUT" ]]; then
@@ -1587,12 +1722,10 @@ function configure_ironic_conductor {
local pxebin
pxebin=`basename $IRONIC_PXE_BOOT_IMAGE`
uefipxebin=`basename $(get_uefi_ipxe_boot_file)`
- iniset $IRONIC_CONF_FILE pxe pxe_config_template '$pybasedir/drivers/modules/ipxe_config.template'
- iniset $IRONIC_CONF_FILE pxe pxe_bootfile_name $pxebin
- iniset $IRONIC_CONF_FILE pxe uefi_pxe_config_template '$pybasedir/drivers/modules/ipxe_config.template'
- iniset $IRONIC_CONF_FILE pxe uefi_pxe_bootfile_name $uefipxebin
+ iniset $IRONIC_CONF_FILE pxe ipxe_bootfile_name $pxebin
+ iniset $IRONIC_CONF_FILE pxe uefi_ipxe_bootfile_name $uefipxebin
iniset $IRONIC_CONF_FILE deploy http_root $IRONIC_HTTP_DIR
- iniset $IRONIC_CONF_FILE deploy http_url "http://$IRONIC_HTTP_SERVER:$IRONIC_HTTP_PORT"
+ iniset $IRONIC_CONF_FILE deploy http_url "http://$([[ $IRONIC_HTTP_SERVER =~ : ]] && echo "[$IRONIC_HTTP_SERVER]" || echo $IRONIC_HTTP_SERVER):$IRONIC_HTTP_PORT"
if [[ "$IRONIC_IPXE_USE_SWIFT" == "True" ]]; then
iniset $IRONIC_CONF_FILE pxe ipxe_use_swift True
fi
@@ -1713,7 +1846,7 @@ function start_ironic_api {
fi
if [[ "$IRONIC_USE_WSGI" == "True" ]]; then
- run_process "ir-api" "$IRONIC_BIN_DIR/uwsgi --procname-prefix ironic-api --ini $IRONIC_UWSGI_CONF"
+ run_process "ir-api" "$(which uwsgi) --procname-prefix ironic-api --ini $IRONIC_UWSGI_CONF"
ironic_url=$service_protocol://$SERVICE_HOST/baremetal
else
run_process ir-api "$IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE"
@@ -1933,18 +2066,40 @@ SUBSHELL
# Add route here to have connection to VMs during provisioning.
local pub_router_id
local r_net_gateway
- pub_router_id=$(openstack router show $Q_ROUTER_NAME -f value -c id)
- r_net_gateway=$(sudo ip netns exec qrouter-$pub_router_id ip -4 route get 8.8.8.8 |grep dev | awk '{print $7}')
- local replace_range=${SUBNETPOOL_PREFIX_V4}
- if [[ -z "${SUBNETPOOL_V4_ID}" ]]; then
- replace_range=${FIXED_RANGE}
+ local dns_server
+ local replace_range
+ if [[ "$IRONIC_IP_VERSION" == '4' ]]; then
+ dns_server="8.8.8.8"
+ if [[ -z "${SUBNETPOOL_V4_ID}" ]]; then
+ replace_range=${FIXED_RANGE}
+ else
+ replace_range=${SUBNETPOOL_PREFIX_V4}
+ fi
+ else
+ dns_server="2001:4860:4860::8888"
+ if [[ -z "${SUBNETPOOL_V6_ID}" ]]; then
+ replace_range=${FIXED_RANGE_V6}
+ else
+ replace_range=${SUBNETPOOL_PREFIX_V6}
+ fi
fi
+ pub_router_id=$(openstack router show $Q_ROUTER_NAME -f value -c id)
+    # Select the text starting at "src ", and grab the following field.
+ r_net_gateway=$(sudo ip netns exec qrouter-$pub_router_id ip -$IRONIC_IP_VERSION route get $dns_server |grep dev | sed s/^.*src\ // |awk '{ print $1 }')
sudo ip route replace $replace_range via $r_net_gateway
fi
# Here is a good place to restart tcpdump to begin capturing packets.
# See: https://docs.openstack.org/devstack/latest/debugging.html
# stop_tcpdump
# start_tcpdump
+
+ if [[ "$IRONIC_IP_VERSION" == "6" ]]; then
+ # route us back through the neutron router!
+ sudo ip -6 route add $IRONIC_PROVISION_SUBNET_PREFIX via $IPV6_ROUTER_GW_IP
+ sudo ip link set dev br-ex up || true
+ # Route back to our test subnet. Static should be safe for a while.
+ sudo ip -6 route add fd00::/8 via $IPV6_ROUTER_GW_IP
+ fi
}
function wait_for_nova_resources {
@@ -2389,13 +2544,22 @@ function configure_iptables {
die_if_module_not_loaded nf_conntrack_tftp
die_if_module_not_loaded nf_nat_tftp
fi
+ ################ NETWORK DHCP
# explicitly allow DHCP - packets are occasionally being dropped here
sudo iptables -I INPUT -p udp --dport 67:68 --sport 67:68 -j ACCEPT || true
# nodes boot from TFTP and callback to the API server listening on $HOST_IP
sudo iptables -I INPUT -d $IRONIC_TFTPSERVER_IP -p udp --dport 69 -j ACCEPT || true
+
+    # Allow DHCPv6, which is the only way to transmit boot options.
+ sudo ip6tables -I INPUT -d $IRONIC_HOST_IPV6 -p udp --dport 546:547 --sport 546:547 -j ACCEPT || true
+
+ sudo ip6tables -I INPUT -d $IRONIC_HOST_IPV6 -p udp --dport 69 -j ACCEPT || true
+
+ ################ Webserver/API
# To use named /baremetal endpoint we should open default apache port
if [[ "$IRONIC_USE_WSGI" == "False" ]]; then
sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $IRONIC_SERVICE_PORT -j ACCEPT || true
+ sudo ip6tables -I INPUT -d $HOST_IP -p tcp --dport $IRONIC_SERVICE_PORT -j ACCEPT || true
# open ironic API on baremetal network
sudo iptables -I INPUT -d $IRONIC_HTTP_SERVER -p tcp --dport $IRONIC_SERVICE_PORT -j ACCEPT || true
# allow IPA to connect to ironic API on subnode
@@ -2405,7 +2569,9 @@ function configure_iptables {
sudo iptables -I INPUT -d $HOST_IP -p tcp --dport 443 -j ACCEPT || true
# open ironic API on baremetal network
sudo iptables -I INPUT -d $IRONIC_HTTP_SERVER -p tcp --dport 80 -j ACCEPT || true
+ sudo ip6tables -I INPUT -d $IRONIC_HTTP_SERVER -p tcp --dport 80 -j ACCEPT || true
sudo iptables -I INPUT -d $IRONIC_HTTP_SERVER -p tcp --dport 443 -j ACCEPT || true
+ sudo ip6tables -I INPUT -d $IRONIC_HTTP_SERVER -p tcp --dport 443 -j ACCEPT || true
fi
if is_deployed_by_agent; then
# agent ramdisk gets instance image from swift
@@ -2415,6 +2581,7 @@ function configure_iptables {
if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
sudo iptables -I INPUT -d $IRONIC_HTTP_SERVER -p tcp --dport $IRONIC_HTTP_PORT -j ACCEPT || true
+ sudo ip6tables -I INPUT -d $IRONIC_HOST_IPV6 -p tcp --dport $IRONIC_HTTP_PORT -j ACCEPT || true
fi
if [[ "${IRONIC_STORAGE_INTERFACE}" == "cinder" ]]; then
@@ -2426,6 +2593,7 @@ function configure_iptables {
qrouter=$(sudo ip netns list | grep qrouter | awk '{print $1;}')
if [[ ! -z "$qrouter" ]]; then
sudo ip netns exec $qrouter /sbin/iptables -A PREROUTING -t raw -p udp --dport 69 -j CT --helper tftp
+ sudo ip netns exec $qrouter /sbin/ip6tables -A PREROUTING -t raw -p udp --dport 69 -j CT --helper tftp || true
fi
}
@@ -2436,7 +2604,9 @@ function configure_tftpd {
sudo cp $IRONIC_TEMPLATES_DIR/tftpd-xinetd.template /etc/xinetd.d/tftp
sudo sed -e "s|%TFTPBOOT_DIR%|$IRONIC_TFTPBOOT_DIR|g" -i /etc/xinetd.d/tftp
sudo sed -e "s|%MAX_BLOCKSIZE%|$IRONIC_TFTP_BLOCKSIZE|g" -i /etc/xinetd.d/tftp
-
+ if [[ "$IRONIC_IP_VERSION" == '6' ]]; then
+ sudo sed -e "s|IPv4|IPv6|g" -i /etc/xinetd.d/tftp
+ fi
# setup tftp file mapping to satisfy requests at the root (booting) and
# /tftpboot/ sub-dir (as per deploy-ironic elements)
# this section is only for ubuntu and fedora
@@ -2814,6 +2984,26 @@ function ironic_configure_tempest {
if [[ -n "$IRONIC_PING_TIMEOUT" ]]; then
iniset $TEMPEST_CONFIG validation ping_timeout $IRONIC_PING_TIMEOUT
fi
+ if [[ -n "$IRONIC_IP_VERSION" ]]; then
+ iniset $TEMPEST_CONFIG validation ip_version_for_ssh $IRONIC_IP_VERSION
+ fi
+ if [[ -n "$IRONIC_BOOT_MODE" ]]; then
+ iniset $TEMPEST_CONFIG baremetal boot_mode $IRONIC_BOOT_MODE
+ fi
+ if [[ "$IRONIC_IP_VERSION" == "6" ]]; then
+ # No FIPs in V6 and we dynamically create networks...
+ # network_for_ssh is defaulted to public
+ iniset $TEMPEST_CONFIG validation network_for_ssh
+ iniset $TEMPEST_CONFIG validation connect_method fixed
+ iniset $TEMPEST_CONFIG network ipv6-private-subnet
+ if [ -n "${PUBLIC_ROUTER_ID:-}" ] ; then
+            # For IPv6, tempest is going to use a precreated router for
+            # access to the tenant networks (as we have set up routes to it).
+            # It needs the ID of the router and admin rights to attach to it.
+ iniset $TEMPEST_CONFIG network public_router_id $PUBLIC_ROUTER_ID
+ iniset $TEMPEST_CONFIG auth tempest_roles "admin"
+ fi
+ fi
if is_service_enabled nova; then
local bm_flavor_id
@@ -2856,7 +3046,11 @@ function ironic_configure_tempest {
iniset $TEMPEST_CONFIG baremetal partition_image_ref $image_uuid
fi
- iniset $TEMPEST_CONFIG baremetal whole_disk_image_url "http://$IRONIC_HTTP_SERVER:$IRONIC_HTTP_PORT/${IRONIC_WHOLEDISK_IMAGE_NAME}.img"
+ if [[ "$IRONIC_IP_VERSION" == "6" ]]; then
+ iniset $TEMPEST_CONFIG baremetal whole_disk_image_url "http://$IRONIC_HOST_IPV6:$IRONIC_HTTP_PORT/${IRONIC_WHOLEDISK_IMAGE_NAME}.img"
+ else
+ iniset $TEMPEST_CONFIG baremetal whole_disk_image_url "http://$IRONIC_HTTP_SERVER:$IRONIC_HTTP_PORT/${IRONIC_WHOLEDISK_IMAGE_NAME}.img"
+ fi
iniset $TEMPEST_CONFIG baremetal whole_disk_image_checksum $(md5sum $FILES/${IRONIC_WHOLEDISK_IMAGE_NAME}.img)
# NOTE(dtantsur): keep this option here until the defaults change in
@@ -2890,6 +3084,7 @@ function ironic_configure_tempest {
if [[ $IRONIC_VM_VOLUME_COUNT -gt 1 ]]; then
iniset $TEMPEST_CONFIG baremetal_feature_enabled software_raid True
+ iniset $TEMPEST_CONFIG baremetal_feature_enabled deploy_time_raid True
fi
# Enabled features
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index 0fc1654b2..f49c63d38 100644
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -36,6 +36,7 @@ if is_service_enabled ir-api ir-cond; then
if [[ "$IRONIC_BAREMETAL_BASIC_OPS" == "True" && "$IRONIC_IS_HARDWARE" == "False" ]]; then
echo_summary "Precreating bridge: $IRONIC_VM_NETWORK_BRIDGE"
+ install_package openvswitch-switch
sudo ovs-vsctl -- --may-exist add-br $IRONIC_VM_NETWORK_BRIDGE
fi
diff --git a/devstack/tools/ironic/scripts/configure-vm.py b/devstack/tools/ironic/scripts/configure-vm.py
index a1b3a65df..d363817d4 100755
--- a/devstack/tools/ironic/scripts/configure-vm.py
+++ b/devstack/tools/ironic/scripts/configure-vm.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
diff --git a/devstack/tools/ironic/templates/vm.xml b/devstack/tools/ironic/templates/vm.xml
index d2b3c0071..f5c59b54e 100644
--- a/devstack/tools/ironic/templates/vm.xml
+++ b/devstack/tools/ironic/templates/vm.xml
@@ -3,7 +3,7 @@
<memory unit='KiB'>{{ memory }}</memory>
<vcpu>{{ cpus }}</vcpu>
<os>
- <type arch='{{ arch }}' machine='pc-1.0'>hvm</type>
+ <type arch='{{ arch }}' machine='pc'>hvm</type>
{% if bootdev == 'network' and not uefi_loader %}
<boot dev='{{ bootdev }}'/>
{% endif %}
@@ -34,15 +34,8 @@
<disk type='file' device='disk'>
<driver name='qemu' type='{{ disk_format }}' cache='unsafe'/>
<source file='{{ imagefile }}'/>
- <!-- NOTE(lucasagomes): The virtio disk controller apparently does
- not work with UEFI, so let's use IDE. -->
- {% if uefi_loader %}
- <target dev='vd{{ letter }}' bus='ide'/>
- <address type='drive' controller='0' bus='0' target='0' unit='{{ loop.index }}'/>
- {% else %}
<target dev='vd{{ letter }}' bus='virtio'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x0{{ letter }}' function='0x0'/>
- {% endif %}
</disk>
{% endfor %}
<controller type='ide' index='0'>
diff --git a/doc/requirements.txt b/doc/requirements.txt
index 28e33b775..d7394a9e6 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -1,9 +1,7 @@
-mock>=3.0.0 # BSD
-openstackdocstheme>=1.31.2 # Apache-2.0
+openstackdocstheme>=2.2.0 # Apache-2.0
os-api-ref>=1.4.0 # Apache-2.0
-reno>=2.5.0 # Apache-2.0
-sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2 # BSD
+reno>=3.1.0 # Apache-2.0
+sphinx>=2.0.0,!=2.1.0 # BSD
sphinxcontrib-apidoc>=0.2.0 # BSD
-sphinxcontrib-pecanwsme>=0.10.0 # Apache-2.0
sphinxcontrib-seqdiag>=0.8.4 # BSD
sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
diff --git a/doc/source/_exts/automated_steps.py b/doc/source/_exts/automated_steps.py
index 4cf434974..ff226ec2f 100644
--- a/doc/source/_exts/automated_steps.py
+++ b/doc/source/_exts/automated_steps.py
@@ -30,6 +30,8 @@ from ironic.common import driver_factory
LOG = logging.getLogger(__name__)
+# Enable this locally if you need debugging output
+DEBUG = False
def _list_table(add, headers, data, title='', columns=None):
"""Build a list-table directive.
@@ -85,8 +87,9 @@ def _init_steps_by_driver():
# the methods of the class.
for interface_name in sorted(driver_factory.driver_base.ALL_INTERFACES):
- LOG.info('[{}] probing available plugins for interface {}'.format(
- __name__, interface_name))
+ if DEBUG:
+ LOG.info('[{}] probing available plugins for interface {}'.format(
+ __name__, interface_name))
loader = stevedore.ExtensionManager(
'ironic.hardware.interfaces.{}'.format(interface_name),
@@ -110,8 +113,9 @@ def _init_steps_by_driver():
'interface': interface_name,
'doc': _format_doc(inspect.getdoc(method)),
}
- LOG.info('[{}] interface {!r} driver {!r} STEP {}'.format(
- __name__, interface_name, plugin.name, step))
+ if DEBUG:
+ LOG.info('[{}] interface {!r} driver {!r} STEP {}'.format(
+ __name__, interface_name, plugin.name, step))
steps.append(step)
if steps:
diff --git a/doc/source/admin/agent-power.rst b/doc/source/admin/agent-power.rst
new file mode 100644
index 000000000..b948733ee
--- /dev/null
+++ b/doc/source/admin/agent-power.rst
@@ -0,0 +1,76 @@
+=================================
+Deploying without BMC Credentials
+=================================
+
+The Bare Metal service usually requires BMC credentials for all provisioning
+operations. Starting with the Victoria release series there is limited support
+for inspection, cleaning and deployments without the credentials.
+
+.. warning::
+ This feature is experimental and only works in a limited scenario. When
+ using it, you have to be prepared to provide BMC credentials in case of
+    a failure or any unsupported action.
+
+How it works
+============
+
+The expected workflow is as follows:
+
+#. The node is discovered by manually powering it on and gets the
+   ``manual-management`` hardware type and ``agent`` power interface.
+
+   If discovery is not used, a node can be enrolled through the API and then
+   powered on manually (see the example after this list).
+
+#. The operator moves the node to ``manageable``. This works because the
+   ``agent`` power interface only requires the ability to connect to the
+   agent.
+
+#. The operator moves the node to ``available``. Cleaning happens normally via
+   the already running agent. If a reboot is needed, it is done by telling the
+   agent to reboot the node in-band.
+
+#. A user deploys the node. Deployment happens normally via the already
+ running agent.
+
+#. At the end of the deployment, the node is rebooted via the in-band reboot
+   command instead of a power off+on cycle.
+
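+For illustration, enrolling such a node through the API could look roughly
+like this (a sketch; the node name is a placeholder, the interfaces are the
+ones described below):
+
+.. code-block:: bash
+
+    openstack baremetal node create --driver manual-management \
+        --power-interface agent \
+        --management-interface noop \
+        --network-interface noop \
+        --name node-0
+    openstack baremetal node manage node-0
+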
+Enabling
+========
+
+:doc:`fast-track` is a requirement for this feature to work. After enabling
+it, add the ``agent`` power interface and the ``manual-management`` hardware
+type to the enabled lists:
+
+.. code-block:: ini
+
+ [DEFAULT]
+ enabled_hardware_types = manual-management
+ enabled_management_interfaces = noop
+ enabled_power_interfaces = agent
+
+ [deploy]
+ fast_track = true
+
+As usual with the ``noop`` management interface, enable the network boot
+fallback:
+
+.. code-block:: ini
+
+ [pxe]
+ enable_netboot_fallback = true
+
+If using discovery, :ironic-inspector-doc:`configure discovery in
+ironic-inspector <user/usage.html#discovery>` with the default driver set
+to ``manual-management``.
+
+Limitations
+===========
+
+* Only the ``noop`` network interface is supported.
+
+* Undeploy and rescue are not supported; you need to add BMC credentials first.
+
+* If any error happens in the process, recovery will likely require BMC
+ credentials.
+
+* Only rebooting is possible through the API; power on/off commands will fail.
diff --git a/doc/source/admin/agent-token.rst b/doc/source/admin/agent-token.rst
index 90528bc38..4c2fd0e34 100644
--- a/doc/source/admin/agent-token.rst
+++ b/doc/source/admin/agent-token.rst
@@ -43,7 +43,7 @@ It remains available to the conductors, and is stored in memory of the
With the token is available in memory in the agent, the token is embedded with
``heartbeat`` operations to the ironic API endpoint. This enables the API to
authenticate the heartbeat request, and refuse "heartbeat" requests from the
-``ironic-python-agent``. With the ``Ussuri`` release, the confiuration option
+``ironic-python-agent``. With the ``Ussuri`` release, the configuration option
``[DEFAULT]require_agent_token`` can be set ``True`` to explicitly require
token use.
diff --git a/doc/source/admin/boot-from-volume.rst b/doc/source/admin/boot-from-volume.rst
index a33888fcd..fd89360a6 100644
--- a/doc/source/admin/boot-from-volume.rst
+++ b/doc/source/admin/boot-from-volume.rst
@@ -177,7 +177,7 @@ to a remote boot from volume target, so that also must be ensured by
the user in advance.
Records of volume targets are removed upon the node being undeployed,
-and as such are not presistent across deployments.
+and as such are not persistent across deployments.
Cinder Multi-attach
-------------------
diff --git a/doc/source/admin/drivers.rst b/doc/source/admin/drivers.rst
index 627702cf2..267409403 100644
--- a/doc/source/admin/drivers.rst
+++ b/doc/source/admin/drivers.rst
@@ -104,6 +104,41 @@ not compatible with them. There are three ways to deal with this situation:
.. note:: This feature is available starting with ironic 11.1.0 (Rocky
series, API version 1.45).
+.. _static-boot-order:
+
+Static boot order configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Some hardware is known to misbehave when changing the boot device through the
+BMC. To work around it you can use the ``noop`` management interface
+implementation with the ``ipmi`` and ``redfish`` hardware types. In this case
+the Bare Metal service will not change the boot device for you, leaving
+the pre-configured boot order.
+
+For example, in case of the :ref:`pxe-boot`:
+
+#. Via any available means configure the boot order on the node as follows:
+
+ #. Boot from PXE/iPXE on the provisioning NIC.
+
+ .. warning::
+ If it is not possible to limit network boot to only provisioning NIC,
+ make sure that no other DHCP/PXE servers are accessible by the node.
+
+ #. Boot from hard drive.
+
+#. Make sure the ``noop`` management interface is enabled, for example:
+
+ .. code-block:: ini
+
+ [DEFAULT]
+ enabled_hardware_types = ipmi,redfish
+ enabled_management_interfaces = ipmitool,redfish,noop
+
+#. Change the node to use the ``noop`` management interface::
+
+ openstack baremetal node set <NODE> --management-interface noop
+
Unsupported drivers
-------------------
diff --git a/doc/source/admin/drivers/ibmc.rst b/doc/source/admin/drivers/ibmc.rst
index c39bf05df..86d89c057 100644
--- a/doc/source/admin/drivers/ibmc.rst
+++ b/doc/source/admin/drivers/ibmc.rst
@@ -2,10 +2,6 @@
iBMC driver
===============
-.. warning::
- The ``ibmc`` driver has been deprecated due to a lack of a functioning
- third party CI and will be removed in the Victoria development cycle.
-
Overview
========
@@ -13,6 +9,13 @@ The ``ibmc`` driver is targeted for Huawei V5 series rack server such as
2288H V5, CH121 V5. The iBMC hardware type enables the user to take advantage
of features of `Huawei iBMC`_ to control Huawei server.
+The ``ibmc`` hardware type supports the following Ironic interfaces:
+
+* Management Interface: Boot device management
+* Power Interface: Power management
+* `RAID Interface`_: RAID controller and disk management
+* `Vendor Interface`_: ibmc passthru interfaces
+
Prerequisites
=============
@@ -32,9 +35,10 @@ Enabling the iBMC driver
[DEFAULT]
...
- enabled_hardware_types = ibmc,ipmi
- enabled_power_interfaces = ibmc,ipmitool
- enabled_management_interfaces = ibmc,ipmitool
+ enabled_hardware_types = ibmc
+ enabled_power_interfaces = ibmc
+ enabled_management_interfaces = ibmc
+ enabled_raid_interfaces = ibmc
enabled_vendor_interfaces = ibmc
#. Restart the ironic conductor service::
@@ -95,19 +99,218 @@ a node with the ``ibmc`` driver. For example:
For more information about enrolling nodes see :ref:`enrollment`
in the install guide.
-Features of the ``ibmc`` hardware type
-=========================================
+RAID Interface
+==============
-Query boot up sequence
-^^^^^^^^^^^^^^^^^^^^^^
+Currently, only RAID controllers which support out-of-band (OOB) management
+can be managed.
-The ``ibmc`` hardware type can query current boot up sequence from the
-bare metal node
+See :doc:`/admin/raid` for more information on Ironic RAID support.
+
+The following properties are supported by the iBMC raid interface
+implementation, ``ibmc``:
+
+Mandatory properties
+--------------------
+
+* ``size_gb``: Size in gigabytes (integer) for the logical disk. Use ``MAX`` as
+ ``size_gb`` if this logical disk is supposed to use the rest of the space
+ available.
+* ``raid_level``: RAID level for the logical disk. Valid values are
+  ``JBOD``, ``0``, ``1``, ``5``, ``6``, ``1+0``, ``5+0`` and ``6+0``. Note
+  that some RAID controllers may only support a subset of these levels.
+
+.. NOTE::
+ RAID level ``2`` is not supported by ``iBMC`` driver.
+
+Optional properties
+-------------------
+
+* ``is_root_volume``: Optional. Specifies whether this disk is a root volume.
+ By default, this is ``False``.
+* ``volume_name``: Optional. Name of the volume to be created. If this is not
+  specified, the name will be set to N/A.
+
+Backing physical disk hints
+---------------------------
+
+See :doc:`/admin/raid` for more information on backing disk hints.
+
+These are machine-independent properties. The hints are specified for each
+logical disk to help Ironic find the desired disks for RAID configuration.
+
+* ``share_physical_disks``
+* ``disk_type``
+* ``interface_type``
+* ``number_of_physical_disks``
+
+Backing physical disks
+----------------------
+
+These are HUAWEI RAID controller dependent properties:
+
+* ``controller``: Optional. Supported values are: RAID storage id,
+  RAID storage name or RAID controller name. If a bare metal server has more
+  than one controller, this is mandatory. Typical values would look like:
+
+ * RAID Storage Id: ``RAIDStorage0``
+ * RAID Storage Name: ``RAIDStorage0``
+ * RAID Controller Name: ``RAID Card1 Controller``.
+
+* ``physical_disks``: Optional. Supported values are: disk-id, disk-name or
+ disk serial number. Typical values for hdd disk would look like:
+
+ * Disk Id: ``HDDPlaneDisk0``
+ * Disk Name: ``Disk0``.
+ * Disk SerialNumber: ``38DGK77LF77D``
+
+Delete RAID configuration
+-------------------------
+
+For ``delete_configuration`` step, ``ibmc`` will do:
+
+* delete all logical disks
+* delete all hot-spare disks
+
+Logical disks creation priority
+-------------------------------
+
+Logical disk creation priority is based on three properties:
+
+* ``share_physical_disks``
+* ``physical_disks``
+* ``size_gb``
+
+Logical disk creation priority strictly follows the table below. If multiple
+logical disks have the same priority, they will be created in the same order
+as they appear in the ``logical_disks`` array.
+
+==================== ========================== =========
+Share physical disks Specified Physical Disks Size
+==================== ========================== =========
+no yes int|max
+no no int
+yes yes int
+yes yes max
+yes no int
+yes no max
+no no max
+==================== ========================== =========
+
+Physical disks choice strategy
+------------------------------
+
+.. note::
+  physical-disk-group: a group of physical disks which have been used by some
+  logical-disks with the same RAID level.
+
+
+* If no ``physical_disks`` are specified, the "waste least" strategy will be
+ used to choose the physical disks.
+
+  * waste the least disk capacity: using disks with different capacities
+    causes a waste of disk capacity; avoiding this waste has the highest
+    priority.
+  * use the least total disk capacity: for example, a 400G RAID 5 can be
+    created with either 5 100G disks or 3 200G disks. The 5 100G disks are
+    the better choice, because they use 500G in total while the 3 200G
+    disks use 600G in total.
+  * use the least disk count: finally, if the wasted capacity and the total
+    disk capacity are both the same (a rare case), choose the solution with
+    the minimum number of disks.
+
+* When the ``share_physical_disks`` option is present, the ``ibmc`` driver
+  tries to create the logical disk on an existing physical-disk-group first.
+  Only when no existing physical-disk-group matches does it choose unused
+  physical disks, using the same strategy described above. When multiple
+  physical-disk-groups match, the "waste least" strategy applies again:
+  the more capacity left over, the better. For example, consider creating
+  the logical disk shown below on an ``ibmc`` server which already has two
+  shareable RAID 5 logical disks, with 500G and 300G of shareable capacity;
+  the ``ibmc`` driver will choose the second one.
+
+ .. code-block:: json
+
+ {
+ "logical_disks": [
+ {
+ "controller": "RAID Card1 Controller",
+ "raid_level": "5",
+ "size_gb": 100,
+ "share_physical_disks": true
+ }
+ ]
+ }
+
+* When ``size_gb`` is set to ``MAX``, the ``ibmc`` driver will automatically
+  work through all possible cases and choose the "best" solution: the one
+  with the biggest capacity that uses the fewest disks. For example, when
+  creating a RAID 5+0 logical disk with MAX size on a server that has
+  9 200G disks, it will choose "8 disks + span-number 2" rather than
+  "9 disks + span-number 3". Although both have 1200G total capacity, the
+  former uses only 8 disks while the latter uses 9. If you want the latter
+  solution, you can specify the disk count to use by adding the
+  ``number_of_physical_disks`` option.
+
+ .. code-block:: json
+
+ {
+ "logical_disks": [
+ {
+ "controller": "RAID Card1 Controller",
+ "raid_level": "5+0",
+ "size_gb": "MAX"
+ }
+ ]
+ }
+
+
+Examples
+--------
+
+In a typical scenario we may want to create:
+
+* RAID 5, 500G, root OS volume with 3 disks
+* RAID 5, data volume using the rest of the space on the remaining disks
+
+.. code-block:: json
+
+ {
+ "logical_disks": [
+ {
+ "volume_name": "os_volume",
+ "controller": "RAID Card1 Controller",
+ "is_root_volume": "True",
+ "physical_disks": [
+ "Disk0",
+ "Disk1",
+ "Disk2"
+ ],
+ "raid_level": "5",
+ "size_gb": "500"
+ },
+ {
+ "volume_name": "data_volume",
+ "controller": "RAID Card1 Controller",
+ "raid_level": "5",
+ "size_gb": "MAX"
+ }
+ ]
+ }
+
+Vendor Interface
+=========================================
+
+The ``ibmc`` hardware type provides the vendor passthru methods shown below:
-.. code-block:: bash
- openstack baremetal node passthru call --http-method GET \
- <node id or node name> boot_up_seq
+======================== ============ ======================================
+Method Name HTTP Method Description
+======================== ============ ======================================
+boot_up_seq GET Query boot up sequence
+get_raid_controller_list GET Query RAID controller summary info
+======================== ============ ======================================
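+
+For example, to query the boot up sequence:
+
+.. code-block:: bash
+
+   openstack baremetal node passthru call --http-method GET \
+       <node id or node name> boot_up_seq
+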
PXE Boot and iSCSI Deploy Process with Ironic Standalone Environment
diff --git a/doc/source/admin/drivers/idrac.rst b/doc/source/admin/drivers/idrac.rst
index f0eac3697..796b09edb 100644
--- a/doc/source/admin/drivers/idrac.rst
+++ b/doc/source/admin/drivers/idrac.rst
@@ -227,7 +227,7 @@ BMC would be as follows::
},
{
"name": "SriovGlobalEnable",
- "value": "Enabled
+ "value": "Enabled"
}
]
}
@@ -235,8 +235,8 @@ BMC would be as follows::
]
}
-To see all the available BIOS parameters on a node with iDRAC BMC, and also
-for additional details of BIOS configuration, see :doc:`/admin/bios`.
+See `Known Issues`_ for a known issue with the ``factory_reset`` clean step.
+For additional details of BIOS configuration, see :doc:`/admin/bios`.
Inspect Interface
=================
@@ -624,6 +624,22 @@ into maintenance mode in Ironic. This issue can be worked around by changing
the Ironic power state poll interval to 70 seconds. See
``[conductor]sync_power_state_interval`` in ``/etc/ironic/ironic.conf``.
+PXE reset with "factory_reset" BIOS clean step
+----------------------------------------------
+
+When using UEFI boot mode with a non-default PXE interface, the factory
+reset can cause the PXE interface to be reset to the default, which prevents
+the server from PXE booting for any further operations. This can cause a
+``clean_failed`` state on the node, or ``deploy_failed`` if you attempt to
+deploy the node after this step. For now, the only solution is for the
+operator to manually restore the PXE settings of the server so that it can
+PXE boot properly again.
+
+The problem is caused by the fact that, in UEFI boot mode, the ``idrac``
+driver uses BIOS settings to manage the PXE configuration. This is not the
+case in BIOS boot mode, where the PXE configuration is handled as a
+configuration job on the integrated NIC itself, independently of the BIOS
+settings.
+
.. _Ironic_RAID: https://docs.openstack.org/ironic/latest/admin/raid.html
.. _iDRAC: https://www.dell.com/idracmanuals
@@ -660,24 +676,3 @@ To resolve this issue, increase the timeout to 90 seconds by setting the retry c
[agent]
post_deploy_get_power_state_retries = 18
-
-Redfish management interface failure to set boot device
--------------------------------------------------------
-
-When using the ``idrac-redfish`` management interface with certain iDRAC
-firmware versions (at least versions 2.70.70.70, 4.00.00.00, and
-4.10.10.10) and attempting to set the boot device on a baremetal server
-that is configured to UEFI boot, the iDRAC will return the following
-error::
-
- Unable to Process the request because the value entered for the
- parameter Continuous is not supported by the implementation.
-
-To work around this issue, set the ``force_persistent_boot_device`` parameter
-in ``driver-info`` on the node to ``Never`` by running the following command
-from the command line:
-
-.. code-block:: bash
-
- openstack baremetal node set --driver-info \
- force_persistent_boot_device=Never ${node_uuid}
diff --git a/doc/source/admin/drivers/ipmitool.rst b/doc/source/admin/drivers/ipmitool.rst
index b6d64f17d..dd051d988 100644
--- a/doc/source/admin/drivers/ipmitool.rst
+++ b/doc/source/admin/drivers/ipmitool.rst
@@ -171,30 +171,7 @@ protocol version::
Static boot order configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Some hardware is known to misbehave when changing the boot device through the
-IPMI protocol. To work around it you can use the ``noop`` management interface
-implementation with the ``ipmi`` hardware type. In this case the Bare Metal
-service will not change the boot device for you, leaving the pre-configured
-boot order.
-
-For example, in case of the :ref:`pxe-boot`:
-
-#. Via any available means configure the boot order on the node as follows:
-
- #. Boot from PXE/iPXE on the provisioning NIC.
-
- .. warning::
- If it is not possible to limit network boot to only provisioning NIC,
- make sure that no other DHCP/PXE servers are accessible by the node.
-
- #. Boot from hard drive.
-
-#. Make sure the ``noop`` management interface is enabled, see example in
- `Enabling the IPMI hardware type`_.
-
-#. Change the node to use the ``noop`` management interface::
-
- openstack baremetal node set <NODE> --management-interface noop
+See :ref:`static-boot-order`.
.. TODO(lucasagomes): Write about privilege level
.. TODO(lucasagomes): Write about force boot device
diff --git a/doc/source/admin/drivers/redfish.rst b/doc/source/admin/drivers/redfish.rst
index 2e5a50a2b..f784740dc 100644
--- a/doc/source/admin/drivers/redfish.rst
+++ b/doc/source/admin/drivers/redfish.rst
@@ -185,6 +185,29 @@ property can be used to pass user-specified kernel command line parameters.
For ramdisk kernel, ``[instance_info]/kernel_append_params`` property serves
the same purpose.
+Virtual Media Ramdisk
+~~~~~~~~~~~~~~~~~~~~~
+
+The ``ramdisk`` deploy interface can be used in concert with the
+``redfish-virtual-media`` boot interface to facilitate the boot of a remote
+node utilizing pre-supplied virtual media.
+
+Instead of supplying an ``[instance_info]/image_source`` parameter, a
+``[instance_info]/boot_iso`` parameter can be supplied. The image will
+be downloaded by the conductor, and the instance will be booted using
+the supplied ISO image. In accordance with the ``ramdisk`` deployment
+interface behavior, once booted the machine will have a ``provision_state``
+of ``ACTIVE``.
+
+.. code-block:: bash
+
+ openstack baremetal node set \
+ --instance_info boot_iso=http://url/to.iso node-0
+
+This initial interface does not support bootloader configuration
+parameter injection; as such, the ``[instance_info]/kernel_append_params``
+setting is ignored.
+
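+Assuming the node already uses the ``redfish-virtual-media`` boot interface,
+switching it to this deploy interface and deploying could look like this
+(a sketch; the node name is illustrative):
+
+.. code-block:: bash
+
+   openstack baremetal node set node-0 --deploy-interface ramdisk
+   openstack baremetal node deploy node-0
+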
.. _Redfish: http://redfish.dmtf.org/
.. _Sushy: https://opendev.org/openstack/sushy
.. _TLS: https://en.wikipedia.org/wiki/Transport_Layer_Security
diff --git a/doc/source/admin/fast-track.rst b/doc/source/admin/fast-track.rst
new file mode 100644
index 000000000..464966da8
--- /dev/null
+++ b/doc/source/admin/fast-track.rst
@@ -0,0 +1,50 @@
+=====================
+Fast-Track Deployment
+=====================
+
+*Fast track* is a mode of operation where the Bare Metal service keeps a
+machine powered on with the agent running between provisioning operations.
+It is first booted during in-band inspection or cleaning (whatever happens
+first) and is only shut down before rebooting into the final instance.
+Depending on the configuration, this mode can save several reboots and is
+particularly useful for scenarios where nodes are enrolled, prepared and
+provisioned within a short period of time.
+
+.. warning::
+ Fast track deployment targets standalone use cases and is only tested with
+ the ``noop`` networking. The case where inspection, cleaning and
+ provisioning networks are different is not supported.
+
+Enabling
+========
+
+Fast track is off by default and should be enabled in the configuration:
+
+.. code-block:: ini
+
+ [deploy]
+ fast_track = true
+
+Inspection
+----------
+
+If using :ref:`in-band inspection`, you need to tell ironic-inspector not to
+power off nodes afterwards. Depending on the inspection mode (managed or
+unmanaged), you need to configure it in two places. In ``ironic.conf``:
+
+.. code-block:: ini
+
+ [inspector]
+ power_off = false
+
+And in ``inspector.conf``:
+
+.. code-block:: ini
+
+ [processing]
+ power_off = false
+
+Finally, you need to update the :ironic-inspector-doc:`inspection PXE
+configuration <install/index.html#configuration>` to include the
+``ipa-api-url`` kernel parameter, pointing at the **ironic** endpoint, in
+addition to the existing ``ipa-inspection-callback-url``.
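+
+For example, the resulting kernel command line additions might look like this
+(the addresses are illustrative):
+
+.. code-block:: bash
+
+    ipa-inspection-callback-url=http://192.0.2.1:5050/v1/continue \
+    ipa-api-url=http://192.0.2.1:6385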
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
index 4815be3c3..5154aa39b 100644
--- a/doc/source/admin/index.rst
+++ b/doc/source/admin/index.rst
@@ -23,18 +23,27 @@ the services.
Port Groups <portgroups>
Configuring Web or Serial Console <console>
Enabling Notifications <notifications>
- Ceph Object Gateway <radosgw>
- Emitting Software Metrics <metrics>
- Auditing API Traffic <api-audit-support>
- Service State Reporting <gmr>
Conductor Groups <conductor-groups>
Upgrade Guide <upgrade-guide>
Security <security>
- Windows Images <building-windows-images>
Troubleshooting FAQ <troubleshooting>
Power Sync with the Compute Service <power-sync>
- Agent Token <agent-token>
Node Multi-Tenancy <node-multitenancy>
+ Fast-Track Deployment <fast-track>
+
+Advanced Topics
+---------------
+
+.. toctree::
+ :maxdepth: 1
+
+ Ceph Object Gateway <radosgw>
+ Windows Images <building-windows-images>
+ Emitting Software Metrics <metrics>
+ Auditing API Traffic <api-audit-support>
+ Service State Reporting <gmr>
+ Agent Token <agent-token>
+ Deploying without BMC Credentials <agent-power>
.. toctree::
:hidden:
diff --git a/doc/source/admin/inspection.rst b/doc/source/admin/inspection.rst
index 39ad9cdf0..41605f1b1 100644
--- a/doc/source/admin/inspection.rst
+++ b/doc/source/admin/inspection.rst
@@ -68,6 +68,8 @@ for scheduling::
Please see a specific :doc:`hardware type page </admin/drivers>` for
the exact list of capabilities this hardware type can discover.
+.. _in-band inspection:
+
In-band inspection
------------------
@@ -92,7 +94,7 @@ the following option:
.. code-block:: ini
[inspector]
- endpoint-override = http://inspector.example.com:5050
+ endpoint_override = http://inspector.example.com:5050
In order to ensure that ports in Bare Metal service are synchronized with
NIC ports on the node, the following settings in the ironic-inspector
diff --git a/doc/source/admin/node-deployment.rst b/doc/source/admin/node-deployment.rst
index 39dbc28a5..3136685ed 100644
--- a/doc/source/admin/node-deployment.rst
+++ b/doc/source/admin/node-deployment.rst
@@ -40,15 +40,49 @@ BIOS, and RAID interfaces.
.. _node-deployment-core-steps:
-Core steps
-----------
-
-Certain default deploy steps are designated as 'core' deploy steps. The
-following deploy steps are core:
-
-``deploy.deploy``
- In this step the node is booted using a provisioning image, and the user
- image is written to the node's disk. It has a priority of 100.
+Agent steps
+-----------
+
+All deploy interfaces based on ironic-python-agent (i.e. ``direct``, ``iscsi``,
+``ansible`` and any derivatives) expose the following deploy steps:
+
+``deploy.deploy`` (priority 100)
+ In this step the node is booted using a provisioning image.
+``deploy.write_image`` (priority 80)
+ An out-of-band (``iscsi``, ``ansible``) or in-band (``direct``) step that
+ downloads and writes the image to the node.
+``deploy.tear_down_agent`` (priority 40)
+ In this step the provisioning image is shut down.
+``deploy.switch_to_tenant_network`` (priority 30)
+ In this step networking for the node is switched from provisioning to
+ tenant networks.
+``deploy.boot_instance`` (priority 20)
+ In this step the node is booted into the user image.
+
+Additionally, the ``iscsi`` and ``direct`` deploy interfaces have:
+
+``deploy.prepare_instance_boot`` (priority 60)
+ In this step the boot device is configured and the bootloader is installed.
+
+ .. note::
+ For the ``ansible`` deploy interface these steps are done in
+ ``deploy.write_image``.
+
+Accordingly, the following priority ranges can be used for custom deploy steps:
+
+> 100
+ Out-of-band steps to run before deployment.
+81 to 99
+ In-band deploy steps to run before the image is written.
+61 to 79
+ In-band deploy steps to run after the image is written but before the
+ bootloader is installed.
+41 to 59
+  In-band steps to run after the image is written and the bootloader is
+  installed.
+21 to 39
+ Out-of-band steps to run after the provisioning image is shut down.
+1 to 19
+ Any steps that are run when the user instance is already running.
Writing a Deploy Step
---------------------
diff --git a/doc/source/admin/raid.rst b/doc/source/admin/raid.rst
index 40a5e4bb1..38753d698 100644
--- a/doc/source/admin/raid.rst
+++ b/doc/source/admin/raid.rst
@@ -383,6 +383,7 @@ There are certain limitations to be aware of:
{
"interface": "deploy",
"step": "erase_devices_metadata"
+ },
{
"interface": "raid",
"step": "create_configuration"
@@ -409,7 +410,17 @@ have its root file system on the first partition. Starting with Ussuri,
the image can also have additional metadata to point Ironic to the
partition with the root file system: for this, the image needs to set
the ``rootfs_uuid`` property with the file system UUID of the root file
-system. The pre-Ussuri approach, i.e. to have the root file system on
+system. One way to extract this UUID from an existing image is to
+download the image, mount it as a loopback device, and use ``blkid``:
+
+.. code-block:: bash
+
+   $ sudo losetup -f                           # find the first free loop device
+   $ sudo losetup /dev/loop0 /tmp/myimage.raw  # attach the image to it
+   $ sudo kpartx -a /dev/loop0                 # create mappings for its partitions
+   $ blkid                                     # note the UUID of the root partition
+
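+If the image is served from the Image service, the extracted UUID can then be
+set on the image, for example (the image name is illustrative):
+
+.. code-block:: bash
+
+   $ openstack image set --property rootfs_uuid=<uuid-from-blkid> my-image
+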
+The pre-Ussuri approach, i.e. to have the root file system on
the first partition, is kept as a fallback and hence allows software
RAID deployments where Ironic does not have access to any image metadata
(e.g. Ironic stand-alone).
diff --git a/doc/source/admin/report.txt b/doc/source/admin/report.txt
index 1293047f7..c3cfdbf3e 100644
--- a/doc/source/admin/report.txt
+++ b/doc/source/admin/report.txt
@@ -273,7 +273,6 @@ default:
no-raid
enabled_vendor_interfaces =
no-vendor
- fatal_exception_format_errors = False
force_raw_images = True
graceful_shutdown_timeout = 60
grub_config_template = /opt/stack/ironic/ironic/common/grub_conf.template
diff --git a/doc/source/admin/troubleshooting.rst b/doc/source/admin/troubleshooting.rst
index 03fa0445b..20549bbca 100644
--- a/doc/source/admin/troubleshooting.rst
+++ b/doc/source/admin/troubleshooting.rst
@@ -78,6 +78,35 @@ A few things should be checked in this case:
set to ``1``. See :doc:`/install/configure-nova-flavors` for more
details on the correct configuration.
+#. Upon scheduling, Nova will query the Placement API service for the
+ available resource providers (in the case of Ironic: nodes with a given
+ resource class). If placement does not have any allocation candidates for the
+ requested resource class, the request will result in a "No valid host
+ was found" error. It is hence sensible to check if Placement is aware of
+ resource providers (nodes) for the requested resource class with::
+
+ $ openstack allocation candidate list --resource CUSTOM_BAREMETAL_LARGE='1'
+ +---+-----------------------------+--------------------------------------+-------------------------------+
+ | # | allocation | resource provider | inventory used/capacity |
+ +---+-----------------------------+--------------------------------------+-------------------------------+
+ | 1 | CUSTOM_BAREMETAL_LARGE=1 | 2f7b9c69-c1df-4e40-b94e-5821a4ea0453 | CUSTOM_BAREMETAL_LARGE=0/1 |
+ +---+-----------------------------+--------------------------------------+-------------------------------+
+
+ For Ironic, the resource provider is the UUID of the available Ironic node.
+ If this command returns an empty list (or does not contain the targeted
+   resource provider), the operator first needs to understand why the resource
+   tracker has not reported this provider to placement. Potential explanations
+ include:
+
+ * the resource tracker cycle has not finished yet and the resource provider
+ will appear once it has (the time to finish the cycle scales linearly with
+ the number of nodes the corresponding ``nova-compute`` service manages);
+
+ * the node is in a state where the resource tracker does not consider it to
+ be eligible for scheduling, e.g. when the node has ``maintenance`` set to
+     ``True``; make sure the target nodes are in the ``available`` state and
+     ``maintenance`` is ``False``, which can be checked as shown below;
+
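+   Both conditions can be checked at once with, for instance::
+
+      $ openstack baremetal node list --provision-state available --no-maintenance
+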
#. If you do not use scheduling based on resource classes, then the node's
properties must have been set either manually or via inspection.
For each node with ``available`` state check that the ``properties``
@@ -134,8 +163,6 @@ A few things should be checked in this case:
check ``openstack hypervisor show <IRONIC NODE>`` to see the status of
individual Ironic nodes as reported to Nova.
- .. TODO(dtantsur): explain inspecting the placement API
-
#. Figure out which Nova Scheduler filter ruled out your nodes. Check the
``nova-scheduler`` logs for lines containing something like::
@@ -163,11 +190,13 @@ inject your code and/or SSH keys during the ramdisk build (depends on how
exactly you've built your ramdisk). But it's also possible to quickly modify
an already built ramdisk.
-Create an empty directory and unpack the ramdisk content there::
+Create an empty directory and unpack the ramdisk content there:
- mkdir unpack
- cd unpack
- gzip -dc /path/to/the/ramdisk | cpio -id
+.. code-block:: bash
+
+ $ mkdir unpack
+ $ cd unpack
+ $ gzip -dc /path/to/the/ramdisk | cpio -id
The last command will result in the whole Linux file system tree unpacked in
the current directory. Now you can modify any files you want. The actual
@@ -178,7 +207,7 @@ location of the files will depend on the way you've built the ramdisk.
the ``systemd-container`` package) to create a lightweight container from
the unpacked filesystem tree::
- sudo systemd-nspawn --directory /path/to/unpacked/ramdisk/ /bin/bash
+ $ sudo systemd-nspawn --directory /path/to/unpacked/ramdisk/ /bin/bash
This will allow you to run commands within the filesystem, e.g. use package
manager. If the ramdisk is also systemd-based, and you have login
@@ -186,12 +215,12 @@ location of the files will depend on the way you've built the ramdisk.
::
- sudo systemd-nspawn --directory /path/to/unpacked/ramdisk/ --boot
+ $ sudo systemd-nspawn --directory /path/to/unpacked/ramdisk/ --boot
After you've done the modifications, pack the whole content of the current
directory back::
- find . | cpio -H newc -o | gzip -c > /path/to/the/new/ramdisk
+ $ find . | cpio -H newc -o | gzip -c > /path/to/the/new/ramdisk
.. note:: You don't need to modify the kernel (e.g.
``tinyipa-master.vmlinuz``), only the ramdisk part.
@@ -373,7 +402,7 @@ the IPMI port to be unreachable through ipmitool, as shown:
.. code-block:: bash
- $ipmitool -I lan -H ipmi_host -U ipmi_user -P ipmi_pass chassis power status
+ $ ipmitool -I lan -H ipmi_host -U ipmi_user -P ipmi_pass chassis power status
Error: Unable to establish LAN session
To fix this, enable `IPMI over lan` setting using your BMC tool or web app.
@@ -385,9 +414,261 @@ When working with lanplus interfaces, you may encounter the following error:
.. code-block:: bash
- $ipmitool -I lanplus -H ipmi_host -U ipmi_user -P ipmi_pass power status
+ $ ipmitool -I lanplus -H ipmi_host -U ipmi_user -P ipmi_pass power status
Error in open session response message : insufficient resources for session
Error: Unable to establish IPMI v2 / RMCP+ session
To fix that issue, please enable `RMCP+ Cipher Suite3 Configuration` setting
using your BMC tool or web app.
+
+Why are my nodes stuck in a "-ing" state?
+=========================================
+
+The Ironic conductor uses states ending with ``ing`` as a signifier that
+the conductor is actively working on something related to the node.
+
+Often, this means there is an internal lock or ``reservation`` set on the node
+and the conductor is downloading, uploading, or attempting to perform some
+sort of Input/Output operation.
+
+In case the conductor gets stuck, these operations should time out,
+but there are cases where the operating system blocks operations until
+completion. These sorts of operations can vary based on the specific
+environment and operating configuration.
+
+What can cause these sorts of failures?
+---------------------------------------
+
+Typical causes of such failures are largely rooted in the concept of
+``iowait``, either in the form of downloading from a remote host or
+reading or writing to the disk of the conductor. An operator can use the
+`iostat <https://man7.org/linux/man-pages/man1/iostat.1.html>`_ tool to
+identify the percentage of CPU time spent waiting on storage devices.
+
+The fields that will be particularly important are the ``iowait``, ``await``,
+and ``tps`` ones, which can be read about in the ``iostat`` manual page.
+
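+For example, extended statistics refreshed every five seconds give a quick
+view of both the CPU ``iowait`` and the per-device ``await`` values:
+
+.. code-block:: bash
+
+    # -x adds extended per-device statistics; 5 is the refresh interval
+    $ iostat -x 5
+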
+In the case of network file systems, for backing components such as image
+caches or distributed ``tftpboot`` or ``httpboot`` folders, IO operations
+failing on these can, depending on operating system and underlying client
+settings, cause threads to be stuck in a blocking wait state, which is
+realistically undetectable short of the operating system logging connectivity
+errors or lock manager access errors.
+
+For example with
+`nfs <https://www.man7.org/linux/man-pages/man5/nfs.5.html>`_,
+the underlying client recovery behavior, in terms of ``soft``, ``hard``,
+``softreval``, ``nosoftreval``, will largely impact this behavior, but also
+NFS server settings can impact this behavior. A solid sign of this failure
+is when an ``ls /path/to/nfs`` command hangs for a period of time.
+In such cases, the Storage Administrator should be consulted and network
+connectivity investigated for errors before trying to recover and
+proceed.
+
+The bad news for IO related failures
+------------------------------------
+
+If the node has a populated ``reservation`` field, and has not timed out or
+proceeded to a ``fail`` state, then the conductor process will likely need to
+be restarted. This is because the worker thread is hung within the conductor.
+
+Manual intervention within Ironic's database is *not* advised to try and
+"un-wedge" the machine in this state, and restarting the conductor is
+encouraged.
+
+.. note::
+ Ironic's conductor, upon restart, clears reservations for nodes which
+ were previously managed by the conductor before restart.
+
+If a distributed or network file system is in use, it is highly recommended
+that the operating system of the node running the conductor be rebooted as
+the running conductor may not even be able to exit in the state of an IO
+failure, again dependent upon site and server configuration.
+
+File Size != Disk Size
+----------------------
+
+An easy misconception is that a 2.4 GB file means that only 2.4 GB
+is written to disk. But if that file's virtual size is 20 GB or 100 GB,
+things can become very problematic and extend the amount of time the node
+spends in the ``deploying`` and ``deploy wait`` states.
+
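+A quick way to spot this up front is ``qemu-img info`` (the file name here
+is illustrative):
+
+.. code-block:: bash
+
+    # "virtual size" is what ends up on the target disk,
+    # "disk size" is the size of the file itself
+    $ qemu-img info my-image.qcow2
+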
+Again, these sorts of cases will depend upon the exact configuration of the
+deployment, but the following are areas where such expansion can occur.
+
+* Conversion to raw image files upon download to the conductor, from the
+ ``[DEFAULT]force_raw_images`` option, in particular with the ``iscsi``
+ deployment interface. Users using glance and the ``direct`` deployment
+ interface may also experience issues here as the conductor will cache
+ the image to be written which takes place when the
+ ``[agent]image_download_source`` is set to ``http`` instead of ``swift``.
+
+* Writing a QCOW2 file over the ``iscsi`` deployment interface from the
+  conductor to the node being deployed can result in large amounts of
+  "white space" being transmitted over the wire and written
+  to the end device.
+
+.. note::
+ The QCOW2 image conversion utility does consume quite a bit of memory
+ when converting images or writing them to the end storage device. This
+ is because the files are not sequential in nature, and must be re-assembled
+ from an internal block mapping. Internally Ironic limits this to 1GB
+ of RAM. Operators performing large numbers of deployments may wish to
+ explore the ``direct`` deployment interface in these sorts of cases in
+ order to minimize the conductor becoming a limiting factor due to memory
+ and network IO.
+
+Why are my nodes stuck in a "wait" state?
+=========================================
+
+The Ironic conductor uses states containing ``wait`` as a signifier that
+the conductor is waiting for a callback from another component, such as
+the Ironic Python Agent or the Inspector. If this feedback does not arrive,
+the conductor will time out and the node will eventually move to a ``failed``
+state. Depending on the configuration and the circumstances, however, a node
+can stay in a ``wait`` state for a long time or even never time out. The list
+of such wait states includes:
+
+* ``clean wait`` for cleaning,
+* ``inspect wait`` for introspection,
+* ``rescue wait`` for rescuing, and
+* ``wait call-back`` for deploying.
+
+Communication issues between the conductor and the node
+-------------------------------------------------------
+
+One of the most common issues when nodes seem to be stuck in a wait state
+occurs when the node never received any instructions or does not react as
+expected: the conductor moved the node to a wait state but the node will
+never call back. Examples include wrong ciphers, which will make ipmitool
+get stuck, or BMCs in a state where they accept commands but don't perform the
+requested task (or only a part of it, like shutting off but not starting).
+It is useful in these cases to check via a ping or the console whether and
+which action the node is performing. If the node does not seem to react to the
+requests sent by the conductor, it may be worthwhile to try the corresponding
+action out-of-band, e.g. confirm that power on/off commands work when directly
+sent to the BMC. The section on `IPMI errors`_ above gives some additional
+points to check. In some situations, a BMC reset may be necessary.
+
+Ironic Python Agent stuck
+-------------------------
+
+Nodes can also remain in a wait state when the component the conductor is
+waiting for gets stuck, e.g. when a hardware manager enters a loop or is
+waiting for an event that never happens. In these cases, it might be
+helpful to connect to the IPA and inspect its logs; see the troubleshooting
+guide of the :ironic-python-agent-doc:`ironic-python-agent (IPA) <>` on how
+to do this.
+
+Deployments fail with "failed to update MAC address"
+====================================================
+
+The design of the integration with the Networking service (neutron) is such
+that once virtual ports have been created in the API, their MAC address must
+be updated in order for the DHCP server to be able to appropriately reply.
+
+This can sometimes result in errors being raised indicating that the MAC
+address is already in use. This is because at some point in the past, a
+virtual interface was orphaned either by accident or by some unexpected
+glitch, and a previous entry is still present in Neutron.
+
+This error looks something like this when reported in the ironic-conductor
+log output::
+
+ Failed to update MAC address on Neutron port 305beda7-0dd0-4fec-b4d2-78b7aa4e8e6a.: MacAddressInUseClient: Unable to complete operation for network 1e252627-6223-4076-a2b9-6f56493c9bac. The mac address 52:54:00:7c:c4:56 is in use.
+
+Because we have no knowledge of this entry, we fail the deployment
+process, as we cannot safely make the assumptions that would be required
+to attempt to resolve the conflict automatically.
+
+How did I get here?
+-------------------
+
+Originally this was a fairly easy issue to encounter. The retry logic
+between the Orchestration (heat) and Compute (nova) services could
+sometimes result in additional, unnecessary ports being created.
+
+Bugs of this class have been largely resolved since the Rocky development
+cycle. Since then, the usual way to encounter this is for Networking
+(neutron) VIF attachments not to be removed or deleted prior to deleting
+a port in the Bare Metal service.
+
+Ultimately, the key point is that the port is being deleted. Under most
+operating circumstances, there really is no need to delete the port, and
+since VIF attachments are stored on the port object, deleting the port
+*CAN* result in the VIF not being cleaned up from Neutron.
+
+Under normal circumstances, when deleting ports, a node should be in a
+stable state and should not be provisioned. If the
+``openstack baremetal port delete`` command fails, this may indicate that
+a known VIF is still attached. Generally, if the attachments are
+transitory from cleaning, provisioning, rescuing, or even inspection,
+getting the node to the ``available`` state will unblock your delete
+operation, unless there is a tenant VIF attachment. In that case, the VIF
+will need to be removed from within the Bare Metal service using the
+``openstack baremetal node vif detach`` command.
+
+A port can also be checked to see if there is a VIF attachment by consulting
+the port's ``internal_info`` field.
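+
+For example, any attachment record will be present in the output of::
+
+    openstack baremetal port show <port uuid> -f value -c internal_info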
+
+.. warning::
+   The ``maintenance`` flag can be used to force the node's port to be
+   deleted; however, this will disable any check that would normally
+   block the user from issuing a delete and accidentally orphaning the
+   VIF attachment record.
+
+How do I resolve this?
+----------------------
+
+Generally, you need to identify the port with the offending MAC address.
+Example::
+
+ openstack port list --mac-address 52:54:00:7c:c4:56
+
+From the command's output, you should be able to identify the ``id``
+field. Using that, you can delete the port. Example::
+
+ openstack port delete <id>
+
+.. warning::
+   Before deleting a port, you should always verify that it is no longer
+   in use or no longer seems applicable/operable. If multiple deployments
+   of the Bare Metal service share a single Neutron, an inventory typo,
+   or possibly even a duplicate MAC address, could also produce the same
+   basic error message.
+
+My test VM image does not deploy -- mount point does not exist
+==============================================================
+
+What is likely occurring
+------------------------
+
+The image being deployed is likely a partition image where the file
+system that the user wishes to boot from lacks the required folders,
+such as ``/dev`` and ``/proc``, which are needed to install a bootloader
+for a Linux OS image.
+
+It should be noted that similar errors can also occur with whole disk
+images where we are attempting to set up the UEFI bootloader
+configuration. That being said, in this case, the image is likely
+invalid or contains an unexpected internal structure.
+
+Users performing testing may choose an image that they believe will work
+based on it working for virtual machines. These images are often
+attractive for testing as they are generic and include basic support
+for establishing networking and possibly installing user keys.
+Unfortunately, these images often lack the drivers and firmware required
+for many different types of physical hardware, which makes using them
+very problematic. Additionally, images such as `Cirros <https://download.cirros-cloud.net>`_
+do not have any contents in the root filesystem (i.e. an empty
+filesystem), as they are designed for the ``ramdisk`` to write the
+contents to disk upon boot.
+
+How do I not encounter this issue?
+----------------------------------
+
+We generally recommend using `diskimage-builder <https://docs.openstack.org/diskimage-builder>`_
+or vendor supplied images. CentOS, Ubuntu, Fedora, and Debian all publish
+operating system images which generally include drivers and firmware for
+physical hardware. Many of these published "cloud" images also support
+auto-configuration of networking and population of user keys.
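+
+As an illustrative sketch, a generic Ubuntu image with DHCP networking
+can be built along these lines (element names may vary between
+diskimage-builder releases)::
+
+    pip install diskimage-builder
+    disk-image-create ubuntu vm dhcp-all-interfaces -o my-image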
diff --git a/doc/source/conf.py b/doc/source/conf.py
index ecbedec6f..1f667a4b4 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -34,8 +34,6 @@ sys.path.insert(0, os.path.join(os.path.abspath('.'), '_exts'))
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.viewcode',
'sphinx.ext.graphviz',
- 'sphinxcontrib.httpdomain',
- 'sphinxcontrib.pecanwsme.rest',
'sphinxcontrib.seqdiag',
'sphinxcontrib.apidoc',
'sphinxcontrib.rsvgconverter',
@@ -59,9 +57,10 @@ apidoc_excluded_paths = [
]
apidoc_separate_modules = True
-repository_name = 'openstack/ironic'
-use_storyboard = True
-openstack_projects = [
+openstackdocs_repo_name = 'openstack/ironic'
+openstackdocs_use_storyboard = True
+openstackdocs_pdf_link = True
+openstackdocs_projects = [
'bifrost',
'cinder',
'glance',
@@ -124,7 +123,7 @@ add_function_parentheses = True
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
# A list of glob-style patterns that should be excluded when looking for
# source files. They are matched against the source file names relative to the
diff --git a/doc/source/contributor/bugs.rst b/doc/source/contributor/bugs.rst
new file mode 100644
index 000000000..548788776
--- /dev/null
+++ b/doc/source/contributor/bugs.rst
@@ -0,0 +1,128 @@
+Bug Reporting and Triaging Guide
+================================
+
+StoryBoard
+----------
+
+All ironic projects use StoryBoard_ for tracking both bugs and enhancement
+requests (RFE). The `ironic project group`_ lists all our projects.
+
+.. note::
+ Ironic is developed as part of OpenStack and therefore uses
+ the ``openstack/`` namespace.
+
+StoryBoard is somewhat different from traditional bug tracking
+systems because a *story* is not linked to a project itself, but rather
+through its *tasks*. A story represents an issue you are facing or an
+enhancement you want to see, while tasks represent individual action items
+which can span several projects. When creating a story, you'll also need to
+create the first task. If unsure, create a task against ``openstack/ironic``.
+
+Reporting Guide
+---------------
+
+We receive a constant stream of requests, so it's important to file a
+meaningful story for it to be acted upon. A good story:
+
+* specifies **why** a change is needed.
+* explains how to reproduce the described condition.
+
+ .. note::
+ Please try to provide a reproducer based on unit tests, :ref:`devstack
+ <deploy_devstack>` or bifrost_. While we try our best to support users
+ using other installers and distributions, it may be non-trivial without
+ deep knowledge of them. If you're using a commercial distribution or
+ a product, please try contacting support first.
+
+* should be understandable without additional context. For example, if you see
+ an exception, we will need the full traceback.
+
+* should not be too verbose either. Unfortunately, we cannot process a
+  few days' worth of system logs to find the problems; we expect your
+  collaboration.
+
+* is not a question or a support request. Please see :doc:`contributing` for
+ the ways to contact us.
+
+* provides a way to contact the reporter. Please follow the comments and
+ expect follow-up emails, but ideally also be on IRC for questions.
+
+An enhancement request additionally:
+
+* benefits the overall project, not just one consumer. If you have a case that
+ is specific to your requirements, think about ways to make ironic extensible
+ to be able to cover it.
+
+* does not unnecessarily increase the project scope. Consider whether
+  your idea can be implemented without changing ironic or its projects;
+  maybe it actually should be?
+
+Triaging Guide
+--------------
+
+The bug triaging process involves checking new stories to make sure they are
+actionable by the team. This guide mostly targets the project team, but we
+would appreciate it if reporters could partly self-triage their own requests.
+
+* Determine if the request is valid and complete. Use the checklist in the
+ `Reporting Guide`_ for that.
+
+* Is the request a bug report or an enhancement request (an RFE)? The
+  difference is often subtle; the key question to answer is whether the
+  described behavior is expected.
+
+ Add an ``rfe`` tag to all enhancement requests and propose it for the "RFE
+ Review" section of the `weekly meeting`_.
+
+* Does the RFE obviously require a spec_? Usually this is decided when an RFE
+ is reviewed during the meeting, but some requests are undoubtedly complex,
+ involve changing a lot of critical parts and thus demand a spec.
+
+ Add a ``needs-spec`` tag to enhancement requests that obviously need a
+ spec. Otherwise leave it until the meeting.
+
+* Apply additional tags:
+
+ * All hardware type specific stories should receive a corresponding tag (e.g.
+ ``ipmi``, ``idrac``, etc).
+
+ * API-related stories should have an ``api`` tag.
+
+ * CI issues should have a ``gate`` tag.
+
+The next actions **must only** be done by a core team member (or an
+experienced full-time contributor appointed by the PTL):
+
+* Can the RFE be automatically approved? It happens if the RFE requests an
+ implementation of a driver feature that is already implemented for other
+ drivers and does not pose additional complexity.
+
+ If the RFE can be automatically approved, apply the ``rfe-approved`` tag.
+ If unsure, never apply the tag! Talk to the PTL instead.
+
+* Does the RFE have a corresponding spec approved? If yes, apply the
+ ``rfe-approved`` tag.
+
+* In the end, apply the ``ironic-triaged`` tag to mark the story as
+  triaged.
+
+Expiring Bugs
+-------------
+
+While we hope to fix all issues that our consumers hit, it is unfortunately not
+realistic. Stories **may** be closed by marking all their tasks ``INVALID`` in
+the following cases:
+
+* No solution has been proposed in 1 calendar year.
+
+* Additional information has been requested from the reporter, and no update
+ has been provided in 1 calendar month.
+
+* The request no longer aligns with the direction of the project.
+
+.. note::
+ As usual, common sense should be applied when closing stories.
+
+.. _StoryBoard: https://storyboard.openstack.org
+.. _ironic project group: https://storyboard.openstack.org/#!/project_group/ironic
+.. _bifrost: https://docs.openstack.org/bifrost
+.. _spec: https://specs.openstack.org/openstack/ironic-specs/
+.. _weekly meeting: https://wiki.openstack.org/wiki/Meetings/Ironic
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
index 88f8449da..7295da979 100644
--- a/doc/source/contributor/contributing.rst
+++ b/doc/source/contributor/contributing.rst
@@ -36,11 +36,13 @@ The ironic project moved from Launchpad to `StoryBoard
<https://storyboard.openstack.org/>`_ for work and task tracking.
This provides an aggregate view called a "Project Group"
and individual "Projects". A good starting place is the
-`project group <https://storyboard.openstack.org/#!/project_group/75>`_
+`project group <https://storyboard.openstack.org/#!/project_group/ironic>`_
representing the whole of the ironic community, as opposed to
the `ironic project <https://storyboard.openstack.org/#!/project/943>`_
storyboard which represents ironic as a repository.
+See :doc:`bugs` for more details on how we track bugs.
+
Internet Relay Chat 'IRC'
-------------------------
diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst
index 136827f94..0377bdb34 100644
--- a/doc/source/contributor/index.rst
+++ b/doc/source/contributor/index.rst
@@ -12,6 +12,7 @@ project.
:maxdepth: 1
Developer Contribution Guide <contributing>
+ Bugs Reporting and Triaging Guide <bugs>
Setting Up Your Development Environment <dev-quickstart>
Priorities <https://specs.openstack.org/openstack/ironic-specs/#priorities>
Specifications <https://specs.openstack.org/openstack/ironic-specs/>
diff --git a/doc/source/contributor/jobs-description.rst b/doc/source/contributor/jobs-description.rst
index 24b4fa217..a2ca0fe78 100644
--- a/doc/source/contributor/jobs-description.rst
+++ b/doc/source/contributor/jobs-description.rst
@@ -24,7 +24,7 @@ The description of each jobs that runs in the CI when you submit a patch for
* - ironic-tempest-functional-python3
- Deploys Ironic in standalone mode and runs tempest functional tests
that matches the regex `ironic_tempest_plugin.tests.api` under Python3
- * - ironic-grenade-dsvm
+ * - ironic-grenade
- Deploys Ironic in a DevStack and runs upgrade for all enabled services.
* - ironic-grenade-dsvm-multinode-multitenant
- Deploys Ironic in a multinode DevStack and runs upgrade for all enabled
diff --git a/doc/source/contributor/releasing.rst b/doc/source/contributor/releasing.rst
index dd3744e0f..44ec3616e 100644
--- a/doc/source/contributor/releasing.rst
+++ b/doc/source/contributor/releasing.rst
@@ -137,6 +137,96 @@ Things to do before releasing
Otherwise, once it is made, CI (the grenade job that tests new-release ->
master) will fail.
+* Check for any open patches that are close to being merged or are
+  release critical.
+
+ This usually includes important bug fixes and/or features that we'd like to
+ release, including the related documentation.
+
+How to propose a release
+========================
+
+The steps that lead to a release proposal are mainly manual, while
+proposing the release itself is an almost 100% automated process,
+accomplished by following these steps:
+
+* Clone the `openstack/releases <https://opendev.org/openstack/releases>`_
+ repository. This is where deliverables are tracked and all the automation
+ resides.
+
+ * Under the ``deliverables`` directory you can see yaml files for each
+ deliverable (i.e. subproject) grouped by release cycles.
+
+ * The ``_independent`` directory contains yaml files for deliverables that
+ are not bound to (official) cycles (e.g. ironic-python-agent-builder).
+
+* To check the changes we're about to release we can use the tox environment
+ ``list-unreleased-changes``, with this syntax:
+
+ .. code-block:: bash
+
+ tox -e venv -- list-unreleased-changes <series> <deliverable>
+
+  The ``series`` argument is a release series (e.g. master or train,
+  not stable/ussuri or stable/train).
+
+ For example, assuming we're in the main directory of the releases repository,
+ to check the changes in the ussuri series for ironic-python-agent
+ type:
+
+ .. code-block:: bash
+
+ tox -e venv -- list-unreleased-changes ussuri openstack/ironic-python-agent
+
+* To update the deliverable file for the new release, we use a scripted process
+ in the form of a tox environment called ``new-release``.
+
+ To get familiar with it and see all the options, type:
+
+ .. code-block:: bash
+
+ tox -e venv -- new-release -h
+
+  Now, based on the list of changes we found in the preceding step, and
+  the release notes, we need to decide whether the next version will be
+  major, minor (feature) or patch (bugfix).
+
+ Note that in this case ``series`` is a code name (train, ussuri), not a
+ branch.
+
+  The ``--stable-branch`` argument is used only for branching at the end
+  of a cycle; independent projects are not branched this way, though.
+
+ To propose the release, use the script to update the deliverable file, then
+ commit the change, and propose it for review.
+
+ For example, to propose a minor release for ironic in the master branch use:
+
+ .. code-block:: bash
+
+ tox -e venv -- new-release -v master ironic feature
+
+ Remember to use a meaningful topic, usually using the name of the
+ deliverable, the new version and the branch, if applicable.
+
+  A good commit message title should also include the same, for example
+  "Release ironic 1.2.3 for ussuri".
+
+* As an optional step, we can use ``tox -e list-changes`` to double-check the
+ changes before submitting them for review.
+
+  Also ``tox -e validate`` (it might take a while to run based on the
+  number of changes) does some sanity checks, but since everything is
+  scripted, there shouldn't be any issue.
+
+  All the scripts are designed and maintained by the release team; in
+  case of questions or doubts, or if any errors should arise, you can
+  reach them in the IRC channel ``#openstack-release``; all release
+  liaisons should be present there.
+
+* After the change is up for review, the PTL or a release liaison will
+  have to approve it before it can be approved by the release team. Then,
+  it will be processed automatically by zuul.
+
Things to do after releasing
============================
diff --git a/doc/source/contributor/webapi-version-history.rst b/doc/source/contributor/webapi-version-history.rst
index 731ce2d62..3ac63c403 100644
--- a/doc/source/contributor/webapi-version-history.rst
+++ b/doc/source/contributor/webapi-version-history.rst
@@ -2,6 +2,22 @@
REST API Version History
========================
+1.67 (Victoria, master)
+-----------------------
+
+Add support for the mutually exclusive ``port_uuid`` and ``portgroup_uuid``
+fields by having the node vif_attach API accept those values within
+``vif_info``. If one is specified, then Ironic will attempt to attach the
+VIF to the corresponding port or portgroup.
+
+1.66 (Victoria, master)
+-----------------------
+
+Add a ``network_data`` field to the node object, which will be used by
+standalone ironic to pass L3 network configuration information to the
+ramdisk.
+
1.65 (Ussuri, 15.0)
---------------------
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 39d7481ee..d62cadeee 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -22,6 +22,8 @@ previous release of ironic, append the OpenStack release name to the URL; for
example, the ``ocata`` release is available at
https://docs.openstack.org/ironic/ocata/.
+Found a bug in one of our projects? Please see :doc:`/contributor/bugs`.
+
Installation Guide
==================
diff --git a/doc/source/install/configure-pxe.rst b/doc/source/install/configure-pxe.rst
index 56f345eff..291b101f3 100644
--- a/doc/source/install/configure-pxe.rst
+++ b/doc/source/install/configure-pxe.rst
@@ -357,41 +357,59 @@ on the Bare Metal service node(s) where ``ironic-conductor`` is running.
Ubuntu::
- cp /usr/lib/ipxe/{undionly.kpxe,ipxe.efi} /tftpboot
+ cp /usr/lib/ipxe/{undionly.kpxe,ipxe.efi,snponly.efi} /tftpboot
Fedora/RHEL7/CentOS7::
- cp /usr/share/ipxe/{undionly.kpxe,ipxe.efi} /tftpboot
+ cp /usr/share/ipxe/{undionly.kpxe,ipxe.efi,snponly.efi} /tftpboot
-#. Enable/Configure iPXE in the Bare Metal Service's configuration file
- (/etc/ironic/ironic.conf):
+#. Enable/Configure iPXE overrides in the Bare Metal Service's configuration
+ file **if required** (/etc/ironic/ironic.conf):
.. code-block:: ini
[pxe]
- # Enable iPXE boot. (boolean value)
- ipxe_enabled=True
-
# Neutron bootfile DHCP parameter. (string value)
- pxe_bootfile_name=undionly.kpxe
+ ipxe_bootfile_name=undionly.kpxe
# Bootfile DHCP parameter for UEFI boot mode. (string value)
- uefi_pxe_bootfile_name=ipxe.efi
+ uefi_ipxe_bootfile_name=ipxe.efi
# Template file for PXE configuration. (string value)
- pxe_config_template=$pybasedir/drivers/modules/ipxe_config.template
+ ipxe_config_template=$pybasedir/drivers/modules/ipxe_config.template
+
+ .. note::
+ Most UEFI systems have integrated networking which means the
+ ``[pxe]uefi_ipxe_bootfile_name`` setting should be set to
+ ``snponly.efi``.
+
+ .. note::
+      Setting the iPXE parameters noted in the code block above to no
+      value, in other words setting a line to something like
+      ``ipxe_bootfile_name=``, will result in ironic falling back to the
+      default values of the non-iPXE PXE settings. This is for backwards
+      compatibility.
+
+#. Ensure iPXE is the default PXE, if applicable.
- # Template file for PXE configuration for UEFI boot loader.
- # (string value)
- uefi_pxe_config_template=$pybasedir/drivers/modules/ipxe_config.template
+   In earlier versions of ironic, the ``[pxe]ipxe_enabled`` setting
+   allowed operators to declare that the conductor should operate
+   exclusively as if only iPXE was to be used. As time moved on, iPXE
+   functionality was moved to its own ``ipxe`` boot interface.
+
+   If you want to emulate that same behavior, set the following in the
+   configuration file (/etc/ironic/ironic.conf):
+
+ .. code-block:: ini
+
+ [DEFAULT]
+ default_boot_interface=ipxe
+ enabled_boot_interfaces=ipxe,pxe
.. note::
- The ``[pxe]ipxe_enabled`` option has been deprecated and will be removed
- in the T* development cycle. Users should instead consider use of the
- ``ipxe`` boot interface. The same default use of iPXE functionality can
- be achieved by setting the ``[DEFAULT]default_boot_interface`` option
- to ``ipxe``.
+      The ``[DEFAULT]enabled_boot_interfaces`` setting may be set
+      exclusively to ``ipxe``; however, ironic has multiple interfaces
+      available depending on the hardware types available for use.
#. It is possible to configure the Bare Metal service in such a way
that nodes will boot into the deploy image directly from Object Storage.
@@ -442,7 +460,6 @@ on the Bare Metal service node(s) where ``ironic-conductor`` is running.
sudo service ironic-conductor restart
-
PXE multi-architecture setup
----------------------------
@@ -498,6 +515,10 @@ nodes will be deployed by 'grubaa64.efi', and ppc64 nodes by 'bootppc64'::
commands, you'll need to switch to use ``linux`` and ``initrd`` command
instead.
+.. note::
+   A ``[pxe]ipxe_bootfile_name_by_arch`` setting is available for
+   multi-arch iPXE based deployment, and defaults to the same behavior
+   as the comparable ``[pxe]pxe_bootfile_name_by_arch`` setting for
+   standard PXE.
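+
+As a sketch, such a mapping might look like the following (the file names
+are illustrative and depend on your distribution's iPXE packages):
+
+.. code-block:: ini
+
+   [pxe]
+   ipxe_bootfile_name_by_arch = aarch64:ipxe_aa64.efi,x86_64:ipxe.efi
+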
PXE timeouts tuning
-------------------
diff --git a/doc/source/install/enrollment.rst b/doc/source/install/enrollment.rst
index 4d6b0a4b2..1e0f9957e 100644
--- a/doc/source/install/enrollment.rst
+++ b/doc/source/install/enrollment.rst
@@ -250,6 +250,13 @@ and may be combined if desired.
$ openstack baremetal port create $MAC_ADDRESS --node $NODE_UUID
+ .. note::
+      When it is time to remove the node from the Bare Metal service, the
+      command used to remove the port is ``openstack baremetal port delete
+      <port uuid>``. When doing so, it is important to ensure that the
+      baremetal node is not in ``maintenance``, as the guarding logic that
+      prevents orphaning Neutron Virtual Interfaces (VIFs) will be
+      overridden.
+
.. _enrollment-scheduling:
Adding scheduling information
diff --git a/doc/source/install/standalone.rst b/doc/source/install/standalone.rst
index c1349d468..553c58306 100644
--- a/doc/source/install/standalone.rst
+++ b/doc/source/install/standalone.rst
@@ -2,15 +2,32 @@
Using Bare Metal service as a standalone service
================================================
+Service settings
+----------------
+
It is possible to use the Bare Metal service without other OpenStack services.
You should make the following changes to ``/etc/ironic/ironic.conf``:
-#. To disable usage of Identity service tokens::
+#. Choose an authentication strategy which supports standalone operation;
+   one option is ``noauth``::
[DEFAULT]
...
auth_strategy=noauth
+ Another option is ``http_basic`` where the credentials are stored in an
+ `Apache htpasswd format`_ file::
+
+ [DEFAULT]
+ ...
+ auth_strategy=http_basic
+ http_basic_auth_user_file=/etc/ironic/htpasswd
+
+ Only the ``bcrypt`` format is supported, and the Apache `htpasswd` utility can
+ be used to populate the file with entries, for example::
+
+ htpasswd -nbB myName myPassword >> /etc/ironic/htpasswd
+
#. If you want to disable the Networking service, you should have your network
pre-configured to serve DHCP and TFTP for machines that you're deploying.
To disable it, change the following lines::
@@ -38,6 +55,36 @@ You should make the following changes to ``/etc/ironic/ironic.conf``:
[DEFAULT]
rpc_transport = json-rpc
+   JSON RPC also has its own authentication strategy. If it is not
+   specified, the strategy defaults to ``[DEFAULT]`` ``auth_strategy``.
+   The following will set JSON RPC to ``noauth``:
+
+ .. code-block:: ini
+
+ [json_rpc]
+ auth_strategy = noauth
+
+ For ``http_basic`` the conductor server needs a credentials file to validate
+ requests:
+
+ .. code-block:: ini
+
+ [json_rpc]
+ auth_strategy = http_basic
+ http_basic_auth_user_file = /etc/ironic/htpasswd-json-rpc
+
+ The API server also needs client-side credentials to be specified:
+
+ .. code-block:: ini
+
+ [json_rpc]
+ auth_type = http_basic
+ username = myName
+ password = myPassword
+
+Preparing images
+----------------
+
If you don't use Image service, it's possible to provide images to Bare Metal
service via a URL.
@@ -58,17 +105,37 @@ There are however some limitations for different hardware interfaces:
* :ref:`direct-deploy` requires the instance image be accessible through a
HTTP(s) URL.
-Steps to start a deployment are pretty similar to those when using Compute:
+.. note::
+   The Bare Metal service tracks content changes for non-Glance images by
+   checking their modification date and time. For example, for an HTTP
+   image, if the 'Last-Modified' header value from the response to a HEAD
+   request to "http://my.server.net/images/deploy.ramdisk" is greater
+   than the cached image modification time, Ironic will re-download the
+   content. For "file://" images, the file system modification time is
+   used.
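+
+For example, the modification time the service compares against can be
+inspected with a HEAD request::
+
+    curl -I http://my.server.net/images/deploy.ramdisk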
+
+Using CLI
+---------
-#. To use the
- :python-ironicclient-doc:`openstack baremetal CLI <cli/osc_plugin_cli.html>`,
- set up these environment variables. Since no authentication strategy is
- being used, the value none must be set for OS_AUTH_TYPE. OS_ENDPOINT is
- the URL of the ironic-api process.
- For example::
+To use the
+:python-ironicclient-doc:`openstack baremetal CLI <cli/osc_plugin_cli.html>`,
+set up these environment variables. If the ``noauth`` authentication strategy is
+being used, the value ``none`` must be set for OS_AUTH_TYPE. OS_ENDPOINT is
+the URL of the ironic-api process.
+For example::
+
+ export OS_AUTH_TYPE=none
+ export OS_ENDPOINT=http://localhost:6385/
- export OS_AUTH_TYPE=none
- export OS_ENDPOINT=http://localhost:6385/
+If the ``http_basic`` authentication strategy is being used, the value
+``http_basic`` must be set for OS_AUTH_TYPE. For example::
+
+ export OS_AUTH_TYPE=http_basic
+ export OS_ENDPOINT=http://localhost:6385/
+ export OS_USERNAME=myUser
+ export OS_PASSWORD=myPassword
+
+Enrolling nodes
+---------------
#. Create a node in Bare Metal service. At minimum, you must specify the driver
name (for example, ``ipmi``). You can also specify all the required
@@ -110,6 +177,9 @@ Steps to start a deployment are pretty similar to those when using Compute:
openstack baremetal port create $MAC_ADDRESS --node $NODE_UUID
+Populating instance_info
+------------------------
+
#. You also need to specify image information in the node's ``instance_info``
(see :doc:`creating-images`):
@@ -190,6 +260,15 @@ Steps to start a deployment are pretty similar to those when using Compute:
This setting overrides any previous setting in ``properties`` and will be
removed on undeployment.
+#. For iLO drivers, fields that should be provided are:
+
+ * ``ilo_deploy_iso`` under ``driver_info``;
+
+ * ``ilo_boot_iso``, ``image_source``, ``root_gb`` under ``instance_info``.
+
+Deployment
+----------
+
#. Validate that all parameters are correct::
openstack baremetal node validate $NODE_UUID
@@ -212,19 +291,77 @@ Steps to start a deployment are pretty similar to those when using Compute:
openstack baremetal node deploy $NODE_UUID
-For iLO drivers, fields that should be provided are:
-* ``ilo_deploy_iso`` under ``driver_info``;
+Ramdisk booting
+---------------
-* ``ilo_boot_iso``, ``image_source``, ``root_gb`` under ``instance_info``.
+Advanced operators, specifically ones working with ephemeral workloads,
+may find it more useful to explicitly treat a node as one that would
+always boot from a ramdisk.
-.. note::
- The Bare Metal service tracks content changes for non-Glance images by
- checking their modification date and time. For example, for HTTP image,
- if 'Last-Modified' header value from response to a HEAD request to
- "http://my.server.net/images/deploy.ramdisk" is greater than cached image
- modification time, Ironic will re-download the content. For "file://"
- images, the file system modification time is used.
+This functionality is largely intended for network booting, however some
+other boot interfaces, such as ``redfish-virtual-media``, support
+enabling the same basic functionality through the existing interfaces.
+
+To use this functionality, a few different settings must be modified.
+
+#. Change the ``deploy_interface`` on the node to ``ramdisk``::
+
+ openstack baremetal node set $NODE_UUID \
+ --deploy-interface ramdisk
+
+#. Set a kernel and ramdisk to be utilized::
+
+ openstack baremetal node set $NODE_UUID \
+ --instance-info kernel=$KERNEL_URL \
+ --instance-info ramdisk=$RAMDISK_URL
+
+#. Deploy the node::
+
+ openstack baremetal node deploy $NODE_UUID
+
+ .. warning::
+      Configuration drives, also known as configdrives, are not supported
+      with the ``ramdisk`` deploy interface. Please ensure your ramdisk
+      CPIO archive contains all necessary configuration and credentials.
+      This is because no disk image is written to the disk of the node
+      being provisioned with a ramdisk.
+
+The node ramdisk components will then be assembled by the conductor,
+appropriate configuration put in place, and the node powered on. From
+there, normal node booting will occur. Upon undeployment of the node,
+normal cleaning procedures will occur as configured within the conductor.
+
+Ramdisk booting with ISO media
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Currently supported with the ``redfish-virtual-media`` and ``ipxe`` boot
+interfaces, an operator may request an explicit ISO file to be booted.
+
+#. Store the URL of the ISO image in ``instance_info/boot_iso``,
+   instead of a ``kernel`` or ``ramdisk`` setting::
+
+ openstack baremetal node set $NODE_UUID \
+ --instance-info boot_iso=$BOOT_ISO_URL
+
+#. Deploy the node::
+
+ openstack baremetal node deploy $NODE_UUID
+
+
+.. warning::
+   This feature, when utilized with the ``ipxe`` ``boot_interface``,
+   will only allow a kernel and ramdisk to be booted from the supplied
+   ISO file. Any additional contents, such as additional ramdisk contents
+   or installer package files, will be unavailable after the boot of the
+   operating system. Operators wishing to leverage this functionality for
+   actions such as OS installation should explore use of the standard
+   ``ramdisk`` ``deploy_interface`` along with the
+   ``instance_info/kernel_append_params`` setting to pass arbitrary
+   settings, such as a mirror URL for the initial ramdisk to load data
+   from. This is a limitation of iPXE and the overall boot process of the
+   operating system, where memory allocated by iPXE is released.
Other references
@@ -232,3 +369,4 @@ Other references
* :ref:`local-boot-without-compute`
+.. _`Apache htpasswd format`: https://httpd.apache.org/docs/current/misc/password_encryptions.html
diff --git a/driver-requirements.txt b/driver-requirements.txt
index b8d91d0bc..539d27ba9 100644
--- a/driver-requirements.txt
+++ b/driver-requirements.txt
@@ -4,7 +4,7 @@
# python projects they should package as optional dependencies for Ironic.
# These are available on pypi
-proliantutils>=2.9.1
+proliantutils>=2.9.5
pysnmp>=4.3.0,<5.0.0
python-scciclient>=0.8.0
python-dracclient>=3.1.0,<5.0.0
@@ -17,7 +17,7 @@ sushy>=3.2.0
ansible>=2.7
# HUAWEI iBMC hardware type uses the python-ibmcclient library
-python-ibmcclient>=0.1.0
+python-ibmcclient>=0.2.2,<0.3.0
# Dell EMC iDRAC sushy OEM extension
sushy-oem-idrac<=1.0.0
diff --git a/ironic/api/app.py b/ironic/api/app.py
index 46a6333e1..8780037b1 100644
--- a/ironic/api/app.py
+++ b/ironic/api/app.py
@@ -15,7 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from ironic_lib import auth_basic
import keystonemiddleware.audit as audit_middleware
+from keystonemiddleware import auth_token
from oslo_config import cfg
import oslo_middleware.cors as cors_middleware
from oslo_middleware import healthcheck
@@ -27,7 +29,7 @@ from ironic.api import config
from ironic.api.controllers import base
from ironic.api import hooks
from ironic.api import middleware
-from ironic.api.middleware import auth_token
+from ironic.api.middleware import auth_public_routes
from ironic.api.middleware import json_ext
from ironic.common import exception
from ironic.conf import CONF
@@ -97,9 +99,18 @@ def setup_app(pecan_config=None, extra_hooks=None):
reason=e
)
+ auth_middleware = None
if CONF.auth_strategy == "keystone":
- app = auth_token.AuthTokenMiddleware(
- app, {"oslo_config_config": cfg.CONF},
+ auth_middleware = auth_token.AuthProtocol(
+ app, {"oslo_config_config": cfg.CONF})
+ elif CONF.auth_strategy == "http_basic":
+ auth_middleware = auth_basic.BasicAuthMiddleware(
+ app, cfg.CONF.http_basic_auth_user_file)
+
+ if auth_middleware:
+ app = auth_public_routes.AuthPublicRoutes(
+ app,
+ auth=auth_middleware,
public_api_routes=pecan_config.app.acl_public_routes)
if CONF.profiler.enabled:
diff --git a/ironic/api/args.py b/ironic/api/args.py
new file mode 100644
index 000000000..7addecf8b
--- /dev/null
+++ b/ironic/api/args.py
@@ -0,0 +1,381 @@
+# Copyright 2011-2019 the WSME authors and contributors
+# (See https://opendev.org/x/wsme/)
+#
+# This module is part of WSME and is also released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import decimal
+import json
+import logging
+
+from dateutil import parser as dateparser
+
+from ironic.api import types as atypes
+from ironic.common import exception
+
+LOG = logging.getLogger(__name__)
+
+
+CONTENT_TYPE = 'application/json'
+ACCEPT_CONTENT_TYPES = [
+ CONTENT_TYPE,
+ 'text/javascript',
+ 'application/javascript'
+]
+ENUM_TRUE = ('true', 't', 'yes', 'y', 'on', '1')
+ENUM_FALSE = ('false', 'f', 'no', 'n', 'off', '0')
+
+
+def fromjson_array(datatype, value):
+ if not isinstance(value, list):
+ raise ValueError("Value not a valid list: %s" % value)
+ return [fromjson(datatype.item_type, item) for item in value]
+
+
+def fromjson_dict(datatype, value):
+ if not isinstance(value, dict):
+ raise ValueError("Value not a valid dict: %s" % value)
+ return dict((
+ (fromjson(datatype.key_type, item[0]),
+ fromjson(datatype.value_type, item[1]))
+ for item in value.items()))
+
+
+def fromjson_bool(value):
+ if isinstance(value, (int, bool)):
+ return bool(value)
+ if value in ENUM_TRUE:
+ return True
+ if value in ENUM_FALSE:
+ return False
+ raise ValueError("Value not an unambiguous boolean: %s" % value)
+
+
+def fromjson(datatype, value):
+ """A generic converter from json base types to python datatype.
+
+ """
+ if value is None:
+ return None
+
+ if isinstance(datatype, atypes.ArrayType):
+ return fromjson_array(datatype, value)
+
+ if isinstance(datatype, atypes.DictType):
+ return fromjson_dict(datatype, value)
+
+ if datatype is bytes:
+ if isinstance(value, (str, int, float)):
+ return str(value).encode('utf8')
+ return value
+
+ if datatype is str:
+ if isinstance(value, bytes):
+ return value.decode('utf-8')
+ return value
+
+ if datatype in (int, float):
+ return datatype(value)
+
+ if datatype is bool:
+ return fromjson_bool(value)
+
+ if datatype is decimal.Decimal:
+ return decimal.Decimal(value)
+
+ if datatype is datetime.datetime:
+ return dateparser.parse(value)
+
+ if atypes.iscomplex(datatype):
+ return fromjson_complex(datatype, value)
+
+ if atypes.isusertype(datatype):
+ return datatype.frombasetype(fromjson(datatype.basetype, value))
+
+ return value
+
+
+def fromjson_complex(datatype, value):
+ obj = datatype()
+ attributes = atypes.list_attributes(datatype)
+
+ # Here we check that all the attributes in the value are also defined
+ # in our type definition, otherwise we raise an Error.
+ v_keys = set(value.keys())
+ a_keys = set(adef.name for adef in attributes)
+ if not v_keys <= a_keys:
+ raise exception.UnknownAttribute(None, v_keys - a_keys)
+
+ for attrdef in attributes:
+ if attrdef.name in value:
+ try:
+ val_fromjson = fromjson(attrdef.datatype,
+ value[attrdef.name])
+ except exception.UnknownAttribute as e:
+ e.add_fieldname(attrdef.name)
+ raise
+ if getattr(attrdef, 'readonly', False):
+ raise exception.InvalidInput(attrdef.name, val_fromjson,
+ "Cannot set read only field.")
+ setattr(obj, attrdef.key, val_fromjson)
+ elif attrdef.mandatory:
+ raise exception.InvalidInput(attrdef.name, None,
+ "Mandatory field missing.")
+
+ return atypes.validate_value(datatype, obj)
+
+
+def parse(s, datatypes, bodyarg, encoding='utf8'):
+ jload = json.load
+ if not hasattr(s, 'read'):
+ if isinstance(s, bytes):
+ s = s.decode(encoding)
+ jload = json.loads
+ try:
+ jdata = jload(s)
+ except ValueError:
+ raise exception.ClientSideError("Request is not in valid JSON format")
+ if bodyarg:
+ argname = list(datatypes.keys())[0]
+ try:
+ kw = {argname: fromjson(datatypes[argname], jdata)}
+ except ValueError as e:
+ raise exception.InvalidInput(argname, jdata, e.args[0])
+ except exception.UnknownAttribute as e:
+ # We only know the fieldname at this level, not in the
+ # called function. We fill in this information here.
+ e.add_fieldname(argname)
+ raise
+ else:
+ kw = {}
+ extra_args = []
+ if not isinstance(jdata, dict):
+ raise exception.ClientSideError("Request must be a JSON dict")
+ for key in jdata:
+ if key not in datatypes:
+ extra_args.append(key)
+ else:
+ try:
+ kw[key] = fromjson(datatypes[key], jdata[key])
+ except ValueError as e:
+ raise exception.InvalidInput(key, jdata[key], e.args[0])
+ except exception.UnknownAttribute as e:
+ # We only know the fieldname at this level, not in the
+ # called function. We fill in this information here.
+ e.add_fieldname(key)
+ raise
+ if extra_args:
+ raise exception.UnknownArgument(', '.join(extra_args))
+ return kw
+
+
+def from_param(datatype, value):
+ if datatype is datetime.datetime:
+ return dateparser.parse(value) if value else None
+
+ if isinstance(datatype, atypes.UserType):
+ return datatype.frombasetype(
+ from_param(datatype.basetype, value))
+
+ if isinstance(datatype, atypes.ArrayType):
+ if value is None:
+ return value
+ return [
+ from_param(datatype.item_type, item)
+ for item in value
+ ]
+
+ return datatype(value) if value is not None else None
+
+
+def from_params(datatype, params, path, hit_paths):
+ if isinstance(datatype, atypes.ArrayType):
+ return array_from_params(datatype, params, path, hit_paths)
+
+ if isinstance(datatype, atypes.UserType):
+ return usertype_from_params(datatype, params, path, hit_paths)
+
+ if path in params:
+ assert not isinstance(datatype, atypes.DictType), \
+ 'DictType unsupported'
+ assert not atypes.iscomplex(datatype) or datatype is atypes.File, \
+ 'complex type unsupported'
+ hit_paths.add(path)
+ return from_param(datatype, params[path])
+ return atypes.Unset
+
+
+def array_from_params(datatype, params, path, hit_paths):
+ if hasattr(params, 'getall'):
+ # webob multidict
+ def getall(params, path):
+ return params.getall(path)
+ elif hasattr(params, 'getlist'):
+ # werkzeug multidict
+ def getall(params, path): # noqa
+ return params.getlist(path)
+ if path in params:
+ hit_paths.add(path)
+ return [
+ from_param(datatype.item_type, value)
+ for value in getall(params, path)]
+
+ return atypes.Unset
+
+
+def usertype_from_params(datatype, params, path, hit_paths):
+ if path in params:
+ hit_paths.add(path)
+ value = from_param(datatype.basetype, params[path])
+ if value is not atypes.Unset:
+ return datatype.frombasetype(value)
+ return atypes.Unset
+
+
+def args_from_args(funcdef, args, kwargs):
+ newargs = []
+ for argdef, arg in zip(funcdef.arguments[:len(args)], args):
+ try:
+ newargs.append(from_param(argdef.datatype, arg))
+ except Exception as e:
+ if isinstance(argdef.datatype, atypes.UserType):
+ datatype_name = argdef.datatype.name
+ elif isinstance(argdef.datatype, type):
+ datatype_name = argdef.datatype.__name__
+ else:
+ datatype_name = argdef.datatype.__class__.__name__
+ raise exception.InvalidInput(
+ argdef.name,
+ arg,
+ "unable to convert to %(datatype)s. Error: %(error)s" % {
+ 'datatype': datatype_name, 'error': e})
+ newkwargs = {}
+ for argname, value in kwargs.items():
+ newkwargs[argname] = from_param(
+ funcdef.get_arg(argname).datatype, value
+ )
+ return newargs, newkwargs
+
+
+def args_from_params(funcdef, params):
+ kw = {}
+ hit_paths = set()
+ for argdef in funcdef.arguments:
+ value = from_params(
+ argdef.datatype, params, argdef.name, hit_paths)
+ if value is not atypes.Unset:
+ kw[argdef.name] = value
+ paths = set(params.keys())
+ unknown_paths = paths - hit_paths
+ if '__body__' in unknown_paths:
+ unknown_paths.remove('__body__')
+ if not funcdef.ignore_extra_args and unknown_paths:
+ raise exception.UnknownArgument(', '.join(unknown_paths))
+ return [], kw
+
+
+def args_from_body(funcdef, body, mimetype):
+ if funcdef.body_type is not None:
+ datatypes = {funcdef.arguments[-1].name: funcdef.body_type}
+ else:
+ datatypes = dict(((a.name, a.datatype) for a in funcdef.arguments))
+
+ if not body:
+ return (), {}
+
+ if mimetype == "application/x-www-form-urlencoded":
+ # the parameters should have been parsed in params
+ return (), {}
+ elif mimetype not in ACCEPT_CONTENT_TYPES:
+ raise exception.ClientSideError("Unknown mimetype: %s" % mimetype,
+ status_code=415)
+
+ try:
+ kw = parse(
+ body, datatypes, bodyarg=funcdef.body_type is not None
+ )
+ except exception.UnknownArgument:
+ if not funcdef.ignore_extra_args:
+ raise
+ kw = {}
+
+ return (), kw
+
+
+def combine_args(funcdef, akw, allow_override=False):
+ newargs, newkwargs = [], {}
+ for args, kwargs in akw:
+ for i, arg in enumerate(args):
+ n = funcdef.arguments[i].name
+ if not allow_override and n in newkwargs:
+ raise exception.ClientSideError(
+ "Parameter %s was given several times" % n)
+ newkwargs[n] = arg
+ for name, value in kwargs.items():
+ n = str(name)
+ if not allow_override and n in newkwargs:
+ raise exception.ClientSideError(
+ "Parameter %s was given several times" % n)
+ newkwargs[n] = value
+ return newargs, newkwargs
+
+
+def get_args(funcdef, args, kwargs, params, body, mimetype):
+ """Combine arguments from multiple sources
+
+ Combine arguments from :
+ * the host framework args and kwargs
+ * the request params
+ * the request body
+
+ Note that the host framework args and kwargs can be overridden
+ by arguments from params of body
+
+ """
+ # get the body from params if not given directly
+ if not body and '__body__' in params:
+ body = params['__body__']
+
+ # extract args from the host args and kwargs
+ from_args = args_from_args(funcdef, args, kwargs)
+
+ # extract args from the request parameters
+ from_params = args_from_params(funcdef, params)
+
+ # extract args from the request body
+ from_body = args_from_body(funcdef, body, mimetype)
+
+ # combine params and body arguments
+ from_params_and_body = combine_args(
+ funcdef,
+ (from_params, from_body)
+ )
+
+ args, kwargs = combine_args(
+ funcdef,
+ (from_args, from_params_and_body),
+ allow_override=True
+ )
+ check_arguments(funcdef, args, kwargs)
+ return args, kwargs
+
+
+def check_arguments(funcdef, args, kw):
+ """Check if some arguments are missing"""
+ assert len(args) == 0
+ for arg in funcdef.arguments:
+ if arg.mandatory and arg.name not in kw:
+ raise exception.MissingArgument(arg.name)
diff --git a/ironic/api/controllers/link.py b/ironic/api/controllers/link.py
index 8f2549c9b..490a78ab5 100644
--- a/ironic/api/controllers/link.py
+++ b/ironic/api/controllers/link.py
@@ -14,8 +14,6 @@
# under the License.
from ironic import api
-from ironic.api.controllers import base
-from ironic.api import types as atypes
def build_url(resource, resource_args, bookmark=False, base_url=None):
@@ -30,28 +28,15 @@ def build_url(resource, resource_args, bookmark=False, base_url=None):
return template % {'url': base_url, 'res': resource, 'args': resource_args}
-class Link(base.Base):
- """A link representation."""
-
- href = str
- """The url of a link."""
-
- rel = str
- """The name of a link."""
-
- type = str
- """Indicates the type of document/link."""
-
- @staticmethod
- def make_link(rel_name, url, resource, resource_args,
- bookmark=False, type=atypes.Unset):
- href = build_url(resource, resource_args,
- bookmark=bookmark, base_url=url)
- return Link(href=href, rel=rel_name, type=type)
-
- @classmethod
- def sample(cls):
- sample = cls(href="http://localhost:6385/chassis/"
- "eaaca217-e7d8-47b4-bb41-3f99f20eed89",
- rel="bookmark")
- return sample
+def make_link(rel_name, url, resource, resource_args,
+              bookmark=False, type=None):
+    """Build a dict representing a link"""
+    href = build_url(resource, resource_args,
+                     bookmark=bookmark, base_url=url)
+    link_dict = {
+        'href': href,
+        'rel': rel_name
+    }
+    if type:
+        link_dict['type'] = type
+    return link_dict
diff --git a/ironic/api/controllers/root.py b/ironic/api/controllers/root.py
index 42308fd82..88681874e 100644
--- a/ironic/api/controllers/root.py
+++ b/ironic/api/controllers/root.py
@@ -15,64 +15,51 @@
# under the License.
import pecan
-from pecan import rest
-from ironic.api.controllers import base
from ironic.api.controllers import v1
from ironic.api.controllers import version
-from ironic.api import expose
+from ironic.api import method
-class Root(base.Base):
+V1 = v1.Controller()
- name = str
- """The name of the API"""
- description = str
- """Some information about this API"""
+def root():
+ return {
+ 'name': "OpenStack Ironic API",
+ 'description': ("Ironic is an OpenStack project which aims to "
+ "provision baremetal machines."),
+ 'default_version': version.default_version(),
+ 'versions': version.all_versions()
+ }
- versions = [version.Version]
- """Links to all the versions available in this API"""
- default_version = version.Version
- """A link to the default version of the API"""
+class RootController(object):
- @staticmethod
- def convert():
- root = Root()
- root.name = "OpenStack Ironic API"
- root.description = ("Ironic is an OpenStack project which aims to "
- "provision baremetal machines.")
- root.default_version = version.default_version()
- root.versions = [root.default_version]
- return root
-
-
-class RootController(rest.RestController):
-
- _versions = [version.ID_VERSION1]
- """All supported API versions"""
-
- _default_version = version.ID_VERSION1
- """The default API version"""
-
- v1 = v1.Controller()
-
- @expose.expose(Root)
- def get(self):
- # NOTE: The reason why convert() it's being called for every
- # request is because we need to get the host url from
- # the request object to make the links.
- return Root.convert()
+ @method.expose()
+ def index(self, *args):
+ if args:
+ pecan.abort(404)
+ return root()
@pecan.expose()
- def _route(self, args, request=None):
+ def _lookup(self, primary_key, *remainder):
"""Overrides the default routing behavior.
It redirects the request to the default version of the ironic API
if the version number is not specified in the url.
"""
- if args[0] and args[0] not in self._versions:
- args = [self._default_version] + args
- return super(RootController, self)._route(args, request)
+ # support paths which are missing the first version element
+ if primary_key and primary_key != version.ID_VERSION1:
+ remainder = [primary_key] + list(remainder)
+
+ # remove any trailing /
+ if remainder and not remainder[-1]:
+ remainder = remainder[:-1]
+
+ # but ensure /v1 goes to /v1/
+ if not remainder:
+ remainder = ['']
+
+ return V1, remainder
diff --git a/ironic/api/controllers/v1/__init__.py b/ironic/api/controllers/v1/__init__.py
index cd568881f..a944dec69 100644
--- a/ironic/api/controllers/v1/__init__.py
+++ b/ironic/api/controllers/v1/__init__.py
@@ -18,8 +18,9 @@ Version 1 of the Ironic API
Specification can be found at doc/source/webapi/v1.rst
"""
+from http import client as http_client
+
import pecan
-from pecan import rest
from webob import exc
from ironic import api
@@ -39,7 +40,7 @@ from ironic.api.controllers.v1 import utils
from ironic.api.controllers.v1 import versions
from ironic.api.controllers.v1 import volume
from ironic.api.controllers import version
-from ironic.api import expose
+from ironic.api import method
from ironic.common.i18n import _
BASE_VERSION = versions.BASE_VERSION
@@ -57,205 +58,161 @@ def max_version():
versions.min_version_string(), versions.max_version_string())
-class MediaType(base.Base):
- """A media type representation."""
-
- base = str
- type = str
-
- def __init__(self, base, type):
- self.base = base
- self.type = type
-
-
-class V1(base.Base):
- """The representation of the version 1 of the API."""
-
- id = str
- """The ID of the version, also acts as the release number"""
-
- media_types = [MediaType]
- """An array of supported media types for this version"""
-
- links = [link.Link]
- """Links that point to a specific URL for this version and documentation"""
-
- chassis = [link.Link]
- """Links to the chassis resource"""
-
- nodes = [link.Link]
- """Links to the nodes resource"""
-
- ports = [link.Link]
- """Links to the ports resource"""
-
- portgroups = [link.Link]
- """Links to the portgroups resource"""
-
- drivers = [link.Link]
- """Links to the drivers resource"""
-
- volume = [link.Link]
- """Links to the volume resource"""
-
- lookup = [link.Link]
- """Links to the lookup resource"""
-
- heartbeat = [link.Link]
- """Links to the heartbeat resource"""
-
- conductors = [link.Link]
- """Links to the conductors resource"""
-
- allocations = [link.Link]
- """Links to the allocations resource"""
-
- deploy_templates = [link.Link]
- """Links to the deploy_templates resource"""
-
- version = version.Version
- """Version discovery information."""
-
- events = [link.Link]
- """Links to the events resource"""
-
- @staticmethod
- def convert():
- v1 = V1()
- v1.id = "v1"
- v1.links = [link.Link.make_link('self', api.request.public_url,
- 'v1', '', bookmark=True),
- link.Link.make_link('describedby',
- 'https://docs.openstack.org',
- '/ironic/latest/contributor/',
- 'webapi.html',
- bookmark=True, type='text/html')
- ]
- v1.media_types = [MediaType('application/json',
- 'application/vnd.openstack.ironic.v1+json')]
- v1.chassis = [link.Link.make_link('self', api.request.public_url,
- 'chassis', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'chassis', '',
- bookmark=True)
- ]
- v1.nodes = [link.Link.make_link('self', api.request.public_url,
- 'nodes', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'nodes', '',
- bookmark=True)
- ]
- v1.ports = [link.Link.make_link('self', api.request.public_url,
- 'ports', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'ports', '',
- bookmark=True)
- ]
- if utils.allow_portgroups():
- v1.portgroups = [
- link.Link.make_link('self', api.request.public_url,
- 'portgroups', ''),
- link.Link.make_link('bookmark', api.request.public_url,
- 'portgroups', '', bookmark=True)
- ]
- v1.drivers = [link.Link.make_link('self', api.request.public_url,
- 'drivers', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'drivers', '',
- bookmark=True)
- ]
- if utils.allow_volume():
- v1.volume = [
- link.Link.make_link('self',
- api.request.public_url,
- 'volume', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'volume', '',
- bookmark=True)
- ]
- if utils.allow_ramdisk_endpoints():
- v1.lookup = [link.Link.make_link('self', api.request.public_url,
- 'lookup', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'lookup', '',
- bookmark=True)
- ]
- v1.heartbeat = [link.Link.make_link('self',
- api.request.public_url,
- 'heartbeat', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'heartbeat', '',
- bookmark=True)
- ]
- if utils.allow_expose_conductors():
- v1.conductors = [link.Link.make_link('self',
- api.request.public_url,
- 'conductors', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'conductors', '',
- bookmark=True)
- ]
- if utils.allow_allocations():
- v1.allocations = [link.Link.make_link('self',
- api.request.public_url,
- 'allocations', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'allocations', '',
- bookmark=True)
- ]
- if utils.allow_expose_events():
- v1.events = [link.Link.make_link('self', api.request.public_url,
- 'events', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'events', '',
- bookmark=True)
- ]
- if utils.allow_deploy_templates():
- v1.deploy_templates = [
- link.Link.make_link('self',
- api.request.public_url,
- 'deploy_templates', ''),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'deploy_templates', '',
- bookmark=True)
- ]
- v1.version = version.default_version()
- return v1
-
-
-class Controller(rest.RestController):
+def v1():
+ v1 = {
+ 'id': "v1",
+ 'links': [
+ link.make_link('self', api.request.public_url,
+ 'v1', '', bookmark=True),
+ link.make_link('describedby',
+ 'https://docs.openstack.org',
+ '/ironic/latest/contributor/',
+ 'webapi.html',
+ bookmark=True, type='text/html')
+ ],
+ 'media_types': {
+ 'base': 'application/json',
+ 'type': 'application/vnd.openstack.ironic.v1+json'
+ },
+ 'chassis': [
+ link.make_link('self', api.request.public_url,
+ 'chassis', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'chassis', '',
+ bookmark=True)
+ ],
+ 'nodes': [
+ link.make_link('self', api.request.public_url,
+ 'nodes', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'nodes', '',
+ bookmark=True)
+ ],
+ 'ports': [
+ link.make_link('self', api.request.public_url,
+ 'ports', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'ports', '',
+ bookmark=True)
+ ],
+ 'drivers': [
+ link.make_link('self', api.request.public_url,
+ 'drivers', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'drivers', '',
+ bookmark=True)
+ ],
+ 'version': version.default_version()
+ }
+ if utils.allow_portgroups():
+ v1['portgroups'] = [
+ link.make_link('self', api.request.public_url,
+ 'portgroups', ''),
+ link.make_link('bookmark', api.request.public_url,
+ 'portgroups', '', bookmark=True)
+ ]
+ if utils.allow_volume():
+ v1['volume'] = [
+ link.make_link('self',
+ api.request.public_url,
+ 'volume', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'volume', '',
+ bookmark=True)
+ ]
+ if utils.allow_ramdisk_endpoints():
+ v1['lookup'] = [
+ link.make_link('self', api.request.public_url,
+ 'lookup', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'lookup', '',
+ bookmark=True)
+ ]
+ v1['heartbeat'] = [
+ link.make_link('self',
+ api.request.public_url,
+ 'heartbeat', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'heartbeat', '',
+ bookmark=True)
+ ]
+ if utils.allow_expose_conductors():
+ v1['conductors'] = [
+ link.make_link('self',
+ api.request.public_url,
+ 'conductors', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'conductors', '',
+ bookmark=True)
+ ]
+ if utils.allow_allocations():
+ v1['allocations'] = [
+ link.make_link('self',
+ api.request.public_url,
+ 'allocations', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'allocations', '',
+ bookmark=True)
+ ]
+ if utils.allow_expose_events():
+ v1['events'] = [
+ link.make_link('self', api.request.public_url,
+ 'events', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'events', '',
+ bookmark=True)
+ ]
+ if utils.allow_deploy_templates():
+ v1['deploy_templates'] = [
+ link.make_link('self',
+ api.request.public_url,
+ 'deploy_templates', ''),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'deploy_templates', '',
+ bookmark=True)
+ ]
+ return v1
+
+
+class Controller(object):
"""Version 1 API controller root."""
- nodes = node.NodesController()
- ports = port.PortsController()
- portgroups = portgroup.PortgroupsController()
- chassis = chassis.ChassisController()
- drivers = driver.DriversController()
- volume = volume.VolumeController()
- lookup = ramdisk.LookupController()
- heartbeat = ramdisk.HeartbeatController()
- conductors = conductor.ConductorsController()
- allocations = allocation.AllocationsController()
- events = event.EventsController()
- deploy_templates = deploy_template.DeployTemplatesController()
-
- @expose.expose(V1)
- def get(self):
- # NOTE: The reason why convert() it's being called for every
+ _subcontroller_map = {
+ 'nodes': node.NodesController(),
+ 'ports': port.PortsController(),
+ 'portgroups': portgroup.PortgroupsController(),
+ 'chassis': chassis.ChassisController(),
+ 'drivers': driver.DriversController(),
+ 'volume': volume.VolumeController(),
+ 'lookup': ramdisk.LookupController(),
+ 'heartbeat': ramdisk.HeartbeatController(),
+ 'conductors': conductor.ConductorsController(),
+ 'allocations': allocation.AllocationsController(),
+ 'events': event.EventsController(),
+ 'deploy_templates': deploy_template.DeployTemplatesController()
+ }
+
+ @method.expose()
+ def index(self):
+        # NOTE: The reason why v1() is being called for every
# request is because we need to get the host url from
# the request object to make the links.
- return V1.convert()
+ self._add_version_attributes()
+ if api.request.method != "GET":
+ pecan.abort(http_client.METHOD_NOT_ALLOWED)
+
+ return v1()
def _check_version(self, version, headers=None):
if headers is None:
@@ -279,8 +236,7 @@ class Controller(rest.RestController):
'max': versions.max_version_string()},
headers=headers)
- @pecan.expose()
- def _route(self, args, request=None):
+ def _add_version_attributes(self):
v = base.Version(api.request.headers, versions.min_version_string(),
versions.max_version_string())
@@ -295,7 +251,15 @@ class Controller(rest.RestController):
api.response.headers[base.Version.string] = str(v)
api.request.version = v
- return super(Controller, self)._route(args, request)
+ @pecan.expose()
+ def _lookup(self, primary_key, *remainder):
+ self._add_version_attributes()
+
+ controller = self._subcontroller_map.get(primary_key)
+ if not controller:
+ pecan.abort(http_client.NOT_FOUND)
+
+ return controller, remainder
__all__ = ('Controller',)
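
The root controller rewrite above trades RestController._route dispatch
for pecan's generic _lookup hook, reducing routing to a dictionary lookup
on the first path segment. A minimal standalone sketch of the same
pattern (controller and path names here are illustrative, not ironic's):

    from http import client as http_client

    import pecan


    class HelloController(object):
        @pecan.expose('json')
        def index(self):
            return {'hello': 'world'}


    class RootController(object):
        # Map the first path segment to a subcontroller instance.
        _subcontroller_map = {'hello': HelloController()}

        @pecan.expose()
        def _lookup(self, primary_key, *remainder):
            # pecan falls back to _lookup when no attribute matches the
            # requested path; returning (controller, remainder) resumes
            # routing at the subcontroller.
            controller = self._subcontroller_map.get(primary_key)
            if not controller:
                pecan.abort(http_client.NOT_FOUND)
            return controller, remainder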
diff --git a/ironic/api/controllers/v1/allocation.py b/ironic/api/controllers/v1/allocation.py
index 8520558a9..9cf18d0b6 100644
--- a/ironic/api/controllers/v1/allocation.py
+++ b/ironic/api/controllers/v1/allocation.py
@@ -17,7 +17,6 @@ from ironic_lib import metrics_utils
from oslo_utils import uuidutils
import pecan
from webob import exc as webob_exc
-import wsme
from ironic import api
from ironic.api.controllers import base
@@ -66,7 +65,7 @@ class Allocation(base.APIBase):
name = atypes.wsattr(str)
"""The logical name for this allocation"""
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
"""A list containing a self link and associated allocation links"""
state = atypes.wsattr(str, readonly=True)
@@ -108,9 +107,9 @@ class Allocation(base.APIBase):
# This field is only used in POST, never return it.
allocation.node = atypes.Unset
allocation.links = [
- link.Link.make_link('self', url, 'allocations', allocation.uuid),
- link.Link.make_link('bookmark', url, 'allocations',
- allocation.uuid, bookmark=True)
+ link.make_link('self', url, 'allocations', allocation.uuid),
+ link.make_link('bookmark', url, 'allocations',
+ allocation.uuid, bookmark=True)
]
return allocation
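
A note on the substitution repeated throughout this patch: the typed
link.Link class gives way to a module-level link.make_link function that
apparently returns a plain dict (the ['href'] indexing in collection.py
further down relies on this). A sketch of the assumed shape, with an
illustrative host:

    url = 'http://ironic.example.com:6385'
    self_link = link.make_link('self', url, 'allocations', allocation.uuid)
    # Assumed to be roughly:
    # {'href': 'http://ironic.example.com:6385/v1/allocations/<uuid>',
    #  'rel': 'self'}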
@@ -475,7 +474,7 @@ class AllocationsController(pecan.rest.RestController):
self._check_allowed_allocation_fields(fields)
@METRICS.timer('AllocationsController.patch')
- @wsme.validate(types.uuid, [AllocationPatchType])
+ @expose.validate(types.uuid, [AllocationPatchType])
@expose.expose(Allocation, types.uuid_or_name, body=[AllocationPatchType])
def patch(self, allocation_ident, patch):
"""Update an existing allocation.
diff --git a/ironic/api/controllers/v1/bios.py b/ironic/api/controllers/v1/bios.py
index 3a21c5627..db45e3ce3 100644
--- a/ironic/api/controllers/v1/bios.py
+++ b/ironic/api/controllers/v1/bios.py
@@ -37,7 +37,7 @@ class BIOSSetting(base.APIBase):
value = atypes.wsattr(str)
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
def __init__(self, **kwargs):
self.fields = []
@@ -52,11 +52,11 @@ class BIOSSetting(base.APIBase):
def _convert_with_links(bios, node_uuid, url):
"""Add links to the bios setting."""
name = bios.name
- bios.links = [link.Link.make_link('self', url, 'nodes',
- "%s/bios/%s" % (node_uuid, name)),
- link.Link.make_link('bookmark', url, 'nodes',
- "%s/bios/%s" % (node_uuid, name),
- bookmark=True)]
+ bios.links = [link.make_link('self', url, 'nodes',
+ "%s/bios/%s" % (node_uuid, name)),
+ link.make_link('bookmark', url, 'nodes',
+ "%s/bios/%s" % (node_uuid, name),
+ bookmark=True)]
return bios
@classmethod
diff --git a/ironic/api/controllers/v1/chassis.py b/ironic/api/controllers/v1/chassis.py
index d02156c59..ba6db6aad 100644
--- a/ironic/api/controllers/v1/chassis.py
+++ b/ironic/api/controllers/v1/chassis.py
@@ -19,7 +19,6 @@ from http import client as http_client
from ironic_lib import metrics_utils
from oslo_utils import uuidutils
from pecan import rest
-import wsme
from ironic import api
from ironic.api.controllers import base
@@ -59,10 +58,10 @@ class Chassis(base.APIBase):
extra = {str: types.jsontype}
"""The metadata of the chassis"""
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
"""A list containing a self link and associated chassis links"""
- nodes = atypes.wsattr([link.Link], readonly=True)
+ nodes = None
"""Links to the collection of nodes contained in this chassis"""
def __init__(self, **kwargs):
@@ -77,23 +76,23 @@ class Chassis(base.APIBase):
@staticmethod
def _convert_with_links(chassis, url, fields=None):
if fields is None:
- chassis.nodes = [link.Link.make_link('self',
- url,
- 'chassis',
- chassis.uuid + "/nodes"),
- link.Link.make_link('bookmark',
- url,
- 'chassis',
- chassis.uuid + "/nodes",
- bookmark=True)
+ chassis.nodes = [link.make_link('self',
+ url,
+ 'chassis',
+ chassis.uuid + "/nodes"),
+ link.make_link('bookmark',
+ url,
+ 'chassis',
+ chassis.uuid + "/nodes",
+ bookmark=True)
]
- chassis.links = [link.Link.make_link('self',
- url,
- 'chassis', chassis.uuid),
- link.Link.make_link('bookmark',
- url,
- 'chassis', chassis.uuid,
- bookmark=True)
+ chassis.links = [link.make_link('self',
+ url,
+ 'chassis', chassis.uuid),
+ link.make_link('bookmark',
+ url,
+ 'chassis', chassis.uuid,
+ bookmark=True)
]
return chassis
@@ -313,7 +312,7 @@ class ChassisController(rest.RestController):
return Chassis.convert_with_links(new_chassis)
@METRICS.timer('ChassisController.patch')
- @wsme.validate(types.uuid, [ChassisPatchType])
+ @expose.validate(types.uuid, [ChassisPatchType])
@expose.expose(Chassis, types.uuid, body=[ChassisPatchType])
def patch(self, chassis_uuid, patch):
"""Update an existing chassis.
diff --git a/ironic/api/controllers/v1/collection.py b/ironic/api/controllers/v1/collection.py
index 8fc44d62c..c669b9309 100644
--- a/ironic/api/controllers/v1/collection.py
+++ b/ironic/api/controllers/v1/collection.py
@@ -19,6 +19,38 @@ from ironic.api.controllers import link
from ironic.api import types as atypes
+def has_next(collection, limit):
+ """Return whether collection has more items."""
+ return len(collection) and len(collection) == limit
+
+
+def get_next(collection, limit, url=None, key_field='uuid', **kwargs):
+ """Return a link to the next subset of the collection."""
+ if not has_next(collection, limit):
+ return None
+
+ fields = kwargs.pop('fields', None)
+    # NOTE(saga): If the fields argument is present in kwargs and not
+    # None, it is a list, so convert it into a comma separated string.
+ if fields:
+ kwargs['fields'] = ','.join(fields)
+ q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs])
+
+ last_item = collection[-1]
+ # handle items which are either objects or dicts
+ if hasattr(last_item, key_field):
+ marker = getattr(last_item, key_field)
+ else:
+ marker = last_item.get(key_field)
+
+ next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % {
+ 'args': q_args, 'limit': limit,
+ 'marker': marker}
+
+ return link.make_link('next', api.request.public_url,
+ url, next_args)['href']
+
+
class Collection(base.Base):
next = str
@@ -34,23 +66,13 @@ class Collection(base.Base):
def has_next(self, limit):
"""Return whether collection has more items."""
- return len(self.collection) and len(self.collection) == limit
+ return has_next(self.collection, limit)
def get_next(self, limit, url=None, **kwargs):
"""Return a link to the next subset of the collection."""
- if not self.has_next(limit):
- return atypes.Unset
-
resource_url = url or self._type
- fields = kwargs.pop('fields', None)
- # NOTE(saga): If fields argument is present in kwargs and not None. It
- # is a list so convert it into a comma seperated string.
- if fields:
- kwargs['fields'] = ','.join(fields)
- q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs])
- next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % {
- 'args': q_args, 'limit': limit,
- 'marker': getattr(self.collection[-1], self.get_key_field())}
-
- return link.Link.make_link('next', api.request.public_url,
- resource_url, next_args).href
+ the_next = get_next(self.collection, limit, url=resource_url,
+ key_field=self.get_key_field(), **kwargs)
+ if the_next is None:
+ return atypes.Unset
+ return the_next
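
The new module-level helpers let paginated collections be built from bare
lists of either objects or dicts. A rough usage sketch; note that
get_next() reads api.request.public_url, so it only works inside a
request context (the UUIDs and URL below are illustrative):

    ports = [{'uuid': 'aaaa...'}, {'uuid': 'bbbb...'}]
    # A full page (len(collection) == limit) yields a next link:
    get_next(ports, 2, url='ports', key_field='uuid', fields=['uuid'])
    # -> 'http://.../v1/ports?fields=uuid&limit=2&marker=bbbb...'
    # A short page means the collection is exhausted:
    get_next(ports, 10, url='ports')  # -> None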
diff --git a/ironic/api/controllers/v1/conductor.py b/ironic/api/controllers/v1/conductor.py
index 8ab1922ae..096c3c587 100644
--- a/ironic/api/controllers/v1/conductor.py
+++ b/ironic/api/controllers/v1/conductor.py
@@ -53,7 +53,7 @@ class Conductor(base.APIBase):
drivers = atypes.wsattr([str])
"""The drivers enabled on this conductor"""
- links = atypes.wsattr([link.Link])
+ links = None
"""A list containing a self link and associated conductor links"""
def __init__(self, **kwargs):
@@ -72,11 +72,11 @@ class Conductor(base.APIBase):
@staticmethod
def _convert_with_links(conductor, url, fields=None):
- conductor.links = [link.Link.make_link('self', url, 'conductors',
- conductor.hostname),
- link.Link.make_link('bookmark', url, 'conductors',
- conductor.hostname,
- bookmark=True)]
+ conductor.links = [link.make_link('self', url, 'conductors',
+ conductor.hostname),
+ link.make_link('bookmark', url, 'conductors',
+ conductor.hostname,
+ bookmark=True)]
return conductor
@classmethod
diff --git a/ironic/api/controllers/v1/deploy_template.py b/ironic/api/controllers/v1/deploy_template.py
index 326ed91a6..90555bad2 100644
--- a/ironic/api/controllers/v1/deploy_template.py
+++ b/ironic/api/controllers/v1/deploy_template.py
@@ -21,7 +21,6 @@ from oslo_utils import uuidutils
import pecan
from pecan import rest
from webob import exc as webob_exc
-import wsme
from ironic import api
from ironic.api.controllers import base
@@ -83,7 +82,7 @@ class DeployTemplate(base.APIBase):
steps = atypes.wsattr([DeployStepType], mandatory=True)
"""The deploy steps of this deploy template."""
- links = atypes.wsattr([link.Link])
+ links = None
"""A list containing a self link and associated deploy template links."""
extra = {str: types.jsontype}
@@ -149,11 +148,11 @@ class DeployTemplate(base.APIBase):
@staticmethod
def _convert_with_links(template, url, fields=None):
template.links = [
- link.Link.make_link('self', url, 'deploy_templates',
- template.uuid),
- link.Link.make_link('bookmark', url, 'deploy_templates',
- template.uuid,
- bookmark=True)
+ link.make_link('self', url, 'deploy_templates',
+ template.uuid),
+ link.make_link('bookmark', url, 'deploy_templates',
+ template.uuid,
+ bookmark=True)
]
return template
@@ -383,7 +382,7 @@ class DeployTemplatesController(rest.RestController):
return api_template
@METRICS.timer('DeployTemplatesController.patch')
- @wsme.validate(types.uuid, types.boolean, [DeployTemplatePatchType])
+ @expose.validate(types.uuid, types.boolean, [DeployTemplatePatchType])
@expose.expose(DeployTemplate, types.uuid_or_name, types.boolean,
body=[DeployTemplatePatchType])
def patch(self, template_ident, patch=None):
diff --git a/ironic/api/controllers/v1/driver.py b/ironic/api/controllers/v1/driver.py
index ef63074b3..2e87a20ee 100644
--- a/ironic/api/controllers/v1/driver.py
+++ b/ironic/api/controllers/v1/driver.py
@@ -96,10 +96,10 @@ class Driver(base.Base):
type = str
"""Whether the driver is classic or dynamic (hardware type)"""
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
"""A list containing self and bookmark links"""
- properties = atypes.wsattr([link.Link], readonly=True)
+ properties = None
"""A list containing links to driver properties"""
"""Default interface for a hardware type"""
@@ -146,23 +146,23 @@ class Driver(base.Base):
driver.name = name
driver.hosts = hosts
driver.links = [
- link.Link.make_link('self',
- api.request.public_url,
- 'drivers', name),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'drivers', name,
- bookmark=True)
+ link.make_link('self',
+ api.request.public_url,
+ 'drivers', name),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'drivers', name,
+ bookmark=True)
]
if api_utils.allow_links_node_states_and_driver_properties():
driver.properties = [
- link.Link.make_link('self',
- api.request.public_url,
- 'drivers', name + "/properties"),
- link.Link.make_link('bookmark',
- api.request.public_url,
- 'drivers', name + "/properties",
- bookmark=True)
+ link.make_link('self',
+ api.request.public_url,
+ 'drivers', name + "/properties"),
+ link.make_link('bookmark',
+ api.request.public_url,
+ 'drivers', name + "/properties",
+ bookmark=True)
]
if api_utils.allow_dynamic_drivers():
diff --git a/ironic/api/controllers/v1/network-data-schema.json b/ironic/api/controllers/v1/network-data-schema.json
new file mode 100644
index 000000000..7162daf34
--- /dev/null
+++ b/ironic/api/controllers/v1/network-data-schema.json
@@ -0,0 +1,580 @@
+{
+ "$schema": "http://openstack.org/nova/network_data.json#",
+ "id": "http://openstack.org/nova/network_data.json",
+ "type": "object",
+ "title": "OpenStack Nova network metadata schema",
+ "description": "Schema of Nova instance network configuration information",
+ "required": [
+ "links",
+ "networks",
+ "services"
+ ],
+ "properties": {
+ "links": {
+ "$id": "#/properties/links",
+ "type": "array",
+ "title": "L2 interfaces settings",
+ "items": {
+ "$id": "#/properties/links/items",
+ "oneOf": [
+ {
+ "$ref": "#/definitions/l2_link"
+ },
+ {
+ "$ref": "#/definitions/l2_bond"
+ },
+ {
+ "$ref": "#/definitions/l2_vlan"
+ }
+ ]
+ }
+ },
+ "networks": {
+ "$id": "#/properties/networks",
+ "type": "array",
+ "title": "L3 networks",
+ "items": {
+ "$id": "#/properties/networks/items",
+ "oneOf": [
+ {
+ "$ref": "#/definitions/l3_ipv4_network"
+ },
+ {
+ "$ref": "#/definitions/l3_ipv6_network"
+ }
+ ]
+ }
+ },
+ "services": {
+ "$ref": "#/definitions/services"
+ }
+ },
+ "definitions": {
+ "l2_address": {
+ "$id": "#/definitions/l2_address",
+ "type": "string",
+ "pattern": "(?i)^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$",
+ "title": "L2 interface address",
+ "examples": [
+ "fa:16:3e:9c:bf:3d"
+ ]
+ },
+ "l2_id": {
+ "$id": "#/definitions/l2_id",
+ "type": "string",
+ "title": "L2 interface ID",
+ "examples": [
+ "eth0"
+ ]
+ },
+ "l2_mtu": {
+ "$id": "#/definitions/l2_mtu",
+ "title": "L2 interface MTU",
+ "anyOf": [
+ {
+ "type": "number",
+ "minimum": 1,
+ "maximum": 65535
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "examples": [
+ 1500
+ ]
+ },
+ "l2_vif_id": {
+ "$id": "#/definitions/l2_vif_id",
+ "type": "string",
+ "title": "Virtual interface ID",
+ "examples": [
+ "cd9f6d46-4a3a-43ab-a466-994af9db96fc"
+ ]
+ },
+ "l2_link": {
+ "$id": "#/definitions/l2_link",
+ "type": "object",
+ "title": "L2 interface configuration settings",
+ "required": [
+ "ethernet_mac_address",
+ "id",
+ "type"
+ ],
+ "properties": {
+ "id": {
+ "$ref": "#/definitions/l2_id"
+ },
+ "ethernet_mac_address": {
+ "$ref": "#/definitions/l2_address"
+ },
+ "mtu": {
+ "$ref": "#/definitions/l2_mtu"
+ },
+ "type": {
+ "$id": "#/definitions/l2_link/properties/type",
+ "type": "string",
+ "enum": [
+ "bridge",
+ "dvs",
+ "hw_veb",
+ "hyperv",
+ "ovs",
+ "tap",
+ "vhostuser",
+ "vif",
+ "phy"
+ ],
+ "title": "Interface type",
+ "examples": [
+ "bridge"
+ ]
+ },
+ "vif_id": {
+ "$ref": "#/definitions/l2_vif_id"
+ }
+ }
+ },
+ "l2_bond": {
+ "$id": "#/definitions/l2_bond",
+ "type": "object",
+ "title": "L2 bonding interface configuration settings",
+ "required": [
+ "ethernet_mac_address",
+ "id",
+ "type",
+ "bond_mode",
+ "bond_links"
+ ],
+ "properties": {
+ "id": {
+ "$ref": "#/definitions/l2_id"
+ },
+ "ethernet_mac_address": {
+ "$ref": "#/definitions/l2_address"
+ },
+ "mtu": {
+ "$ref": "#/definitions/l2_mtu"
+ },
+ "type": {
+ "$id": "#/definitions/l2_bond/properties/type",
+ "type": "string",
+ "enum": [
+ "bond"
+ ],
+ "title": "Interface type",
+ "examples": [
+ "bond"
+ ]
+ },
+ "vif_id": {
+ "$ref": "#/definitions/l2_vif_id"
+ },
+ "bond_mode": {
+ "$id": "#/definitions/bond/properties/bond_mode",
+ "type": "string",
+ "title": "Port bonding type",
+ "enum": [
+ "802.1ad",
+ "balance-rr",
+ "active-backup",
+ "balance-xor",
+ "broadcast",
+ "balance-tlb",
+ "balance-alb"
+ ],
+ "examples": [
+ "802.1ad"
+ ]
+ },
+ "bond_links": {
+ "$id": "#/definitions/bond/properties/bond_links",
+ "type": "array",
+ "title": "Port bonding links",
+ "items": {
+ "$id": "#/definitions/bond/properties/bond_links/items",
+ "type": "string"
+ }
+ }
+ }
+ },
+ "l2_vlan": {
+ "$id": "#/definitions/l2_vlan",
+ "type": "object",
+ "title": "L2 VLAN interface configuration settings",
+ "required": [
+ "vlan_mac_address",
+ "id",
+ "type",
+ "vlan_link",
+ "vlan_id"
+ ],
+ "properties": {
+ "id": {
+ "$ref": "#/definitions/l2_id"
+ },
+ "vlan_mac_address": {
+ "$ref": "#/definitions/l2_address"
+ },
+ "mtu": {
+ "$ref": "#/definitions/l2_mtu"
+ },
+ "type": {
+ "$id": "#/definitions/l2_vlan/properties/type",
+ "type": "string",
+ "enum": [
+ "vlan"
+ ],
+ "title": "VLAN interface type",
+ "examples": [
+ "vlan"
+ ]
+ },
+ "vif_id": {
+ "$ref": "#/definitions/l2_vif_id"
+ },
+ "vlan_id": {
+ "$id": "#/definitions/l2_vlan/properties/vlan_id",
+ "type": "integer",
+ "title": "VLAN ID"
+ },
+ "vlan_link": {
+ "$id": "#/definitions/l2_vlan/properties/vlan_link",
+ "type": "string",
+ "title": "VLAN link name"
+ }
+ }
+ },
+ "l3_id": {
+ "$id": "#/definitions/l3_id",
+ "type": "string",
+ "title": "Network name",
+ "examples": [
+ "network0"
+ ]
+ },
+ "l3_link": {
+ "$id": "#/definitions/l3_link",
+ "type": "string",
+ "title": "L2 network link to use for L3 interface",
+ "examples": [
+ "99e88329-f20d-4741-9593-25bf07847b16"
+ ]
+ },
+ "l3_network_id": {
+ "$id": "#/definitions/l3_network_id",
+ "type": "string",
+ "title": "Network ID",
+ "examples": [
+ "99e88329-f20d-4741-9593-25bf07847b16"
+ ]
+ },
+ "l3_ipv4_type": {
+ "$id": "#/definitions/l3_ipv4_type",
+ "type": "string",
+ "enum": [
+ "ipv4",
+ "ipv4_dhcp"
+ ],
+ "title": "L3 IPv4 network type",
+ "examples": [
+ "ipv4_dhcp"
+ ]
+ },
+ "l3_ipv6_type": {
+ "$id": "#/definitions/l3_ipv6_type",
+ "type": "string",
+ "enum": [
+ "ipv6",
+ "ipv6_dhcp",
+ "ipv6_slaac"
+ ],
+ "title": "L3 IPv6 network type",
+ "examples": [
+ "ipv6_dhcp"
+ ]
+ },
+ "l3_ipv4_host": {
+ "$id": "#/definitions/l3_ipv4_host",
+ "type": "string",
+ "pattern": "^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$",
+ "title": "L3 IPv4 host address",
+ "examples": [
+ "192.168.81.99"
+ ]
+ },
+ "l3_ipv6_host": {
+ "$id": "#/definitions/l3_ipv6_host",
+ "type": "string",
+ "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))(/[0-9]{1,2})?$",
+ "title": "L3 IPv6 host address",
+ "examples": [
+ "2001:db8:3:4::192.168.81.99"
+ ]
+ },
+ "l3_ipv4_netmask": {
+ "$id": "#/definitions/l3_ipv4_netmask",
+ "type": "string",
+ "pattern": "^(254|252|248|240|224|192|128|0)\\.0\\.0\\.0|255\\.(254|252|248|240|224|192|128|0)\\.0\\.0|255\\.255\\.(254|252|248|240|224|192|128|0)\\.0|255\\.255\\.255\\.(254|252|248|240|224|192|128|0)$",
+ "title": "L3 IPv4 network mask",
+ "examples": [
+ "255.255.252.0"
+ ]
+ },
+ "l3_ipv6_netmask": {
+ "$id": "#/definitions/l3_ipv6_netmask",
+ "type": "string",
+ "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7})|(::))$",
+ "title": "L3 IPv6 network mask",
+ "examples": [
+ "ffff:ffff:ffff:ffff::"
+ ]
+ },
+ "l3_ipv4_nw": {
+ "$id": "#/definitions/l3_ipv4_nw",
+ "type": "string",
+ "pattern": "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$",
+ "title": "L3 IPv4 network address",
+ "examples": [
+ "0.0.0.0"
+ ]
+ },
+ "l3_ipv6_nw": {
+ "$id": "#/definitions/l3_ipv6_nw",
+ "type": "string",
+ "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7})|(::))$",
+ "title": "L3 IPv6 network address",
+ "examples": [
+ "8000::"
+ ]
+ },
+ "l3_ipv4_gateway": {
+ "$id": "#/definitions/l3_ipv4_gateway",
+ "type": "string",
+ "pattern": "^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$",
+ "title": "L3 IPv4 gateway address",
+ "examples": [
+ "192.168.200.1"
+ ]
+ },
+ "l3_ipv6_gateway": {
+ "$id": "#/definitions/l3_ipv6_gateway",
+ "type": "string",
+ "pattern": "^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$",
+ "title": "L3 IPv6 gateway address",
+ "examples": [
+ "2001:db8:3:4::192.168.81.99"
+ ]
+ },
+ "l3_ipv4_network_route": {
+ "$id": "#/definitions/l3_ipv4_network_route",
+ "type": "object",
+ "title": "L3 IPv4 routing configuration item",
+ "required": [
+ "gateway",
+ "netmask",
+ "network"
+ ],
+ "properties": {
+ "network": {
+ "$ref": "#/definitions/l3_ipv4_nw"
+ },
+ "netmask": {
+ "$ref": "#/definitions/l3_ipv4_netmask"
+ },
+ "gateway": {
+ "$ref": "#/definitions/l3_ipv4_gateway"
+ },
+ "services": {
+ "$ref": "#/definitions/ipv4_services"
+ }
+ }
+ },
+ "l3_ipv6_network_route": {
+ "$id": "#/definitions/l3_ipv6_network_route",
+ "type": "object",
+ "title": "L3 IPv6 routing configuration item",
+ "required": [
+ "gateway",
+ "netmask",
+ "network"
+ ],
+ "properties": {
+ "network": {
+ "$ref": "#/definitions/l3_ipv6_nw"
+ },
+ "netmask": {
+ "$ref": "#/definitions/l3_ipv6_netmask"
+ },
+ "gateway": {
+ "$ref": "#/definitions/l3_ipv6_gateway"
+ },
+ "services": {
+ "$ref": "#/definitions/ipv6_services"
+ }
+ }
+ },
+ "l3_ipv4_network": {
+ "$id": "#/definitions/l3_ipv4_network",
+ "type": "object",
+ "title": "L3 IPv4 network configuration",
+ "required": [
+ "id",
+ "link",
+ "network_id",
+ "type"
+ ],
+ "properties": {
+ "id": {
+ "$ref": "#/definitions/l3_id"
+ },
+ "link": {
+ "$ref": "#/definitions/l3_link"
+ },
+ "network_id": {
+ "$ref": "#/definitions/l3_network_id"
+ },
+ "type": {
+ "$ref": "#/definitions/l3_ipv4_type"
+ },
+ "ip_address": {
+ "$ref": "#/definitions/l3_ipv4_host"
+ },
+ "netmask": {
+ "$ref": "#/definitions/l3_ipv4_netmask"
+ },
+ "routes": {
+ "$id": "#/definitions/l3_ipv4_network/routes",
+ "type": "array",
+ "title": "L3 IPv4 network routes",
+ "items": {
+ "$ref": "#/definitions/l3_ipv4_network_route"
+ }
+ }
+ }
+ },
+ "l3_ipv6_network": {
+ "$id": "#/definitions/l3_ipv6_network",
+ "type": "object",
+ "title": "L3 IPv6 network configuration",
+ "required": [
+ "id",
+ "link",
+ "network_id",
+ "type"
+ ],
+ "properties": {
+ "id": {
+ "$ref": "#/definitions/l3_id"
+ },
+ "link": {
+ "$ref": "#/definitions/l3_link"
+ },
+ "network_id": {
+ "$ref": "#/definitions/l3_network_id"
+ },
+ "type": {
+ "$ref": "#/definitions/l3_ipv6_type"
+ },
+ "ip_address": {
+ "$ref": "#/definitions/l3_ipv6_host"
+ },
+ "netmask": {
+ "$ref": "#/definitions/l3_ipv6_netmask"
+ },
+ "routes": {
+ "$id": "#/definitions/properties/l3_ipv6_network/routes",
+ "type": "array",
+ "title": "L3 IPv6 network routes",
+ "items": {
+ "$ref": "#/definitions/l3_ipv6_network_route"
+ }
+ }
+ }
+ },
+ "ipv4_service": {
+ "$id": "#/definitions/ipv4_service",
+ "type": "object",
+ "title": "Service on a IPv4 network",
+ "required": [
+ "address",
+ "type"
+ ],
+ "properties": {
+ "address": {
+ "$ref": "#/definitions/l3_ipv4_host"
+ },
+ "type": {
+ "$id": "#/definitions/ipv4_service/properties/type",
+ "type": "string",
+ "enum": [
+ "dns"
+ ],
+ "title": "Service type",
+ "examples": [
+ "dns"
+ ]
+ }
+ }
+ },
+ "ipv6_service": {
+ "$id": "#/definitions/ipv6_service",
+ "type": "object",
+ "title": "Service on a IPv6 network",
+ "required": [
+ "address",
+ "type"
+ ],
+ "properties": {
+ "address": {
+ "$ref": "#/definitions/l3_ipv6_host"
+ },
+ "type": {
+ "$id": "#/definitions/ipv4_service/properties/type",
+ "type": "string",
+ "enum": [
+ "dns"
+ ],
+ "title": "Service type",
+ "examples": [
+ "dns"
+ ]
+ }
+ }
+ },
+ "ipv4_services": {
+ "$id": "#/definitions/ipv4_services",
+ "type": "array",
+ "title": "Network services on IPv4 network",
+ "items": {
+ "$id": "#/definitions/ipv4_services/items",
+ "$ref": "#/definitions/ipv4_service"
+ }
+ },
+ "ipv6_services": {
+ "$id": "#/definitions/ipv6_services",
+ "type": "array",
+ "title": "Network services on IPv6 network",
+ "items": {
+ "$id": "#/definitions/ipv6_services/items",
+ "$ref": "#/definitions/ipv6_service"
+ }
+ },
+ "services": {
+ "$id": "#/definitions/services",
+ "type": "array",
+ "title": "Network services",
+ "items": {
+ "$id": "#/definitions/services/items",
+ "anyOf": [
+ {
+ "$ref": "#/definitions/ipv4_service"
+ },
+ {
+ "$ref": "#/definitions/ipv6_service"
+ }
+ ]
+ }
+ }
+ }
+}
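
For orientation, a minimal document that should satisfy the schema's
required links/networks/services triple: one physical link, one DHCP
network bound to it, and one DNS service (all identifiers and addresses
below are illustrative):

    sample_network_data = {
        'links': [{
            'id': 'eth0',
            'type': 'phy',
            'ethernet_mac_address': 'fa:16:3e:9c:bf:3d',
        }],
        'networks': [{
            'id': 'network0',
            'type': 'ipv4_dhcp',
            'link': 'eth0',
            'network_id': '99e88329-f20d-4741-9593-25bf07847b16',
        }],
        'services': [{'type': 'dns', 'address': '8.8.8.8'}],
    }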
diff --git a/ironic/api/controllers/v1/node.py b/ironic/api/controllers/v1/node.py
index f0745ce2f..cba7a4e5c 100644
--- a/ironic/api/controllers/v1/node.py
+++ b/ironic/api/controllers/v1/node.py
@@ -15,15 +15,17 @@
import datetime
from http import client as http_client
+import json
+import os
from ironic_lib import metrics_utils
import jsonschema
+from jsonschema import exceptions as json_schema_exc
from oslo_log import log
from oslo_utils import strutils
from oslo_utils import uuidutils
import pecan
from pecan import rest
-import wsme
from ironic import api
from ironic.api.controllers import base
@@ -115,6 +117,10 @@ ALLOWED_TARGET_POWER_STATES = (ir_states.POWER_ON,
_NODE_DESCRIPTION_MAX_LENGTH = 4096
+NETWORK_DATA_SCHEMA = os.path.join(
+ os.path.dirname(__file__), 'network-data-schema.json')
+
+
def get_nodes_controller_reserved_names():
global _NODES_CONTROLLER_RESERVED_WORDS
if _NODES_CONTROLLER_RESERVED_WORDS is None:
@@ -179,6 +185,28 @@ def update_state_in_older_versions(obj):
obj.provision_state = ir_states.INSPECTING
+def validate_network_data(network_data):
+ """Validates node network_data field.
+
+ This method validates network data configuration against JSON
+ schema.
+
+ :param network_data: a network_data field to validate
+ :raises: Invalid if network data is not schema-compliant
+ """
+ with open(NETWORK_DATA_SCHEMA, 'rb') as fl:
+ network_data_schema = json.load(fl)
+
+ try:
+ jsonschema.validate(network_data, network_data_schema)
+
+ except json_schema_exc.ValidationError as e:
+        # NOTE: Even though e.message is deprecated in general, the
+        # jsonschema documentation still recommends using it.
+ msg = _("Invalid network_data: %s ") % e.message
+ raise exception.Invalid(msg)
+
+
class BootDeviceController(rest.RestController):
_custom_actions = {
@@ -315,7 +343,7 @@ class Indicator(base.APIBase):
states = atypes.ArrayType(str)
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
def __init__(self, **kwargs):
self.name = kwargs.get('name')
@@ -327,11 +355,11 @@ class Indicator(base.APIBase):
def _convert_with_links(node_uuid, indicator, url):
"""Add links to the indicator."""
indicator.links = [
- link.Link.make_link(
+ link.make_link(
'self', url, 'nodes',
'%s/management/indicators/%s' % (
node_uuid, indicator.name)),
- link.Link.make_link(
+ link.make_link(
'bookmark', url, 'nodes',
'%s/management/indicators/%s' % (
node_uuid, indicator.name),
@@ -1178,19 +1206,19 @@ class Node(base.APIBase):
_set_chassis_uuid)
"""The UUID of the chassis this node belongs"""
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
"""A list containing a self link and associated node links"""
- ports = atypes.wsattr([link.Link], readonly=True)
+ ports = None
"""Links to the collection of ports on this node"""
- portgroups = atypes.wsattr([link.Link], readonly=True)
+ portgroups = None
"""Links to the collection of portgroups on this node"""
- volume = atypes.wsattr([link.Link], readonly=True)
+ volume = None
"""Links to endpoint for retrieving volume resources on this node"""
- states = atypes.wsattr([link.Link], readonly=True)
+ states = None
"""Links to endpoint for retrieving and setting node states"""
boot_interface = atypes.wsattr(str)
@@ -1265,6 +1293,9 @@ class Node(base.APIBase):
retired_reason = atypes.wsattr(str)
"""Indicates the reason for a node's retirement."""
+ network_data = atypes.wsattr({str: types.jsontype})
+ """Static network configuration JSON ironic will hand over to the node."""
+
# NOTE(tenbrae): "conductor_affinity" shouldn't be presented on the
# API because it's an internal value. Don't add it here.
@@ -1305,38 +1336,38 @@ class Node(base.APIBase):
def _convert_with_links(node, url, fields=None, show_states_links=True,
show_portgroups=True, show_volume=True):
if fields is None:
- node.ports = [link.Link.make_link('self', url, 'nodes',
- node.uuid + "/ports"),
- link.Link.make_link('bookmark', url, 'nodes',
- node.uuid + "/ports",
- bookmark=True)
+ node.ports = [link.make_link('self', url, 'nodes',
+ node.uuid + "/ports"),
+ link.make_link('bookmark', url, 'nodes',
+ node.uuid + "/ports",
+ bookmark=True)
]
if show_states_links:
- node.states = [link.Link.make_link('self', url, 'nodes',
- node.uuid + "/states"),
- link.Link.make_link('bookmark', url, 'nodes',
- node.uuid + "/states",
- bookmark=True)]
+ node.states = [link.make_link('self', url, 'nodes',
+ node.uuid + "/states"),
+ link.make_link('bookmark', url, 'nodes',
+ node.uuid + "/states",
+ bookmark=True)]
if show_portgroups:
node.portgroups = [
- link.Link.make_link('self', url, 'nodes',
- node.uuid + "/portgroups"),
- link.Link.make_link('bookmark', url, 'nodes',
- node.uuid + "/portgroups",
- bookmark=True)]
+ link.make_link('self', url, 'nodes',
+ node.uuid + "/portgroups"),
+ link.make_link('bookmark', url, 'nodes',
+ node.uuid + "/portgroups",
+ bookmark=True)]
if show_volume:
node.volume = [
- link.Link.make_link('self', url, 'nodes',
- node.uuid + "/volume"),
- link.Link.make_link('bookmark', url, 'nodes',
- node.uuid + "/volume",
- bookmark=True)]
-
- node.links = [link.Link.make_link('self', url, 'nodes',
- node.uuid),
- link.Link.make_link('bookmark', url, 'nodes',
- node.uuid, bookmark=True)
+ link.make_link('self', url, 'nodes',
+ node.uuid + "/volume"),
+ link.make_link('bookmark', url, 'nodes',
+ node.uuid + "/volume",
+ bookmark=True)]
+
+ node.links = [link.make_link('self', url, 'nodes',
+ node.uuid),
+ link.make_link('bookmark', url, 'nodes',
+ node.uuid, bookmark=True)
]
return node
@@ -1485,7 +1516,9 @@ class Node(base.APIBase):
automated_clean=None, protected=False,
protected_reason=None, owner=None,
allocation_uuid='982ddb5b-bce5-4d23-8fb8-7f710f648cd5',
- retired=False, retired_reason=None, lessee=None)
+ retired=False, retired_reason=None, lessee=None,
+ network_data={})
+
# NOTE(matty_dubs): The chassis_uuid getter() is based on the
# _chassis_uuid variable:
sample._chassis_uuid = 'edcad704-b2da-41d5-96d9-afd580ecfa12'
@@ -1695,6 +1728,10 @@ class NodeVIFController(rest.RestController):
for that VIF.
"""
rpc_node, topic = self._get_node_and_topic('baremetal:node:vif:attach')
+ if api.request.version.minor >= versions.MINOR_67_NODE_VIF_ATTACH_PORT:
+ if 'port_uuid' in vif and 'portgroup_uuid' in vif:
+ msg = _("Cannot specify both port_uuid and portgroup_uuid")
+ raise exception.Invalid(msg)
api.request.rpcapi.vif_attach(api.request.context, rpc_node.uuid,
vif_info=vif, topic=topic)
@@ -1746,7 +1783,7 @@ class NodesController(rest.RestController):
'instance_info', 'driver_internal_info',
'clean_step', 'deploy_step',
'raid_config', 'target_raid_config',
- 'traits']
+ 'traits', 'network_data']
_subcontroller_map = {
'ports': port.PortsController,
@@ -2231,6 +2268,9 @@ class NodesController(rest.RestController):
msg = _("Allocation UUID cannot be specified, use allocations API")
raise exception.Invalid(msg)
+ if node.network_data is not atypes.Unset:
+ validate_network_data(node.network_data)
+
# NOTE(tenbrae): get_topic_for checks if node.driver is in the hash
# ring and raises NoValidHost if it is not.
# We need to ensure that node has a UUID before it can
@@ -2293,6 +2333,12 @@ class NodesController(rest.RestController):
"characters") % _NODE_DESCRIPTION_MAX_LENGTH
raise exception.Invalid(msg)
+ network_data_fields = api_utils.get_patch_values(
+ patch, '/network_data')
+
+ for network_data in network_data_fields:
+ validate_network_data(network_data)
+
def _authorize_patch_and_get_node(self, node_ident, patch):
# deal with attribute-specific policy rules
policy_checks = []
@@ -2313,7 +2359,7 @@ class NodesController(rest.RestController):
policy_checks, node_ident, with_suffix=True)
@METRICS.timer('NodesController.patch')
- @wsme.validate(types.uuid, types.boolean, [NodePatchType])
+ @expose.validate(types.uuid, types.boolean, [NodePatchType])
@expose.expose(Node, types.uuid_or_name, types.boolean,
body=[NodePatchType])
def patch(self, node_ident, reset_interfaces=None, patch=None):
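
Outside the API service, the check that validate_network_data() applies
on node creation and to /network_data patches can be reproduced with
jsonschema directly. A sketch, reusing the sample document above (the
schema path is illustrative; ironic resolves it next to node.py):

    import json

    import jsonschema

    with open('network-data-schema.json') as fl:
        schema = json.load(fl)

    # Raises jsonschema.exceptions.ValidationError on non-conforming
    # input; validate_network_data() wraps that into ironic's Invalid.
    jsonschema.validate(sample_network_data, schema)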
diff --git a/ironic/api/controllers/v1/port.py b/ironic/api/controllers/v1/port.py
index 61cb11732..3f0ae6b47 100644
--- a/ironic/api/controllers/v1/port.py
+++ b/ironic/api/controllers/v1/port.py
@@ -20,7 +20,6 @@ from ironic_lib import metrics_utils
from oslo_log import log
from oslo_utils import uuidutils
from pecan import rest
-import wsme
from ironic import api
from ironic.api.controllers import base
@@ -156,7 +155,7 @@ class Port(base.APIBase):
physical_network = atypes.StringType(max_length=64)
"""The name of the physical network to which this port is connected."""
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
"""A list containing a self link and associated port links"""
is_smartnic = types.boolean
@@ -200,11 +199,11 @@ class Port(base.APIBase):
url = api.request.public_url
- port.links = [link.Link.make_link('self', url,
- 'ports', port.uuid),
- link.Link.make_link('bookmark', url,
- 'ports', port.uuid,
- bookmark=True)
+ port.links = [link.make_link('self', url,
+ 'ports', port.uuid),
+ link.make_link('bookmark', url,
+ 'ports', port.uuid,
+ bookmark=True)
]
if not sanitize:
@@ -340,7 +339,30 @@ class PortsController(rest.RestController):
def _get_ports_collection(self, node_ident, address, portgroup_ident,
marker, limit, sort_key, sort_dir,
resource_url=None, fields=None, detail=None,
- owner=None):
+ project=None):
+ """Retrieve a collection of ports.
+
+ :param node_ident: UUID or name of a node, to get only ports for that
+ node.
+ :param address: MAC address of a port, to get the port which has
+ this MAC address.
+ :param portgroup_ident: UUID or name of a portgroup, to get only ports
+ for that portgroup.
+ :param marker: pagination marker for large data sets.
+ :param limit: maximum number of resources to return in a single result.
+ This value cannot be larger than the value of max_limit
+ in the [api] section of the ironic configuration, or only
+ max_limit resources will be returned.
+ :param sort_key: column to sort results by. Default: id.
+ :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
+ :param resource_url: Optional, base url to be used for links
+ :param fields: Optional, a list with a specified set of fields
+ of the resource to be returned.
+ :param detail: Optional, show detailed list of ports
+ :param project: Optional, filter by project
+ :returns: a list of ports.
+
+ """
limit = api_utils.validate_limit(limit)
sort_dir = api_utils.validate_sort_dir(sort_dir)
@@ -372,7 +394,7 @@ class PortsController(rest.RestController):
marker_obj,
sort_key=sort_key,
sort_dir=sort_dir,
- owner=owner)
+ project=project)
elif node_ident:
# FIXME(comstud): Since all we need is the node ID, we can
# make this more efficient by only querying
@@ -383,13 +405,13 @@ class PortsController(rest.RestController):
node.id, limit, marker_obj,
sort_key=sort_key,
sort_dir=sort_dir,
- owner=owner)
+ project=project)
elif address:
- ports = self._get_ports_by_address(address, owner=owner)
+ ports = self._get_ports_by_address(address, project=project)
else:
ports = objects.Port.list(api.request.context, limit,
marker_obj, sort_key=sort_key,
- sort_dir=sort_dir, owner=owner)
+ sort_dir=sort_dir, project=project)
parameters = {}
if detail is not None:
@@ -402,17 +424,18 @@ class PortsController(rest.RestController):
sort_dir=sort_dir,
**parameters)
- def _get_ports_by_address(self, address, owner=None):
+ def _get_ports_by_address(self, address, project=None):
"""Retrieve a port by its address.
:param address: MAC address of a port, to get the port which has
this MAC address.
+ :param project: Optional, filter by project
:returns: a list with the port, or an empty list if no port is found.
"""
try:
port = objects.Port.get_by_address(api.request.context, address,
- owner=owner)
+ project=project)
return [port]
except exception.PortNotFound:
return []
@@ -481,7 +504,7 @@ class PortsController(rest.RestController):
for that portgroup.
:raises: NotAcceptable, HTTPNotFound
"""
- owner = api_utils.check_port_list_policy()
+ project = api_utils.check_port_list_policy()
api_utils.check_allow_specify_fields(fields)
self._check_allowed_port_fields(fields)
@@ -504,7 +527,7 @@ class PortsController(rest.RestController):
return self._get_ports_collection(node_uuid or node, address,
portgroup, marker, limit, sort_key,
sort_dir, fields=fields,
- detail=detail, owner=owner)
+ detail=detail, project=project)
@METRICS.timer('PortsController.detail')
@expose.expose(PortCollection, types.uuid_or_name, types.uuid,
@@ -534,7 +557,7 @@ class PortsController(rest.RestController):
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
:raises: NotAcceptable, HTTPNotFound
"""
- owner = api_utils.check_port_list_policy()
+ project = api_utils.check_port_list_policy()
self._check_allowed_port_fields([sort_key])
if portgroup and not api_utils.allow_portgroups_subcontrollers():
@@ -556,7 +579,8 @@ class PortsController(rest.RestController):
resource_url = '/'.join(['ports', 'detail'])
return self._get_ports_collection(node_uuid or node, address,
portgroup, marker, limit, sort_key,
- sort_dir, resource_url, owner=owner)
+ sort_dir, resource_url,
+ project=project)
@METRICS.timer('PortsController.get_one')
@expose.expose(Port, types.uuid, types.listtype)
@@ -660,7 +684,7 @@ class PortsController(rest.RestController):
return Port.convert_with_links(new_port)
@METRICS.timer('PortsController.patch')
- @wsme.validate(types.uuid, [PortPatchType])
+ @expose.validate(types.uuid, [PortPatchType])
@expose.expose(Port, types.uuid, body=[PortPatchType])
def patch(self, port_uuid, patch):
"""Update an existing port.
diff --git a/ironic/api/controllers/v1/portgroup.py b/ironic/api/controllers/v1/portgroup.py
index f55a17377..fe877c67a 100644
--- a/ironic/api/controllers/v1/portgroup.py
+++ b/ironic/api/controllers/v1/portgroup.py
@@ -16,7 +16,6 @@ from http import client as http_client
from ironic_lib import metrics_utils
from oslo_utils import uuidutils
import pecan
-import wsme
from ironic import api
from ironic.api.controllers import base
@@ -91,7 +90,7 @@ class Portgroup(base.APIBase):
name = atypes.wsattr(str)
"""The logical name for this portgroup"""
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
"""A list containing a self link and associated portgroup links"""
standalone_ports_supported = types.boolean
@@ -106,7 +105,7 @@ class Portgroup(base.APIBase):
properties = {str: types.jsontype}
"""This portgroup's properties"""
- ports = atypes.wsattr([link.Link], readonly=True)
+ ports = None
"""Links to the collection of ports of this portgroup"""
def __init__(self, **kwargs):
@@ -134,20 +133,20 @@ class Portgroup(base.APIBase):
"""Add links to the portgroup."""
if fields is None:
portgroup.ports = [
- link.Link.make_link('self', url, 'portgroups',
- portgroup.uuid + "/ports"),
- link.Link.make_link('bookmark', url, 'portgroups',
- portgroup.uuid + "/ports", bookmark=True)
+ link.make_link('self', url, 'portgroups',
+ portgroup.uuid + "/ports"),
+ link.make_link('bookmark', url, 'portgroups',
+ portgroup.uuid + "/ports", bookmark=True)
]
# never expose the node_id attribute
portgroup.node_id = atypes.Unset
- portgroup.links = [link.Link.make_link('self', url,
- 'portgroups', portgroup.uuid),
- link.Link.make_link('bookmark', url,
- 'portgroups', portgroup.uuid,
- bookmark=True)
+ portgroup.links = [link.make_link('self', url,
+ 'portgroups', portgroup.uuid),
+ link.make_link('bookmark', url,
+ 'portgroups', portgroup.uuid,
+ bookmark=True)
]
return portgroup
@@ -511,7 +510,7 @@ class PortgroupsController(pecan.rest.RestController):
return Portgroup.convert_with_links(new_portgroup)
@METRICS.timer('PortgroupsController.patch')
- @wsme.validate(types.uuid_or_name, [PortgroupPatchType])
+ @expose.validate(types.uuid_or_name, [PortgroupPatchType])
@expose.expose(Portgroup, types.uuid_or_name, body=[PortgroupPatchType])
def patch(self, portgroup_ident, patch):
"""Update an existing portgroup.
diff --git a/ironic/api/controllers/v1/ramdisk.py b/ironic/api/controllers/v1/ramdisk.py
index 8d68032b7..a79b070fa 100644
--- a/ironic/api/controllers/v1/ramdisk.py
+++ b/ironic/api/controllers/v1/ramdisk.py
@@ -184,6 +184,7 @@ class HeartbeatController(rest.RestController):
before sending agent_version was introduced so agent v3.0.0 (the
last release before sending agent_version was introduced) will be
assumed.
+ :param agent_token: randomly generated validation token.
:raises: NodeNotFound if node with provided UUID or name was not found.
:raises: InvalidUuidOrName if node_ident is not valid name or UUID.
:raises: NoValidHost if RPC topic for node could not be retrieved.
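
For context, the heartbeat this docstring describes is the periodic POST
the agent ramdisk sends; agent_token support arrived at microversion 1.62
(MINOR_62_AGENT_TOKEN in versions.py below). A hedged sketch of such a
request, with illustrative host, node UUID and values:

    import requests

    node = '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'
    requests.post(
        'http://ironic.example.com:6385/v1/heartbeat/' + node,
        json={
            'callback_url': 'https://192.168.100.2:9999',
            'agent_version': '6.1.0',
            'agent_token': '<token issued at lookup time>',
        },
        headers={'X-OpenStack-Ironic-API-Version': '1.62'},
    )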
diff --git a/ironic/api/controllers/v1/state.py b/ironic/api/controllers/v1/state.py
index 3fa3f7a10..f6972ff82 100644
--- a/ironic/api/controllers/v1/state.py
+++ b/ironic/api/controllers/v1/state.py
@@ -14,7 +14,6 @@
# under the License.
from ironic.api.controllers import base
-from ironic.api.controllers import link
class State(base.APIBase):
@@ -28,5 +27,5 @@ class State(base.APIBase):
available = [str]
"""A list of available states it is able to transition to"""
- links = [link.Link]
+ links = None
"""A list containing a self link and associated state links"""
diff --git a/ironic/api/controllers/v1/utils.py b/ironic/api/controllers/v1/utils.py
index 895947122..742625e6d 100644
--- a/ironic/api/controllers/v1/utils.py
+++ b/ironic/api/controllers/v1/utils.py
@@ -15,6 +15,7 @@
from http import client as http_client
import inspect
+import io
import re
import jsonpatch
@@ -24,8 +25,6 @@ import os_traits
from oslo_config import cfg
from oslo_utils import uuidutils
from pecan import rest
-from webob import static
-import wsme
from ironic import api
from ironic.api.controllers.v1 import versions
@@ -419,21 +418,15 @@ def vendor_passthru(ident, method, topic, data=None, driver_passthru=False):
status_code = http_client.ACCEPTED if response['async'] else http_client.OK
return_value = response['return']
- response_params = {'status_code': status_code}
# Attach the return value to the response object
if response.get('attach'):
if isinstance(return_value, str):
# If unicode, convert to bytes
return_value = return_value.encode('utf-8')
- file_ = atypes.File(content=return_value)
- api.response.app_iter = static.FileIter(file_.file)
- # Since we've attached the return value to the response
- # object the response body should now be empty.
- return_value = None
- response_params['return_type'] = None
+ return_value = io.BytesIO(return_value)
- return wsme.api.Response(return_value, **response_params)
+ return atypes.PassthruResponse(return_value, status_code=status_code)
def check_for_invalid_fields(fields, object_fields):
@@ -492,6 +485,7 @@ VERSIONED_FIELDS = {
'retired': versions.MINOR_61_NODE_RETIRED,
'retired_reason': versions.MINOR_61_NODE_RETIRED,
'lessee': versions.MINOR_65_NODE_LESSEE,
+ 'network_data': versions.MINOR_66_NODE_NETWORK_DATA,
}
for field in V31_FIELDS:
diff --git a/ironic/api/controllers/v1/versions.py b/ironic/api/controllers/v1/versions.py
index 26b5c7722..af65c183d 100644
--- a/ironic/api/controllers/v1/versions.py
+++ b/ironic/api/controllers/v1/versions.py
@@ -103,6 +103,8 @@ BASE_VERSION = 1
# v1.63: Add support for indicators
# v1.64: Add network_type to port.local_link_connection
# v1.65: Add lessee to the node object.
+# v1.66: Add support for node network_data field.
+# v1.67: Add support for port_uuid/portgroup_uuid in node vif_attach
MINOR_0_JUNO = 0
MINOR_1_INITIAL_VERSION = 1
@@ -170,6 +172,8 @@ MINOR_62_AGENT_TOKEN = 62
MINOR_63_INDICATORS = 63
MINOR_64_LOCAL_LINK_CONNECTION_NETWORK_TYPE = 64
MINOR_65_NODE_LESSEE = 65
+MINOR_66_NODE_NETWORK_DATA = 66
+MINOR_67_NODE_VIF_ATTACH_PORT = 67
# When adding another version, update:
# - MINOR_MAX_VERSION
@@ -177,7 +181,7 @@ MINOR_65_NODE_LESSEE = 65
# explanation of what changed in the new version
# - common/release_mappings.py, RELEASE_MAPPING['master']['api']
-MINOR_MAX_VERSION = MINOR_65_NODE_LESSEE
+MINOR_MAX_VERSION = MINOR_67_NODE_VIF_ATTACH_PORT
# String representations of the minor and maximum versions
_MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, MINOR_1_INITIAL_VERSION)
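
Both 1.66 and 1.67 are opt-in through the usual microversion header. For
example, attaching a VIF to a specific port only becomes possible once
the client requests 1.67 (a sketch with illustrative host and IDs):

    import requests

    requests.post(
        'http://ironic.example.com:6385/v1/nodes/node-0/vifs',
        json={'id': '<vif uuid>', 'port_uuid': '<port uuid>'},
        headers={'X-OpenStack-Ironic-API-Version': '1.67'},
    )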
diff --git a/ironic/api/controllers/v1/volume.py b/ironic/api/controllers/v1/volume.py
index 9678ed835..0797cd389 100644
--- a/ironic/api/controllers/v1/volume.py
+++ b/ironic/api/controllers/v1/volume.py
@@ -24,7 +24,6 @@ from ironic.api.controllers.v1 import utils as api_utils
from ironic.api.controllers.v1 import volume_connector
from ironic.api.controllers.v1 import volume_target
from ironic.api import expose
-from ironic.api import types as atypes
from ironic.common import exception
from ironic.common import policy
@@ -36,13 +35,13 @@ class Volume(base.APIBase):
targets controllers.
"""
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
"""A list containing a self link and associated volume links"""
- connectors = atypes.wsattr([link.Link], readonly=True)
+ connectors = None
"""Links to the volume connectors resource"""
- targets = atypes.wsattr([link.Link], readonly=True)
+ targets = None
"""Links to the volume targets resource"""
@staticmethod
@@ -57,19 +56,19 @@ class Volume(base.APIBase):
args = ''
volume.links = [
- link.Link.make_link('self', url, resource, args),
- link.Link.make_link('bookmark', url, resource, args,
- bookmark=True)]
+ link.make_link('self', url, resource, args),
+ link.make_link('bookmark', url, resource, args,
+ bookmark=True)]
volume.connectors = [
- link.Link.make_link('self', url, resource, args + 'connectors'),
- link.Link.make_link('bookmark', url, resource, args + 'connectors',
- bookmark=True)]
+ link.make_link('self', url, resource, args + 'connectors'),
+ link.make_link('bookmark', url, resource, args + 'connectors',
+ bookmark=True)]
volume.targets = [
- link.Link.make_link('self', url, resource, args + 'targets'),
- link.Link.make_link('bookmark', url, resource, args + 'targets',
- bookmark=True)]
+ link.make_link('self', url, resource, args + 'targets'),
+ link.make_link('bookmark', url, resource, args + 'targets',
+ bookmark=True)]
return volume
diff --git a/ironic/api/controllers/v1/volume_connector.py b/ironic/api/controllers/v1/volume_connector.py
index 680648a7c..595798cd8 100644
--- a/ironic/api/controllers/v1/volume_connector.py
+++ b/ironic/api/controllers/v1/volume_connector.py
@@ -18,7 +18,6 @@ from http import client as http_client
from ironic_lib import metrics_utils
from oslo_utils import uuidutils
from pecan import rest
-import wsme
from ironic import api
from ironic.api.controllers import base
@@ -88,7 +87,7 @@ class VolumeConnector(base.APIBase):
_set_node_identifiers, mandatory=True)
"""The UUID of the node this volume connector belongs to"""
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
"""A list containing a self link and associated volume connector links"""
def __init__(self, **kwargs):
@@ -118,13 +117,13 @@ class VolumeConnector(base.APIBase):
@staticmethod
def _convert_with_links(connector, url):
- connector.links = [link.Link.make_link('self', url,
- 'volume/connectors',
- connector.uuid),
- link.Link.make_link('bookmark', url,
- 'volume/connectors',
- connector.uuid,
- bookmark=True)
+ connector.links = [link.make_link('self', url,
+ 'volume/connectors',
+ connector.uuid),
+ link.make_link('bookmark', url,
+ 'volume/connectors',
+ connector.uuid,
+ bookmark=True)
]
return connector
@@ -377,7 +376,7 @@ class VolumeConnectorsController(rest.RestController):
return VolumeConnector.convert_with_links(new_connector)
@METRICS.timer('VolumeConnectorsController.patch')
- @wsme.validate(types.uuid, [VolumeConnectorPatchType])
+ @expose.validate(types.uuid, [VolumeConnectorPatchType])
@expose.expose(VolumeConnector, types.uuid,
body=[VolumeConnectorPatchType])
def patch(self, connector_uuid, patch):
diff --git a/ironic/api/controllers/v1/volume_target.py b/ironic/api/controllers/v1/volume_target.py
index 80ebb291f..6667bcca5 100644
--- a/ironic/api/controllers/v1/volume_target.py
+++ b/ironic/api/controllers/v1/volume_target.py
@@ -18,7 +18,6 @@ from http import client as http_client
from ironic_lib import metrics_utils
from oslo_utils import uuidutils
from pecan import rest
-import wsme
from ironic import api
from ironic.api.controllers import base
@@ -95,7 +94,7 @@ class VolumeTarget(base.APIBase):
_set_node_identifiers, mandatory=True)
"""The UUID of the node this volume target belongs to"""
- links = atypes.wsattr([link.Link], readonly=True)
+ links = None
"""A list containing a self link and associated volume target links"""
def __init__(self, **kwargs):
@@ -125,13 +124,13 @@ class VolumeTarget(base.APIBase):
@staticmethod
def _convert_with_links(target, url):
- target.links = [link.Link.make_link('self', url,
- 'volume/targets',
- target.uuid),
- link.Link.make_link('bookmark', url,
- 'volume/targets',
- target.uuid,
- bookmark=True)
+ target.links = [link.make_link('self', url,
+ 'volume/targets',
+ target.uuid),
+ link.make_link('bookmark', url,
+ 'volume/targets',
+ target.uuid,
+ bookmark=True)
]
return target
@@ -391,7 +390,7 @@ class VolumeTargetsController(rest.RestController):
return VolumeTarget.convert_with_links(new_target)
@METRICS.timer('VolumeTargetsController.patch')
- @wsme.validate(types.uuid, [VolumeTargetPatchType])
+ @expose.validate(types.uuid, [VolumeTargetPatchType])
@expose.expose(VolumeTarget, types.uuid,
body=[VolumeTargetPatchType])
def patch(self, target_uuid, patch):
diff --git a/ironic/api/controllers/version.py b/ironic/api/controllers/version.py
index a24ab32c6..b8b567f56 100644
--- a/ironic/api/controllers/version.py
+++ b/ironic/api/controllers/version.py
@@ -11,53 +11,40 @@
# under the License.
from ironic import api
-from ironic.api.controllers import base
from ironic.api.controllers import link
ID_VERSION1 = 'v1'
-class Version(base.Base):
- """An API version representation.
+def all_versions():
+ return [default_version()]
- This class represents an API version, including the minimum and
- maximum minor versions that are supported within the major version.
- """
-
- id = str
- """The ID of the (major) version, also acts as the release number"""
-
- links = [link.Link]
- """A Link that point to a specific version of the API"""
-
- status = str
- """Status of the version.
- One of:
- * CURRENT - the latest version of API,
- * SUPPORTED - supported, but not latest, version of API,
- * DEPRECATED - supported, but deprecated, version of API.
- """
+def default_version():
+ """Return a dict representing the current default version
- version = str
- """The current, maximum supported (major.minor) version of API."""
+ id: The ID of the (major) version, also acts as the release number
+ links: A list containing one link that points to the current version
+ of the API
- min_version = str
- """Minimum supported (major.minor) version of API."""
+ status: Status of the version, one of CURRENT, SUPPORTED, DEPRECATED
- def __init__(self, id, min_version, version, status='CURRENT'):
- self.id = id
- self.links = [link.Link.make_link('self', api.request.public_url,
- self.id, '', bookmark=True)]
- self.status = status
- self.version = version
- self.min_version = min_version
+    min_version: Minimum supported (major.minor) version of API.
+    version: The current, maximum supported (major.minor) version of API.
+ """
-def default_version():
# NOTE(dtantsur): avoid circular imports
from ironic.api.controllers.v1 import versions
- return Version(ID_VERSION1,
- versions.min_version_string(),
- versions.max_version_string())
+ return {
+ 'id': ID_VERSION1,
+ 'links': [
+ link.make_link('self',
+ api.request.public_url,
+ ID_VERSION1, '', bookmark=True)
+ ],
+ 'status': 'CURRENT',
+ 'min_version': versions.min_version_string(),
+ 'version': versions.max_version_string()
+ }
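
With the Version class gone, default_version() hands back a plain dict
that serializes as-is. Roughly (host illustrative; the version strings
follow versions.py above):

    {
        'id': 'v1',
        'links': [{'href': 'http://ironic.example.com:6385/v1/',
                   'rel': 'self'}],
        'status': 'CURRENT',
        'min_version': '1.1',
        'version': '1.67',
    }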
diff --git a/ironic/api/expose.py b/ironic/api/expose.py
index 46d4649a6..16eecb1c2 100644
--- a/ironic/api/expose.py
+++ b/ironic/api/expose.py
@@ -14,11 +14,209 @@
# License for the specific language governing permissions and limitations
# under the License.
-import wsmeext.pecan as wsme_pecan
+import datetime
+import functools
+from http import client as http_client
+import inspect
+import json
+import sys
+import traceback
+
+from oslo_config import cfg
+from oslo_log import log
+import pecan
+from webob import static
+
+from ironic.api import args as api_args
+from ironic.api import functions
+from ironic.api import types as atypes
+
+LOG = log.getLogger(__name__)
+
+
+class JSonRenderer(object):
+ @staticmethod
+ def __init__(path, extra_vars):
+ pass
+
+ @staticmethod
+ def render(template_path, namespace):
+ if 'faultcode' in namespace:
+ return encode_error(None, namespace)
+ result = encode_result(
+ namespace['result'],
+ namespace['datatype']
+ )
+ return result
+
+
+pecan.templating._builtin_renderers['wsmejson'] = JSonRenderer
+
+pecan_json_decorate = pecan.expose(
+ template='wsmejson:',
+ content_type='application/json',
+ generic=False)
def expose(*args, **kwargs):
- """Ensure that only JSON, and not XML, is supported."""
- if 'rest_content_types' not in kwargs:
- kwargs['rest_content_types'] = ('json',)
- return wsme_pecan.wsexpose(*args, **kwargs)
+ sig = functions.signature(*args, **kwargs)
+
+ def decorate(f):
+ sig(f)
+ funcdef = functions.FunctionDefinition.get(f)
+ funcdef.resolve_types(atypes.registry)
+
+ @functools.wraps(f)
+ def callfunction(self, *args, **kwargs):
+ return_type = funcdef.return_type
+
+ try:
+ args, kwargs = api_args.get_args(
+ funcdef, args, kwargs, pecan.request.params,
+ pecan.request.body, pecan.request.content_type
+ )
+ result = f(self, *args, **kwargs)
+
+ # NOTE: Support setting of status_code from the function definition
+ pecan.response.status = funcdef.status_code
+ if isinstance(result, atypes.PassthruResponse):
+ pecan.response.status = result.status_code
+
+ # NOTE(lucasagomes): If the return code is 204
+ # (No Response) we have to make sure that we are not
+ # returning anything in the body response and the
+ # content-length is 0
+ if result.status_code == 204:
+ return_type = None
+
+ if callable(getattr(result.obj, 'read', None)):
+ # Stream the file-like data directly to the response
+ pecan.response.app_iter = static.FileIter(result.obj)
+ return_type = None
+ result = None
+ else:
+ result = result.obj
+
+ except Exception:
+ try:
+ exception_info = sys.exc_info()
+ orig_exception = exception_info[1]
+ orig_code = getattr(orig_exception, 'code', None)
+ data = format_exception(
+ exception_info,
+ cfg.CONF.debug_tracebacks_in_api
+ )
+ finally:
+ del exception_info
+
+ if orig_code and orig_code in http_client.responses:
+ pecan.response.status = orig_code
+ else:
+ pecan.response.status = 500
+
+ return data
+
+ if return_type is None:
+ pecan.request.pecan['content_type'] = None
+ pecan.response.content_type = None
+ return ''
+
+ return dict(
+ datatype=return_type,
+ result=result
+ )
+
+ pecan_json_decorate(callfunction)
+ pecan.util._cfg(callfunction)['argspec'] = inspect.getfullargspec(f)
+ callfunction._wsme_definition = funcdef
+ return callfunction
+
+ return decorate
+
+
+def tojson(datatype, value):
+ """A generic converter from python to jsonify-able datatypes.
+
+ """
+ if value is None:
+ return None
+ if isinstance(datatype, atypes.ArrayType):
+ return [tojson(datatype.item_type, item) for item in value]
+ if isinstance(datatype, atypes.DictType):
+ return dict((
+ (tojson(datatype.key_type, item[0]),
+ tojson(datatype.value_type, item[1]))
+ for item in value.items()
+ ))
+ if isinstance(value, datetime.datetime):
+ return value.isoformat()
+ if atypes.iscomplex(datatype):
+ d = dict()
+ for attr in atypes.list_attributes(datatype):
+ attr_value = getattr(value, attr.key)
+ if attr_value is not atypes.Unset:
+ d[attr.name] = tojson(attr.datatype, attr_value)
+ return d
+ if isinstance(datatype, atypes.UserType):
+ return tojson(datatype.basetype, datatype.tobasetype(value))
+ return value
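
A quick sketch of the conversions tojson performs (values illustrative):

    tojson(int, 42)                                  # 42
    tojson(atypes.ArrayType(str), ['a', 'b'])        # ['a', 'b']
    tojson(atypes.DictType(str, int), {'n': 1})      # {'n': 1}
    tojson(datetime.datetime, datetime.datetime(2020, 1, 1))
    # '2020-01-01T00:00:00'

Note that Unset attributes of complex types are omitted from the resulting
dict rather than serialized as null.
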
+
+
+def encode_result(value, datatype, **options):
+ jsondata = tojson(datatype, value)
+ return json.dumps(jsondata)
+
+
+def encode_error(context, errordetail):
+ return json.dumps(errordetail)
+
+
+def format_exception(excinfo, debug=False):
+ """Extract informations that can be sent to the client."""
+ error = excinfo[1]
+ code = getattr(error, 'code', None)
+ if code and code in http_client.responses and (400 <= code < 500):
+ faultstring = (error.faultstring if hasattr(error, 'faultstring')
+ else str(error))
+ faultcode = getattr(error, 'faultcode', 'Client')
+ r = dict(faultcode=faultcode,
+ faultstring=faultstring)
+ LOG.debug("Client-side error: %s", r['faultstring'])
+ r['debuginfo'] = None
+ return r
+ else:
+ faultstring = str(error)
+ debuginfo = "\n".join(traceback.format_exception(*excinfo))
+
+ LOG.error('Server-side error: "%s". Detail: \n%s',
+ faultstring, debuginfo)
+
+ faultcode = getattr(error, 'faultcode', 'Server')
+ r = dict(faultcode=faultcode, faultstring=faultstring)
+ if debug:
+ r['debuginfo'] = debuginfo
+ else:
+ r['debuginfo'] = None
+ return r
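
Both branches produce the same wire format. A 4xx fault serializes roughly
as (values illustrative):

    {"faultcode": "Client",
     "faultstring": "Node abc could not be found.",
     "debuginfo": null}

while any other exception is reported with faultcode "Server" and a 500
status, carrying the traceback in debuginfo only when
debug_tracebacks_in_api is enabled.
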
+
+
+class validate(object):
+ """Decorator that define the arguments types of a function.
+
+
+ Example::
+
+ class MyController(object):
+ @expose(str)
+ @validate(datetime.date, datetime.time)
+ def format(self, d, t):
+ return d.isoformat() + ' ' + t.isoformat()
+ """
+ def __init__(self, *param_types):
+ self.param_types = param_types
+
+ def __call__(self, func):
+ argspec = functions.getargspec(func)
+ fd = functions.FunctionDefinition.get(func)
+ fd.set_arg_types(argspec, self.param_types)
+ return func
diff --git a/ironic/api/functions.py b/ironic/api/functions.py
new file mode 100644
index 000000000..8554bd7e4
--- /dev/null
+++ b/ironic/api/functions.py
@@ -0,0 +1,182 @@
+# Copyright 2011-2019 the WSME authors and contributors
+# (See https://opendev.org/x/wsme/)
+#
+# This module is part of WSME and is also released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+import inspect
+import logging
+
+log = logging.getLogger(__name__)
+
+
+def iswsmefunction(f):
+ return hasattr(f, '_wsme_definition')
+
+
+def wrapfunc(f):
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ return f(*args, **kwargs)
+ wrapper._wsme_original_func = f
+ return wrapper
+
+
+def getargspec(f):
+ f = getattr(f, '_wsme_original_func', f)
+ func_argspec = inspect.getfullargspec(f)
+ return func_argspec[0:4]
+
+
+class FunctionArgument(object):
+ """An argument definition of an api entry"""
+ def __init__(self, name, datatype, mandatory, default):
+ #: argument name
+ self.name = name
+
+ #: Data type
+ self.datatype = datatype
+
+ #: True if the argument is mandatory
+ self.mandatory = mandatory
+
+ #: Default value if argument is omitted
+ self.default = default
+
+ def resolve_type(self, registry):
+ self.datatype = registry.resolve_type(self.datatype)
+
+
+class FunctionDefinition(object):
+ """An api entry definition"""
+ def __init__(self, func):
+ #: Function name
+ self.name = func.__name__
+
+ #: Function documentation
+ self.doc = func.__doc__
+
+ #: Return type
+ self.return_type = None
+
+ #: The function arguments (list of :class:`FunctionArgument`)
+ self.arguments = []
+
+ #: If the body carries the data of a single argument, its type
+ self.body_type = None
+
+ #: Status code
+ self.status_code = 200
+
+ #: True if extra arguments should be ignored instead of being
+ #: inserted into the kwargs of the function or raising
+ #: UnknownArgument exceptions
+ self.ignore_extra_args = False
+
+ #: Dictionary of protocol-specific options.
+ self.extra_options = None
+
+ @staticmethod
+ def get(func):
+ """Returns the :class:`FunctionDefinition` of a method."""
+ if not hasattr(func, '_wsme_definition'):
+ fd = FunctionDefinition(func)
+ func._wsme_definition = fd
+
+ return func._wsme_definition
+
+ def get_arg(self, name):
+ """Returns a :class:`FunctionArgument` from its name"""
+ for arg in self.arguments:
+ if arg.name == name:
+ return arg
+ return None
+
+ def resolve_types(self, registry):
+ self.return_type = registry.resolve_type(self.return_type)
+ self.body_type = registry.resolve_type(self.body_type)
+ for arg in self.arguments:
+ arg.resolve_type(registry)
+
+ def set_options(self, body=None, ignore_extra_args=False, status_code=200,
+ rest_content_types=('json', 'xml'), **extra_options):
+ self.body_type = body
+ self.status_code = status_code
+ self.ignore_extra_args = ignore_extra_args
+ self.rest_content_types = rest_content_types
+ self.extra_options = extra_options
+
+ def set_arg_types(self, argspec, arg_types):
+ args, varargs, keywords, defaults = argspec
+ if args[0] == 'self':
+ args = args[1:]
+ arg_types = list(arg_types)
+ if self.body_type is not None:
+ arg_types.append(self.body_type)
+ for i, argname in enumerate(args):
+ datatype = arg_types[i]
+ mandatory = defaults is None or i < (len(args) - len(defaults))
+ default = None
+ if not mandatory:
+ default = defaults[i - (len(args) - len(defaults))]
+ self.arguments.append(FunctionArgument(argname, datatype,
+ mandatory, default))
+
+
+class signature(object):
+
+ """Decorator that specify the argument types of an exposed function.
+
+ :param return_type: Type of the value returned by the function
+ :param argN: Type of the Nth argument
+ :param body: If the function takes a final argument that is supposed to be
+ the request body by itself, its type.
+ :param status_code: HTTP return status code of the function.
+ :param ignore_extra_args: Allow extra/unknown arguments (defaults to False)
+
+ Most of the time this decorator is not meant to be used directly,
+ unless WSME is not being used on top of another framework.
+
+ If an adapter is used, it will provide either a specialised version of
+ this decorator, or a new decorator named @wsexpose that takes the same
+ parameters (and in addition exposes the function, hence its name).
+ """
+
+ def __init__(self, *types, **options):
+ self.return_type = types[0] if types else None
+ self.arg_types = []
+ if len(types) > 1:
+ self.arg_types.extend(types[1:])
+ if 'body' in options:
+ self.arg_types.append(options['body'])
+ self.wrap = options.pop('wrap', False)
+ self.options = options
+
+ def __call__(self, func):
+ argspec = getargspec(func)
+ if self.wrap:
+ func = wrapfunc(func)
+ fd = FunctionDefinition.get(func)
+ if fd.extra_options is not None:
+ raise ValueError("This function is already exposed")
+ fd.return_type = self.return_type
+ fd.set_options(**self.options)
+ if self.arg_types:
+ fd.set_arg_types(argspec, self.arg_types)
+ return func
+
+
+sig = signature
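
A minimal sketch of the decorator in use (controller and names are
illustrative):

    class DemoController(object):
        @signature(str, int, str)
        def greet(self, count, name='world'):
            return ' '.join([name] * count)

    fd = FunctionDefinition.get(DemoController.greet)
    fd.return_type                  # str
    fd.get_arg('name').mandatory    # False, default is 'world'
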
diff --git a/ironic/api/hooks.py b/ironic/api/hooks.py
index 2e7cc9e7a..758e56e29 100644
--- a/ironic/api/hooks.py
+++ b/ironic/api/hooks.py
@@ -97,7 +97,7 @@ class ContextHook(hooks.PecanHook):
ctx = context.RequestContext.from_environ(state.request.environ,
is_public_api=is_public_api)
# Do not pass any token with context for noauth mode
- if cfg.CONF.auth_strategy == 'noauth':
+ if cfg.CONF.auth_strategy != 'keystone':
ctx.auth_token = None
creds = ctx.to_policy_values()
diff --git a/ironic/api/method.py b/ironic/api/method.py
new file mode 100644
index 000000000..50f672a29
--- /dev/null
+++ b/ironic/api/method.py
@@ -0,0 +1,95 @@
+#
+# Copyright 2015 Rackspace, Inc
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+from http import client as http_client
+import json
+import sys
+import traceback
+
+from oslo_config import cfg
+from oslo_log import log
+import pecan
+
+LOG = log.getLogger(__name__)
+
+
+pecan_json_decorate = pecan.expose(
+ content_type='application/json',
+ generic=False)
+
+
+def expose():
+
+ def decorate(f):
+
+ @functools.wraps(f)
+ def callfunction(self, *args, **kwargs):
+ try:
+ result = f(self, *args, **kwargs)
+
+ except Exception:
+ try:
+ exception_info = sys.exc_info()
+ orig_exception = exception_info[1]
+ orig_code = getattr(orig_exception, 'code', None)
+ result = format_exception(
+ exception_info,
+ cfg.CONF.debug_tracebacks_in_api
+ )
+ finally:
+ del exception_info
+
+ if orig_code and orig_code in http_client.responses:
+ pecan.response.status = orig_code
+ else:
+ pecan.response.status = 500
+
+ return json.dumps(result)
+
+ pecan_json_decorate(callfunction)
+ return callfunction
+
+ return decorate
+
+
+def format_exception(excinfo, debug=False):
+ """Extract informations that can be sent to the client."""
+ error = excinfo[1]
+ code = getattr(error, 'code', None)
+ if code and code in http_client.responses and (400 <= code < 500):
+ faultstring = (error.faultstring if hasattr(error, 'faultstring')
+ else str(error))
+ faultcode = getattr(error, 'faultcode', 'Client')
+ r = dict(faultcode=faultcode,
+ faultstring=faultstring)
+ LOG.debug("Client-side error: %s", r['faultstring'])
+ r['debuginfo'] = None
+ return r
+ else:
+ faultstring = str(error)
+ debuginfo = "\n".join(traceback.format_exception(*excinfo))
+
+ LOG.error('Server-side error: "%s". Detail: \n%s',
+ faultstring, debuginfo)
+
+ faultcode = getattr(error, 'faultcode', 'Server')
+ r = dict(faultcode=faultcode, faultstring=faultstring)
+ if debug:
+ r['debuginfo'] = debuginfo
+ else:
+ r['debuginfo'] = None
+ return r
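
method.expose is the slimmed-down variant for controllers that already
return JSON-compatible structures; a rough usage sketch (the controller
name is hypothetical):

    class HealthController(object):

        @expose()
        def index(self):
            return {'status': 'ok'}   # serialized via json.dumps
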
diff --git a/ironic/api/middleware/__init__.py b/ironic/api/middleware/__init__.py
index 81bda5d3b..415ca07ea 100644
--- a/ironic/api/middleware/__init__.py
+++ b/ironic/api/middleware/__init__.py
@@ -12,15 +12,15 @@
# License for the specific language governing permissions and limitations
# under the License.
-from ironic.api.middleware import auth_token
+from ironic.api.middleware import auth_public_routes
from ironic.api.middleware import json_ext
from ironic.api.middleware import parsable_error
ParsableErrorMiddleware = parsable_error.ParsableErrorMiddleware
-AuthTokenMiddleware = auth_token.AuthTokenMiddleware
+AuthPublicRoutes = auth_public_routes.AuthPublicRoutes
JsonExtensionMiddleware = json_ext.JsonExtensionMiddleware
__all__ = ('ParsableErrorMiddleware',
- 'AuthTokenMiddleware',
+ 'AuthPublicRoutes',
'JsonExtensionMiddleware')
diff --git a/ironic/api/middleware/auth_token.py b/ironic/api/middleware/auth_public_routes.py
index 1f7500c0a..d5c5fa848 100644
--- a/ironic/api/middleware/auth_token.py
+++ b/ironic/api/middleware/auth_public_routes.py
@@ -14,23 +14,22 @@
import re
-from keystonemiddleware import auth_token
-
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import utils
-class AuthTokenMiddleware(auth_token.AuthProtocol):
- """A wrapper on Keystone auth_token middleware.
+class AuthPublicRoutes(object):
+ """A wrapper on authentication middleware.
Does not perform verification of authentication tokens
for public routes in the API.
"""
- def __init__(self, app, conf, public_api_routes=None):
+ def __init__(self, app, auth, public_api_routes=None):
api_routes = [] if public_api_routes is None else public_api_routes
self._ironic_app = app
+ self._middleware = auth
# TODO(mrda): Remove .xml and ensure that doesn't result in a
# 401 Authentication Required instead of 404 Not Found
route_pattern_tpl = '%s(\\.json|\\.xml)?$'
@@ -42,8 +41,6 @@ class AuthTokenMiddleware(auth_token.AuthProtocol):
raise exception.ConfigInvalid(
error_msg=_('Cannot compile public API routes: %s') % e)
- super(AuthTokenMiddleware, self).__init__(app, conf)
-
def __call__(self, env, start_response):
path = utils.safe_rstrip(env.get('PATH_INFO'), '/')
@@ -56,4 +53,4 @@ class AuthTokenMiddleware(auth_token.AuthProtocol):
if env['is_public_api']:
return self._ironic_app(env, start_response)
- return super(AuthTokenMiddleware, self).__call__(env, start_response)
+ return self._middleware(env, start_response)
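
The authentication middleware itself is now injected by the caller instead
of being inherited from keystonemiddleware; a sketch of the expected wiring
(assuming a configured AuthProtocol instance):

    from keystonemiddleware import auth_token

    auth = auth_token.AuthProtocol(app, dict(CONF.keystone_authtoken))
    app = AuthPublicRoutes(app, auth, public_api_routes=['/', '/v1'])
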
diff --git a/ironic/api/types.py b/ironic/api/types.py
index 527abd722..b022e50a3 100644
--- a/ironic/api/types.py
+++ b/ironic/api/types.py
@@ -1,29 +1,709 @@
# coding: utf-8
#
-# Copyright 2020 Red Hat, Inc.
-# All Rights Reserved.
+# Copyright 2011-2019 the WSME authors and contributors
+# (See https://opendev.org/x/wsme/)
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# This module is part of WSME and is also released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from wsme.types import ArrayType # noqa
-from wsme.types import Base # noqa
-from wsme.types import DictType # noqa
-from wsme.types import Enum # noqa
-from wsme.types import File # noqa
-from wsme.types import IntegerType # noqa
-from wsme.types import StringType # noqa
-from wsme.types import text # noqa
-from wsme.types import Unset # noqa
-from wsme.types import UserType # noqa
-from wsme.types import wsattr # noqa
-from wsme.types import wsproperty # noqa
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import base64
+import datetime
+import decimal
+import inspect
+import re
+import weakref
+
+from oslo_log import log
+
+from ironic.common import exception
+
+
+LOG = log.getLogger(__name__)
+
+
+pod_types = (int, bytes, str, float, bool)
+native_types = pod_types + (datetime.datetime, decimal.Decimal)
+_promotable_types = (int, str, bytes)
+
+
+class ArrayType(object):
+ def __init__(self, item_type):
+ if iscomplex(item_type):
+ self._item_type = weakref.ref(item_type)
+ else:
+ self._item_type = item_type
+
+ def __hash__(self):
+ return hash(self.item_type)
+
+ def __eq__(self, other):
+ return isinstance(other, ArrayType) \
+ and self.item_type == other.item_type
+
+ def sample(self):
+ return [getattr(self.item_type, 'sample', self.item_type)()]
+
+ @property
+ def item_type(self):
+ if isinstance(self._item_type, weakref.ref):
+ return self._item_type()
+ else:
+ return self._item_type
+
+ def validate(self, value):
+ if value is None:
+ return
+ if not isinstance(value, list):
+ raise ValueError("Wrong type. Expected '[%s]', got '%s'" % (
+ self.item_type, type(value)
+ ))
+ return [
+ validate_value(self.item_type, item)
+ for item in value
+ ]
+
+
+class DictType(object):
+ def __init__(self, key_type, value_type):
+ if key_type not in (int, bytes, str, float, bool):
+ raise ValueError("Dictionaries key can only be a pod type")
+ self.key_type = key_type
+ if iscomplex(value_type):
+ self._value_type = weakref.ref(value_type)
+ else:
+ self._value_type = value_type
+
+ def __hash__(self):
+ return hash((self.key_type, self.value_type))
+
+ def sample(self):
+ key = getattr(self.key_type, 'sample', self.key_type)()
+ value = getattr(self.value_type, 'sample', self.value_type)()
+ return {key: value}
+
+ @property
+ def value_type(self):
+ if isinstance(self._value_type, weakref.ref):
+ return self._value_type()
+ else:
+ return self._value_type
+
+ def validate(self, value):
+ if not isinstance(value, dict):
+ raise ValueError("Wrong type. Expected '{%s: %s}', got '%s'" % (
+ self.key_type, self.value_type, type(value)
+ ))
+ return dict((
+ (
+ validate_value(self.key_type, key),
+ validate_value(self.value_type, v)
+ ) for key, v in value.items()
+ ))
+
+
+class UserType(object):
+ basetype = None
+ name = None
+
+ def validate(self, value):
+ return value
+
+ def tobasetype(self, value):
+ return value
+
+ def frombasetype(self, value):
+ return value
+
+
+def isusertype(class_):
+ return isinstance(class_, UserType)
+
+
+class BinaryType(UserType):
+ """A user type that use base64 strings to carry binary data.
+
+ """
+ basetype = bytes
+ name = 'binary'
+
+ def tobasetype(self, value):
+ if value is None:
+ return None
+ return base64.encodebytes(value)
+
+ def frombasetype(self, value):
+ if value is None:
+ return None
+ return base64.decodebytes(value)
+
+
+#: The binary almost-native type
+binary = BinaryType()
+
+
+class IntegerType(UserType):
+ """A simple integer type. Can validate a value range.
+
+ :param minimum: Possible minimum value
+ :param maximum: Possible maximum value
+
+ Example::
+
+ Price = IntegerType(minimum=1)
+
+ """
+ basetype = int
+ name = "integer"
+
+ def __init__(self, minimum=None, maximum=None):
+ self.minimum = minimum
+ self.maximum = maximum
+
+ @staticmethod
+ def frombasetype(value):
+ return int(value) if value is not None else None
+
+ def validate(self, value):
+ if self.minimum is not None and value < self.minimum:
+ error = 'Value should be greater than or equal to %s' % self.minimum
+ raise ValueError(error)
+
+ if self.maximum is not None and value > self.maximum:
+ error = 'Value should be less than or equal to %s' % self.maximum
+ raise ValueError(error)
+
+ return value
+
+
+class StringType(UserType):
+ """A simple string type. Can validate a length and a pattern.
+
+ :param min_length: Possible minimum length
+ :param max_length: Possible maximum length
+ :param pattern: Possible string pattern
+
+ Example::
+
+ Name = StringType(min_length=1, pattern='^[a-zA-Z ]*$')
+
+ """
+ basetype = str
+ name = "string"
+
+ def __init__(self, min_length=None, max_length=None, pattern=None):
+ self.min_length = min_length
+ self.max_length = max_length
+ if isinstance(pattern, str):
+ self.pattern = re.compile(pattern)
+ else:
+ self.pattern = pattern
+
+ def validate(self, value):
+ if not isinstance(value, self.basetype):
+ error = 'Value should be a string'
+ raise ValueError(error)
+
+ if self.min_length is not None and len(value) < self.min_length:
+ error = 'Value should have at least %s characters' % self.min_length
+ raise ValueError(error)
+
+ if self.max_length is not None and len(value) > self.max_length:
+ error = 'Value should have a maximum character requirement of %s' \
+ % self.max_length
+ raise ValueError(error)
+
+ if self.pattern is not None and not self.pattern.search(value):
+ error = 'Value should match the pattern %s' % self.pattern.pattern
+ raise ValueError(error)
+
+ return value
+
+
+class Enum(UserType):
+ """A simple enumeration type. Can be based on any non-complex type.
+
+ :param basetype: The actual data type
+ :param values: A set of possible values
+
+ If nullable, 'None' should be added to the values set.
+
+ Example::
+
+ Gender = Enum(str, 'male', 'female')
+ Species = Enum(str, 'cat', 'dog')
+
+ """
+ def __init__(self, basetype, *values, **kw):
+ self.basetype = basetype
+ self.values = set(values)
+ name = kw.pop('name', None)
+ if name is None:
+ name = "Enum(%s)" % ', '.join((str(v) for v in values))
+ self.name = name
+
+ def validate(self, value):
+ if value not in self.values:
+ raise ValueError("Value should be one of: %s" %
+ ', '.join(map(str, self.values)))
+ return value
+
+ def tobasetype(self, value):
+ return value
+
+ def frombasetype(self, value):
+ return value
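
A short sketch of these user types validating input (values illustrative):

    Enum(str, 'cat', 'dog').validate('cat')          # 'cat'
    IntegerType(minimum=1).validate(0)               # raises ValueError
    StringType(pattern='^[a-z]+$').validate('ok')    # 'ok'
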
+
+
+class UnsetType(object):
+ def __bool__(self):
+ return False
+
+ def __repr__(self):
+ return 'Unset'
+
+
+Unset = UnsetType()
+
+
+def validate_value(datatype, value):
+ if value in (Unset, None) or datatype is None:
+ return value
+
+ # Try to promote the data type to one of our complex types.
+ if isinstance(datatype, list):
+ datatype = ArrayType(datatype[0])
+ elif isinstance(datatype, dict):
+ datatype = DictType(*list(datatype.items())[0])
+
+ # If the datatype has its own validator, use that.
+ if hasattr(datatype, 'validate'):
+ return datatype.validate(value)
+
+ # Do type promotion/conversion and data validation for builtin
+ # types.
+ v_type = type(value)
+ if datatype == int:
+ if v_type in _promotable_types:
+ try:
+ # Try to turn the value into an int
+ value = datatype(value)
+ except ValueError:
+ # An error is raised at the end of the function
+ # when the types don't match.
+ pass
+ elif datatype is float and v_type in _promotable_types:
+ try:
+ value = float(value)
+ except ValueError:
+ # An error is raised at the end of the function
+ # when the types don't match.
+ pass
+ elif datatype is str and isinstance(value, bytes):
+ value = value.decode()
+ elif datatype is bytes and isinstance(value, str):
+ value = value.encode()
+
+ if not isinstance(value, datatype):
+ raise ValueError(
+ "Wrong type. Expected '%s', got '%s'" % (
+ datatype, v_type
+ ))
+ return value
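
The promotion rules let string forms coming from query parameters pass
validation; a minimal sketch:

    validate_value(int, '42')       # 42, promoted from str
    validate_value([int], [1, 2])   # [1, 2], via an implicit ArrayType
    validate_value(str, b'abc')     # 'abc', decoded from bytes
    validate_value(int, 'x')        # raises ValueError
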
+
+
+def iscomplex(datatype):
+ return inspect.isclass(datatype) \
+ and '_wsme_attributes' in datatype.__dict__
+
+
+class wsproperty(property):
+ """A specialised :class:`property` to define typed-property on complex types.
+
+ Example::
+
+ class MyComplexType(Base):
+ def get_aint(self):
+ return self._aint
+
+ def set_aint(self, value):
+ assert value < 10 # Dummy input validation
+ self._aint = value
+
+ aint = wsproperty(int, get_aint, set_aint, mandatory=True)
+
+ """
+ def __init__(self, datatype, fget, fset=None,
+ mandatory=False, doc=None, name=None):
+ property.__init__(self, fget, fset)
+ #: The property name in the parent python class
+ self.key = None
+ #: The attribute name in the public API.
+ #: Defaults to :attr:`key`
+ self.name = name
+ #: property data type
+ self.datatype = datatype
+ #: True if the property is mandatory
+ self.mandatory = mandatory
+
+
+class wsattr(object):
+ """Complex type attribute definition.
+
+ Example::
+
+ class MyComplexType(ctypes.Base):
+ optionalvalue = int
+ mandatoryvalue = wsattr(int, mandatory=True)
+ named_value = wsattr(int, name='named.value')
+
+ After inspection, the non-wsattr attributes will be replaced, and
+ the above class will be equivalent to::
+
+ class MyComplexType(ctypes.Base):
+ optionalvalue = wsattr(int)
+ mandatoryvalue = wsattr(int, mandatory=True)
+
+ """
+ def __init__(self, datatype, mandatory=False, name=None, default=Unset,
+ readonly=False):
+ #: The attribute name in the parent python class.
+ #: Set by :func:`inspect_class`
+ self.key = None # will be set by class inspection
+ #: The attribute name in the public API.
+ #: Defaults to :attr:`key`
+ self.name = name
+ self._datatype = (datatype,)
+ #: True if the attribute is mandatory
+ self.mandatory = mandatory
+ #: Default value. The attribute will return this instead
+ #: of :data:`Unset` if no value has been set.
+ self.default = default
+ #: If True value cannot be set from json/xml input data
+ self.readonly = readonly
+
+ self.complextype = None
+
+ def _get_dataholder(self, instance):
+ dataholder = getattr(instance, '_wsme_dataholder', None)
+ if dataholder is None:
+ dataholder = instance._wsme_DataHolderClass()
+ instance._wsme_dataholder = dataholder
+ return dataholder
+
+ def __get__(self, instance, owner):
+ if instance is None:
+ return self
+ return getattr(
+ self._get_dataholder(instance),
+ self.key,
+ self.default
+ )
+
+ def __set__(self, instance, value):
+ try:
+ value = validate_value(self.datatype, value)
+ except (ValueError, TypeError) as e:
+ raise exception.InvalidInput(self.name, value, str(e))
+ dataholder = self._get_dataholder(instance)
+ if value is Unset:
+ if hasattr(dataholder, self.key):
+ delattr(dataholder, self.key)
+ else:
+ setattr(dataholder, self.key, value)
+
+ def __delete__(self, instance):
+ self.__set__(instance, Unset)
+
+ def _get_datatype(self):
+ if isinstance(self._datatype, tuple):
+ self._datatype = \
+ self.complextype().__registry__.resolve_type(self._datatype[0])
+ if isinstance(self._datatype, weakref.ref):
+ return self._datatype()
+ if isinstance(self._datatype, list):
+ return [
+ item() if isinstance(item, weakref.ref) else item
+ for item in self._datatype
+ ]
+ return self._datatype
+
+ def _set_datatype(self, datatype):
+ self._datatype = datatype
+
+ #: attribute data type. Can be either an actual type,
+ #: or a type name, in which case the actual type will be
+ #: determined when needed (generally just before scanning the api).
+ datatype = property(_get_datatype, _set_datatype)
+
+
+def iswsattr(attr):
+ if inspect.isfunction(attr) or inspect.ismethod(attr):
+ return False
+ if isinstance(attr, property) and not isinstance(attr, wsproperty):
+ return False
+ return True
+
+
+def sort_attributes(class_, attributes):
+ """Sort a class attributes list.
+
+ Three mechanisms are attempted:
+
+ #. Look for a _wsme_attr_order attribute on the class. This allows
+ an arbitrary attribute order to be defined (useful for
+ generated types).
+
+ #. Access the object source code to find the declaration order.
+
+ #. Sort alphabetically.
+
+ """
+
+ if not len(attributes):
+ return
+
+ attrs = dict((a.key, a) for a in attributes)
+
+ if hasattr(class_, '_wsme_attr_order'):
+ names_order = class_._wsme_attr_order
+ else:
+ names = attrs.keys()
+ names_order = []
+ try:
+ lines = []
+ for cls in inspect.getmro(class_):
+ if cls is object:
+ continue
+ lines[len(lines):] = inspect.getsourcelines(cls)[0]
+ for line in lines:
+ line = line.strip().replace(" ", "")
+ if '=' in line:
+ aname = line[:line.index('=')]
+ if aname in names and aname not in names_order:
+ names_order.append(aname)
+ if len(names_order) < len(names):
+ names_order.extend((
+ name for name in names if name not in names_order))
+ assert len(names_order) == len(names)
+ except (TypeError, IOError):
+ names_order = list(names)
+ names_order.sort()
+
+ attributes[:] = [attrs[name] for name in names_order]
+
+
+def inspect_class(class_):
+ """Extract a list of (name, wsattr|wsproperty) for the given class"""
+ attributes = []
+ for name, attr in inspect.getmembers(class_, iswsattr):
+ if name.startswith('_'):
+ continue
+ if inspect.isroutine(attr):
+ continue
+
+ if isinstance(attr, (wsattr, wsproperty)):
+ attrdef = attr
+ else:
+ if (attr not in native_types
+ and (inspect.isclass(attr) or isinstance(attr, (list, dict)))):
+ register_type(attr)
+ attrdef = getattr(class_, '__wsattrclass__', wsattr)(attr)
+
+ attrdef.key = name
+ if attrdef.name is None:
+ attrdef.name = name
+ attrdef.complextype = weakref.ref(class_)
+ attributes.append(attrdef)
+ setattr(class_, name, attrdef)
+
+ sort_attributes(class_, attributes)
+ return attributes
+
+
+def list_attributes(class_):
+ """Returns a list of a complex type attributes."""
+ if not iscomplex(class_):
+ raise TypeError("%s is not a registered type")
+ return class_._wsme_attributes
+
+
+def make_dataholder(class_):
+ # the slots are computed outside the class scope to avoid
+ # 'attr' to pollute the class namespace, which leads to weird
+ # things if one of the slots is named 'attr'.
+ slots = [attr.key for attr in class_._wsme_attributes]
+
+ class DataHolder(object):
+ __slots__ = slots
+
+ DataHolder.__name__ = class_.__name__ + 'DataHolder'
+ return DataHolder
+
+
+class Registry(object):
+ def __init__(self):
+ self._complex_types = []
+ self.array_types = set()
+ self.dict_types = set()
+
+ @property
+ def complex_types(self):
+ return [t() for t in self._complex_types if t()]
+
+ def register(self, class_):
+ """Make sure a type is registered.
+
+ It is automatically called by :class:`expose() <expose.expose>`
+ and :class:`validate() <expose.validate>`.
+ Unless you want to control when the class inspection is done there
+ is no need to call it.
+
+ """
+ if class_ is None or \
+ class_ in native_types or \
+ isinstance(class_, UserType) or iscomplex(class_) or \
+ isinstance(class_, ArrayType) or isinstance(class_, DictType):
+ return class_
+
+ if isinstance(class_, list):
+ if len(class_) != 1:
+ raise ValueError("Cannot register type %s" % repr(class_))
+ dt = ArrayType(class_[0])
+ self.register(dt.item_type)
+ self.array_types.add(dt)
+ return dt
+
+ if isinstance(class_, dict):
+ if len(class_) != 1:
+ raise ValueError("Cannot register type %s" % repr(class_))
+ dt = DictType(*list(class_.items())[0])
+ self.register(dt.value_type)
+ self.dict_types.add(dt)
+ return dt
+
+ class_._wsme_attributes = None
+ class_._wsme_attributes = inspect_class(class_)
+ class_._wsme_DataHolderClass = make_dataholder(class_)
+
+ class_.__registry__ = self
+ self._complex_types.append(weakref.ref(class_))
+ return class_
+
+ def reregister(self, class_):
+ """Register a type which may already have been registered.
+
+ """
+ self._unregister(class_)
+ return self.register(class_)
+
+ def _unregister(self, class_):
+ """Remove a previously registered type.
+
+ """
+ # Clear the existing attribute reference so it is rebuilt if
+ # the class is registered again later.
+ if hasattr(class_, '_wsme_attributes'):
+ del class_._wsme_attributes
+ # FIXME(dhellmann): This method does not recurse through the
+ # types like register() does. Should it?
+ if isinstance(class_, list):
+ at = ArrayType(class_[0])
+ try:
+ self.array_types.remove(at)
+ except KeyError:
+ pass
+ elif isinstance(class_, dict):
+ key_type, value_type = list(class_.items())[0]
+ self.dict_types = set(
+ dt for dt in self.dict_types
+ if (dt.key_type, dt.value_type) != (key_type, value_type)
+ )
+ # We can't use remove() here because the items in
+ # _complex_types are weakref objects pointing to the classes,
+ # so we can't compare with them directly.
+ self._complex_types = [
+ ct for ct in self._complex_types
+ if ct() is not class_
+ ]
+
+ def lookup(self, typename):
+ LOG.debug('Lookup %s', typename)
+ modname = None
+ if '.' in typename:
+ modname, typename = typename.rsplit('.', 1)
+ for ct in self._complex_types:
+ ct = ct()
+ if ct is not None and typename == ct.__name__ and (
+ modname is None or modname == ct.__module__):
+ return ct
+
+ def resolve_type(self, type_):
+ if isinstance(type_, str):
+ return self.lookup(type_)
+ if isinstance(type_, list):
+ type_ = ArrayType(type_[0])
+ if isinstance(type_, dict):
+ type_ = DictType(list(type_.keys())[0], list(type_.values())[0])
+ if isinstance(type_, ArrayType):
+ type_ = ArrayType(self.resolve_type(type_.item_type))
+ self.array_types.add(type_)
+ elif isinstance(type_, DictType):
+ type_ = DictType(
+ type_.key_type,
+ self.resolve_type(type_.value_type)
+ )
+ self.dict_types.add(type_)
+ else:
+ type_ = self.register(type_)
+ return type_
+
+
+# Default type registry
+registry = Registry()
+
+
+def register_type(class_):
+ return registry.register(class_)
+
+
+class BaseMeta(type):
+ def __new__(cls, name, bases, dct):
+ if bases and bases[0] is not object and '__registry__' not in dct:
+ dct['__registry__'] = registry
+ return type.__new__(cls, name, bases, dct)
+
+ def __init__(cls, name, bases, dct):
+ if bases and bases[0] is not object and cls.__registry__:
+ cls.__registry__.register(cls)
+
+
+class Base(metaclass=BaseMeta):
+ """Base type for complex types"""
+ def __init__(self, **kw):
+ for key, value in kw.items():
+ if hasattr(self, key):
+ setattr(self, key, value)
+
+
+class PassthruResponse(object):
+ """Object to hold the "response" from a passthru call"""
+ def __init__(self, obj, status_code=None):
+ #: Store the result object from the view
+ self.obj = obj
+
+ #: Store an optional status_code
+ self.status_code = status_code
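
Putting the pieces together, a complex type derived from Base is registered
automatically by the metaclass and can round-trip through validation and
JSON encoding; a minimal sketch (type and values are illustrative):

    class Demo(Base):
        uuid = wsattr(str, mandatory=True)
        extra = {str: str}

    d = Demo(uuid='abc', extra={'rack': '3'})
    d.uuid    # 'abc'
    # expose.tojson(Demo, d) == {'uuid': 'abc', 'extra': {'rack': '3'}}
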
diff --git a/ironic/cmd/__init__.py b/ironic/cmd/__init__.py
index 8acedfa92..403f918d9 100644
--- a/ironic/cmd/__init__.py
+++ b/ironic/cmd/__init__.py
@@ -19,12 +19,19 @@
# concurrency models can cause undefined behavior and potentially API timeouts.
import os
-os.environ['EVENTLET_NO_GREENDNS'] = 'yes' # noqa E402
+os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
import eventlet
eventlet.monkey_patch(os=False)
+# Monkey patch the original current_thread to use the up-to-date _active
+# global variable. See https://bugs.launchpad.net/bugs/1863021 and
+# https://github.com/eventlet/eventlet/issues/592
+# all the noqa below are for I202 due to 'import eventlet' above
+import __original_module_threading as orig_threading # noqa
+import threading # noqa
+orig_threading.current_thread.__globals__['_active'] = threading._active
-from ironic.common import i18n # noqa for I202 due to 'import eventlet' above
+from ironic.common import i18n # noqa
i18n.install('ironic')
diff --git a/ironic/common/cinder.py b/ironic/common/cinder.py
index c2afc021c..cc140547e 100644
--- a/ironic/common/cinder.py
+++ b/ironic/common/cinder.py
@@ -49,16 +49,10 @@ def get_client(context):
service_auth = keystone.get_auth('cinder')
session = _get_cinder_session()
- # TODO(pas-ha) remove in Rocky
- adapter_opts = {}
- # NOTE(pas-ha) new option must always win if set
- if CONF.cinder.url and not CONF.cinder.endpoint_override:
- adapter_opts['endpoint_override'] = CONF.cinder.url
-
# TODO(pas-ha) use versioned endpoint data to select required
# cinder api version
cinder_url = keystone.get_endpoint('cinder', session=session,
- auth=service_auth, **adapter_opts)
+ auth=service_auth)
# TODO(pas-ha) investigate possibility of passing a user context here,
# similar to what neutron/glance-related code does
# NOTE(pas-ha) cinderclient has both 'connect_retries' (passed to
diff --git a/ironic/common/exception.py b/ironic/common/exception.py
index 221a21ca4..1ade17253 100644
--- a/ironic/common/exception.py
+++ b/ironic/common/exception.py
@@ -20,7 +20,6 @@ from http import client as http_client
from ironic_lib.exception import IronicException
from oslo_log import log as logging
-import wsme
from ironic.common.i18n import _
@@ -664,6 +663,10 @@ class AgentConnectionFailed(IronicException):
_msg_fmt = _("Connection to agent failed: %(reason)s")
+class AgentCommandTimeout(IronicException):
+ _msg_fmt = _("Timeout executing command %(command)s on node %(node)s")
+
+
class NodeProtected(HTTPForbidden):
_msg_fmt = _("Node %(node)s is protected and cannot be undeployed, "
"rebuilt or deleted")
@@ -709,8 +712,21 @@ class IBMCConnectionError(IBMCError):
_msg_fmt = _("IBMC connection failed for node %(node)s: %(error)s")
-class ClientSideError(wsme.exc.ClientSideError):
- pass
+class ClientSideError(RuntimeError):
+ def __init__(self, msg=None, status_code=400, faultcode='Client'):
+ self.msg = msg
+ self.code = status_code
+ self.faultcode = faultcode
+ super(ClientSideError, self).__init__(self.faultstring)
+
+ @property
+ def faultstring(self):
+ if self.msg is None:
+ return str(self)
+ elif isinstance(self.msg, str):
+ return self.msg
+ else:
+ return str(self.msg)
class NodeIsRetired(Invalid):
@@ -721,3 +737,75 @@ class NodeIsRetired(Invalid):
class NoFreeIPMITerminalPorts(TemporaryFailure):
_msg_fmt = _("Unable to allocate a free port on host %(host)s for IPMI "
"terminal, not enough free ports.")
+
+
+class InvalidInput(ClientSideError):
+ def __init__(self, fieldname, value, msg=''):
+ self.fieldname = fieldname
+ self.value = value
+ super(InvalidInput, self).__init__(msg)
+
+ @property
+ def faultstring(self):
+ return _(
+ "Invalid input for field/attribute %(fieldname)s. "
+ "Value: '%(value)s'. %(msg)s"
+ ) % {
+ 'fieldname': self.fieldname,
+ 'value': self.value,
+ 'msg': self.msg
+ }
+
+
+class UnknownArgument(ClientSideError):
+ def __init__(self, argname, msg=''):
+ self.argname = argname
+ super(UnknownArgument, self).__init__(msg)
+
+ @property
+ def faultstring(self):
+ return _('Unknown argument: "%(argname)s"%(msg)s') % {
+ 'argname': self.argname,
+ 'msg': self.msg and ": " + self.msg or ""
+ }
+
+
+class MissingArgument(ClientSideError):
+ def __init__(self, argname, msg=''):
+ self.argname = argname
+ super(MissingArgument, self).__init__(msg)
+
+ @property
+ def faultstring(self):
+ return _('Missing argument: "%(argname)s"%(msg)s') % {
+ 'argname': self.argname,
+ 'msg': self.msg and ": " + self.msg or ""
+ }
+
+
+class UnknownAttribute(ClientSideError):
+ def __init__(self, fieldname, attributes, msg=''):
+ self.fieldname = fieldname
+ self.attributes = attributes
+ self.msg = msg
+ super(UnknownAttribute, self).__init__(self.msg)
+
+ @property
+ def faultstring(self):
+ error = _("Unknown attribute for argument %(argn)s: %(attrs)s")
+ if len(self.attributes) > 1:
+ error = _("Unknown attributes for argument %(argn)s: %(attrs)s")
+ str_attrs = ", ".join(self.attributes)
+ return error % {'argn': self.fieldname, 'attrs': str_attrs}
+
+ def add_fieldname(self, name):
+ """Add a fieldname to concatenate the full name.
+
+ Add a fieldname so that the whole hierarchy is displayed. Successive
+ calls to this method will prepend ``name`` to the hierarchy of names.
+ """
+ if self.fieldname is not None:
+ self.fieldname = "{}.{}".format(name, self.fieldname)
+ else:
+ self.fieldname = name
+ super(UnknownAttribute, self).__init__(self.msg)
diff --git a/ironic/common/hash_ring.py b/ironic/common/hash_ring.py
index d682fcc0d..0c2c534a9 100644
--- a/ironic/common/hash_ring.py
+++ b/ironic/common/hash_ring.py
@@ -69,7 +69,8 @@ class HashRingManager(object):
for driver_name, hosts in d2c.items():
rings[driver_name] = hashring.HashRing(
- hosts, partitions=2 ** CONF.hash_partition_exponent)
+ hosts, partitions=2 ** CONF.hash_partition_exponent,
+ hash_function=CONF.hash_ring_algorithm)
return rings
diff --git a/ironic/common/images.py b/ironic/common/images.py
index 1bd1755fd..31332df76 100644
--- a/ironic/common/images.py
+++ b/ironic/common/images.py
@@ -414,22 +414,36 @@ def fetch(context, image_href, path, force_raw=False):
image_to_raw(image_href, path, "%s.part" % path)
-def image_to_raw(image_href, path, path_tmp):
+def force_raw_get_source_format(image_href, path):
+ data = disk_utils.qemu_img_info(path)
+
+ fmt = data.file_format
+ if fmt is None:
+ raise exception.ImageUnacceptable(
+ reason=_("'qemu-img info' parsing failed."),
+ image_id=image_href)
+
+ backing_file = data.backing_file
+ if backing_file is not None:
+ raise exception.ImageUnacceptable(
+ image_id=image_href,
+ reason=_("fmt=%(fmt)s backed by: %(backing_file)s") %
+ {'fmt': fmt, 'backing_file': backing_file})
+
+ return fmt
+
+
+def force_raw_will_convert(image_href, path_tmp):
with fileutils.remove_path_on_error(path_tmp):
- data = disk_utils.qemu_img_info(path_tmp)
+ fmt = force_raw_get_source_format(image_href, path_tmp)
+ if fmt != "raw":
+ return True
+ return False
- fmt = data.file_format
- if fmt is None:
- raise exception.ImageUnacceptable(
- reason=_("'qemu-img info' parsing failed."),
- image_id=image_href)
- backing_file = data.backing_file
- if backing_file is not None:
- raise exception.ImageUnacceptable(
- image_id=image_href,
- reason=_("fmt=%(fmt)s backed by: %(backing_file)s") %
- {'fmt': fmt, 'backing_file': backing_file})
+def image_to_raw(image_href, path, path_tmp):
+ with fileutils.remove_path_on_error(path_tmp):
+ fmt = force_raw_get_source_format(image_href, path_tmp)
if fmt != "raw":
staged = "%s.converted" % path
@@ -512,7 +526,7 @@ def get_temp_url_for_glance_image(context, image_uuid):
def create_boot_iso(context, output_filename, kernel_href,
ramdisk_href, deploy_iso_href=None, esp_image_href=None,
root_uuid=None, kernel_params=None, boot_mode=None,
- configdrive_href=None):
+ configdrive_href=None, base_iso=None):
"""Creates a bootable ISO image for a node.
Given the hrefs for kernel, ramdisk, root partition's UUID and
@@ -539,14 +553,26 @@ def create_boot_iso(context, output_filename, kernel_href,
:param configdrive_href: URL to ISO9660 or FAT-formatted OpenStack config
drive image. This image will be embedded into the built ISO image.
Optional.
+ :param base_iso: URL or glance UUID of an existing ISO image to use
+ directly, instead of building a bootable ISO from a kernel and
+ ramdisk. Optional.
:raises: ImageCreationFailed, if creating boot ISO failed.
"""
with utils.tempdir() as tmpdir:
- kernel_path = os.path.join(tmpdir, kernel_href.split('/')[-1])
- ramdisk_path = os.path.join(tmpdir, ramdisk_href.split('/')[-1])
-
- fetch(context, kernel_href, kernel_path)
- fetch(context, ramdisk_href, ramdisk_path)
+ if base_iso:
+ # NOTE(TheJulia): Eventually we want to use the creation method
+ # to perform the massaging of the image, because oddly enough
+ # we need to do all the same basic things, just a little
+ # differently.
+ fetch(context, base_iso, output_filename)
+ # Temporary, return to the caller until we support the combined
+ # operation.
+ return
+ else:
+ kernel_path = os.path.join(tmpdir, kernel_href.split('/')[-1])
+ ramdisk_path = os.path.join(tmpdir, ramdisk_href.split('/')[-1])
+ fetch(context, kernel_href, kernel_path)
+ fetch(context, ramdisk_href, ramdisk_path)
if configdrive_href:
configdrive_path = os.path.join(
@@ -578,7 +604,11 @@ def create_boot_iso(context, output_filename, kernel_href,
elif CONF.esp_image:
esp_image_path = CONF.esp_image
-
+ # TODO(TheJulia): we should opportunistically try to make bios
+ # bootable and UEFI. In other words, collapse a lot of this
+ # path since they are not mutually exclusive.
+ # UEFI boot mode, but Network iPXE -> ISO means bios bootable
+ # contents are still required.
create_esp_image_for_uefi(
output_filename, kernel_path, ramdisk_path,
deploy_iso=deploy_iso_path, esp_image=esp_image_path,
diff --git a/ironic/common/json_rpc/__init__.py b/ironic/common/json_rpc/__init__.py
index 280b93f62..ad58e3bc6 100644
--- a/ironic/common/json_rpc/__init__.py
+++ b/ironic/common/json_rpc/__init__.py
@@ -16,5 +16,5 @@ from oslo_config import cfg
CONF = cfg.CONF
-def require_authentication():
- return (CONF.json_rpc.auth_strategy or CONF.auth_strategy) == 'keystone'
+def auth_strategy():
+ return CONF.json_rpc.auth_strategy or CONF.auth_strategy
diff --git a/ironic/common/json_rpc/client.py b/ironic/common/json_rpc/client.py
index b6692d901..8979d6f02 100644
--- a/ironic/common/json_rpc/client.py
+++ b/ironic/common/json_rpc/client.py
@@ -36,16 +36,30 @@ def _get_session():
global _SESSION
if _SESSION is None:
- if json_rpc.require_authentication():
- auth = keystone.get_auth('json_rpc')
- else:
- auth = None
-
- _SESSION = keystone.get_session('json_rpc', auth=auth)
- _SESSION.headers = {
+ kwargs = {}
+ auth_strategy = json_rpc.auth_strategy()
+ if auth_strategy != 'keystone':
+ auth_type = 'none' if auth_strategy == 'noauth' else auth_strategy
+ CONF.set_default('auth_type', auth_type, group='json_rpc')
+
+ # Deprecated, remove in W
+ if auth_strategy == 'http_basic':
+ if CONF.json_rpc.http_basic_username:
+ kwargs['username'] = CONF.json_rpc.http_basic_username
+ if CONF.json_rpc.http_basic_password:
+ kwargs['password'] = CONF.json_rpc.http_basic_password
+
+ auth = keystone.get_auth('json_rpc', **kwargs)
+
+ session = keystone.get_session('json_rpc', auth=auth)
+ headers = {
'Content-Type': 'application/json'
}
+ # Adds options like connect_retries
+ _SESSION = keystone.get_adapter('json_rpc', session=session,
+ additional_headers=headers)
+
return _SESSION
@@ -153,7 +167,10 @@ class _CallContext(object):
body['id'] = context.request_id or uuidutils.generate_uuid()
LOG.debug("RPC %s with %s", method, strutils.mask_dict_password(body))
- url = 'http://%s:%d' % (self.host, CONF.json_rpc.port)
+ scheme = 'http'
+ if CONF.json_rpc.use_ssl:
+ scheme = 'https'
+ url = '%s://%s:%d' % (scheme, self.host, CONF.json_rpc.port)
result = _get_session().post(url, json=body)
LOG.debug('RPC %s returned %s', method,
strutils.mask_password(result.text or '<None>'))
diff --git a/ironic/common/json_rpc/server.py b/ironic/common/json_rpc/server.py
index 39fb67be0..2fdab0c4f 100644
--- a/ironic/common/json_rpc/server.py
+++ b/ironic/common/json_rpc/server.py
@@ -21,6 +21,7 @@ https://www.jsonrpc.org/specification. Main differences:
import json
+from ironic_lib import auth_basic
from keystonemiddleware import auth_token
from oslo_config import cfg
from oslo_log import log
@@ -38,7 +39,7 @@ from ironic.common import json_rpc
CONF = cfg.CONF
LOG = log.getLogger(__name__)
-_BLACK_LIST = {'init_host', 'del_host', 'target', 'iter_nodes'}
+_DENY_LIST = {'init_host', 'del_host', 'target', 'iter_nodes'}
def _build_method_map(manager):
@@ -49,7 +50,7 @@ def _build_method_map(manager):
"""
result = {}
for method in dir(manager):
- if method.startswith('_') or method in _BLACK_LIST:
+ if method.startswith('_') or method in _DENY_LIST:
continue
func = getattr(manager, method)
if not callable(func):
@@ -90,9 +91,14 @@ class WSGIService(service.Service):
self.manager = manager
self.serializer = serializer
self._method_map = _build_method_map(manager)
- if json_rpc.require_authentication():
+ auth_strategy = json_rpc.auth_strategy()
+ if auth_strategy == 'keystone':
conf = dict(CONF.keystone_authtoken)
app = auth_token.AuthProtocol(self._application, conf)
+ elif auth_strategy == 'http_basic':
+ app = auth_basic.BasicAuthMiddleware(
+ self._application,
+ cfg.CONF.json_rpc.http_basic_auth_user_file)
else:
app = self._application
self.server = wsgi.Server(CONF, 'ironic-json-rpc', app,
@@ -109,7 +115,7 @@ class WSGIService(service.Service):
return webob.Response(status_code=405, json_body=body)(
environment, start_response)
- if json_rpc.require_authentication():
+ if json_rpc.auth_strategy() == 'keystone':
roles = (request.headers.get('X-Roles') or '').split(',')
if 'admin' not in roles:
LOG.debug('Roles %s do not contain "admin", rejecting '
diff --git a/ironic/common/neutron.py b/ironic/common/neutron.py
index 7182cd357..15a4201b5 100644
--- a/ironic/common/neutron.py
+++ b/ironic/common/neutron.py
@@ -11,12 +11,12 @@
# under the License.
import copy
+import ipaddress
from keystoneauth1 import loading as ks_loading
from neutronclient.common import exceptions as neutron_exceptions
from neutronclient.v2_0 import client as clientv20
from oslo_log import log
-from oslo_utils import netutils
from oslo_utils import uuidutils
import retrying
@@ -247,7 +247,8 @@ def _add_ip_addresses_for_ipv6_stateful(context, port, client):
"""
fixed_ips = port['port']['fixed_ips']
if (not fixed_ips
- or not netutils.is_valid_ipv6(fixed_ips[0]['ip_address'])):
+ or ipaddress.ip_address(
+ fixed_ips[0]['ip_address']).version != 6):
return
subnet = client.show_subnet(
@@ -474,6 +475,160 @@ def remove_neutron_ports(task, params):
{'node_uuid': node_uuid})
+def _uncidr(cidr, ipv6=False):
+ """Convert CIDR network representation into network/netmask form
+
+ :param cidr: network in CIDR form
+ :param ipv6: if `True`, treat `cidr` as IPv6
+ :returns: a tuple of the network address and the netmask, both in
+ dotted decimal (IPv4) or colon-hexadecimal (IPv6) notation
+
+ """
+ net = ipaddress.ip_interface(cidr).network
+ return str(net.network_address), str(net.netmask)
+
+
+def get_neutron_port_data(port_id, vif_id, client=None, context=None):
+ """Gather Neutron port and network configuration
+
+ Query Neutron for port and network configuration, return whatever
+ is available.
+
+ :param port_id: ironic port/portgroup ID.
+ :param vif_id: Neutron port ID.
+ :param client: Optional Neutron client object.
+ :param context: request context
+ :type context: ironic.common.context.RequestContext
+ :raises: NetworkError
+ :returns: a dict holding network configuration information
+ associated with this ironic or Neutron port.
+ """
+
+ if not client:
+ client = get_client(context=context)
+
+ try:
+ port_config = client.show_port(
+ vif_id, fields=['id', 'name', 'dns_assignment', 'fixed_ips',
+ 'mac_address', 'network_id'])
+
+ except neutron_exceptions.NeutronClientException as e:
+ msg = (_('Unable to get port info for %(port_id)s. Error: '
+ '%(err)s') % {'port_id': vif_id, 'err': e})
+ LOG.exception(msg)
+ raise exception.NetworkError(msg)
+
+ LOG.debug('Received port %(port)s data: %(info)s',
+ {'port': vif_id, 'info': port_config})
+
+ port_config = port_config['port']
+
+ port_id = port_config['name'] or port_id
+
+ network_id = port_config.get('network_id')
+
+ try:
+ network_config = client.show_network(
+ network_id, fields=['id', 'mtu', 'subnets'])
+
+ except neutron_exceptions.NeutronClientException as e:
+ msg = (_('Unable to get network info for %(network_id)s. Error: '
+ '%(err)s') % {'network_id': network_id, 'err': e})
+ LOG.exception(msg)
+ raise exception.NetworkError(msg)
+
+ LOG.debug('Received network %(network)s data: %(info)s',
+ {'network': network_id, 'info': network_config})
+
+ network_config = network_config['network']
+
+ subnets_config = {}
+
+ network_data = {
+ 'links': [
+ {
+ 'id': port_id,
+ 'type': 'vif',
+ 'ethernet_mac_address': port_config['mac_address'],
+ 'vif_id': port_config['id'],
+ 'mtu': network_config['mtu']
+ }
+ ],
+ 'networks': [
+
+ ]
+ }
+
+ for fixed_ip in port_config.get('fixed_ips', []):
+ subnet_id = fixed_ip['subnet_id']
+
+ try:
+ subnet_config = client.show_subnet(
+ subnet_id, fields=['id', 'name', 'enable_dhcp',
+ 'dns_nameservers', 'host_routes',
+ 'ip_version', 'gateway_ip', 'cidr'])
+
+ LOG.debug('Received subnet %(subnet)s data: %(info)s',
+ {'subnet': subnet_id, 'info': subnet_config})
+
+ subnets_config[subnet_id] = subnet_config['subnet']
+
+ except neutron_exceptions.NeutronClientException as e:
+ msg = (_('Unable to get subnet info for %(subnet_id)s. Error: '
+ '%(err)s') % {'subnet_id': subnet_id, 'err': e})
+ LOG.exception(msg)
+ raise exception.NetworkError(msg)
+
+ subnet_config = subnets_config[subnet_id]
+
+ subnet_network, netmask = _uncidr(
+ subnet_config['cidr'], subnet_config['ip_version'] == 6)
+
+ network = {
+ 'id': fixed_ip['subnet_id'],
+ 'network_id': port_config['network_id'],
+ 'type': 'ipv%s' % subnet_config['ip_version'],
+ 'link': port_id,
+ 'ip_address': fixed_ip['ip_address'],
+ 'netmask': netmask,
+ 'routes': [
+
+ ]
+ }
+
+ # TODO(etingof): Adding default route if gateway is present.
+ # This is a hack, Neutron should have given us a route.
+
+ if subnet_config['gateway_ip']:
+ zero_addr = ('::0' if subnet_config['ip_version'] == 6
+ else '0.0.0.0')
+
+ route = {
+ 'network': zero_addr,
+ 'netmask': zero_addr,
+ 'gateway': subnet_config['gateway_ip']
+ }
+
+ network['routes'].append(route)
+
+ for host_config in subnet_config['host_routes']:
+ subnet_network, netmask = _uncidr(
+ host_config['destination'],
+ subnet_config['ip_version'] == 6)
+
+ route = {
+ 'network': subnet_network,
+ 'netmask': netmask,
+ 'gateway': host_config['nexthop']
+ }
+
+ network['routes'].append(route)
+
+ network_data['networks'].append(network)
+
+ return network_data
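
The returned structure mirrors the network_data.json layout consumed by
first-boot tools such as cloud-init; roughly (all values illustrative):

    {'links': [{'id': 'port-1', 'type': 'vif',
                'ethernet_mac_address': 'fa:16:3e:00:00:01',
                'vif_id': '<neutron port uuid>', 'mtu': 1500}],
     'networks': [{'id': '<subnet uuid>', 'network_id': '<network uuid>',
                   'type': 'ipv4', 'link': 'port-1',
                   'ip_address': '192.0.2.10', 'netmask': '255.255.255.0',
                   'routes': [{'network': '0.0.0.0', 'netmask': '0.0.0.0',
                               'gateway': '192.0.2.1'}]}]}
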
+
+
def get_node_portmap(task):
"""Extract the switch port information for the node.
diff --git a/ironic/common/policy.py b/ironic/common/policy.py
index 3f009e9c8..240797731 100644
--- a/ironic/common/policy.py
+++ b/ironic/common/policy.py
@@ -36,7 +36,7 @@ default_policies = [
policy.RuleDefault('admin_api',
'role:admin or role:administrator',
description='Legacy rule for cloud admin access'),
- # is_public_api is set in the environment from AuthTokenMiddleware
+ # is_public_api is set in the environment from AuthPublicRoutes
policy.RuleDefault('public_api',
'is_public_api:True',
description='Internal flag for public API routes'),
@@ -633,9 +633,9 @@ def authorize(rule, target, creds, *args, **kwargs):
Checks authorization of a rule against the target and credentials, and
raises an exception if the rule is not defined.
- Always returns true if CONF.auth_strategy == noauth.
+ Always returns true if CONF.auth_strategy is not keystone.
"""
- if CONF.auth_strategy == 'noauth':
+ if CONF.auth_strategy != 'keystone':
return True
enforcer = get_enforcer()
try:
diff --git a/ironic/common/pxe_utils.py b/ironic/common/pxe_utils.py
index 4e49bcc78..7d0af987c 100644
--- a/ironic/common/pxe_utils.py
+++ b/ironic/common/pxe_utils.py
@@ -265,7 +265,10 @@ def create_pxe_config(task, pxe_options, template=None, ipxe_enabled=False):
"""
LOG.debug("Building PXE config for node %s", task.node.uuid)
if template is None:
- template = deploy_utils.get_pxe_config_template(task.node)
+ if ipxe_enabled:
+ template = deploy_utils.get_ipxe_config_template(task.node)
+ else:
+ template = deploy_utils.get_pxe_config_template(task.node)
_ensure_config_dirs_exist(task, ipxe_enabled)
@@ -384,7 +387,16 @@ def _dhcp_option_file_or_url(task, urlboot=False, ip_version=None):
to return options for DHCP. Possible options
are 4, and 6.
"""
- boot_file = deploy_utils.get_pxe_boot_file(task.node)
+ try:
+ if task.driver.boot.ipxe_enabled:
+ boot_file = deploy_utils.get_ipxe_boot_file(task.node)
+ else:
+ boot_file = deploy_utils.get_pxe_boot_file(task.node)
+ except AttributeError:
+ # Support boot interfaces that lack an explicit ipxe_enabled
+ # attribute flag.
+ boot_file = deploy_utils.get_pxe_boot_file(task.node)
+
# NOTE(TheJulia): There are additional cases as we add new
# features, so the logic below is in the form of if/elif/elif
if not urlboot:
@@ -480,7 +492,7 @@ def dhcp_options_for_instance(task, ipxe_enabled=False, url_boot=False,
LOG.warning('IPv6 is enabled and the DHCP driver appears set '
'to a plugin aside from "neutron". Node %(name)s '
'may not receive proper DHCPv6 provided '
- 'boot parameters.'.format(name=task.node.uuid))
+ 'boot parameters.', {'name': task.node.uuid})
# NOTE(TheJulia): This was added for ISC DHCPd support, however it
# appears that isc support was never added to neutron and is likely
# a down stream driver.
@@ -616,6 +628,13 @@ def get_instance_image_info(task, ipxe_enabled=False):
else:
root_dir = get_root_dir()
i_info = node.instance_info
+ if i_info.get('boot_iso'):
+ image_info['boot_iso'] = (
+ i_info['boot_iso'],
+ os.path.join(root_dir, node.uuid, 'boot_iso'))
+
+ return image_info
+
labels = ('kernel', 'ramdisk')
d_info = deploy_utils.get_image_instance_info(node)
if not (i_info.get('kernel') and i_info.get('ramdisk')):
@@ -625,7 +644,6 @@ def get_instance_image_info(task, ipxe_enabled=False):
i_info[label] = str(iproperties[label + '_id'])
node.instance_info = i_info
node.save()
-
for label in labels:
image_info[label] = (
i_info[label],
@@ -714,6 +732,14 @@ def build_instance_pxe_options(task, pxe_info, ipxe_enabled=False):
pxe_opts['ramdisk_opts'] = i_info['ramdisk_kernel_arguments']
except KeyError:
pass
+ try:
+ # TODO(TheJulia): Boot iso handling should change at a later
+ # point if we serve more than just a pass-through.
+ if i_info.get('boot_iso'):
+ pxe_opts['boot_iso_url'] = '/'.join(
+ [CONF.deploy.http_url, node.uuid, 'boot_iso'])
+ except KeyError:
+ pass
return pxe_opts
@@ -782,7 +808,8 @@ def build_pxe_config_options(task, pxe_info, service=False,
def build_service_pxe_config(task, instance_image_info,
root_uuid_or_disk_id,
ramdisk_boot=False,
- ipxe_enabled=False):
+ ipxe_enabled=False,
+ is_whole_disk_image=None):
node = task.node
pxe_config_path = get_pxe_config_file_path(node.uuid,
ipxe_enabled=ipxe_enabled)
@@ -799,15 +826,22 @@ def build_service_pxe_config(task, instance_image_info,
pxe_options = build_pxe_config_options(task, instance_image_info,
service=True,
ipxe_enabled=ipxe_enabled)
- pxe_config_template = deploy_utils.get_pxe_config_template(node)
+ if ipxe_enabled:
+ pxe_config_template = deploy_utils.get_ipxe_config_template(node)
+ else:
+ pxe_config_template = deploy_utils.get_pxe_config_template(node)
create_pxe_config(task, pxe_options, pxe_config_template,
ipxe_enabled=ipxe_enabled)
- iwdi = node.driver_internal_info.get('is_whole_disk_image')
+
+ if is_whole_disk_image is None:
+ is_whole_disk_image = node.driver_internal_info.get(
+ 'is_whole_disk_image')
deploy_utils.switch_pxe_config(
pxe_config_path, root_uuid_or_disk_id,
boot_mode_utils.get_boot_mode(node),
- iwdi, deploy_utils.is_trusted_boot_requested(node),
+ is_whole_disk_image,
+ deploy_utils.is_trusted_boot_requested(node),
deploy_utils.is_iscsi_boot(task), ramdisk_boot,
ipxe_enabled=ipxe_enabled)
@@ -917,7 +951,6 @@ def prepare_instance_pxe_config(task, image_info,
is in use by the caller.
:returns: None
"""
-
node = task.node
# Generate options for both IPv4 and IPv6, and they can be
# filtered down later based upon the port options.
@@ -937,8 +970,12 @@ def prepare_instance_pxe_config(task, image_info,
pxe_options = build_pxe_config_options(
task, image_info, service=ramdisk_boot,
ipxe_enabled=ipxe_enabled)
- pxe_config_template = (
- deploy_utils.get_pxe_config_template(node))
+ if ipxe_enabled:
+ pxe_config_template = (
+ deploy_utils.get_ipxe_config_template(node))
+ else:
+ pxe_config_template = (
+ deploy_utils.get_pxe_config_template(node))
create_pxe_config(
task, pxe_options, pxe_config_template,
ipxe_enabled=ipxe_enabled)
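The ipxe_enabled template selection above is now repeated across three call sites. A hedged sketch of how it could be collapsed into one helper (the wrapper name is hypothetical; the deploy_utils calls are the ones used in the hunks):

    def _config_template(node, ipxe_enabled):
        # Prefer the iPXE template when iPXE is in use, else plain PXE.
        if ipxe_enabled:
            return deploy_utils.get_ipxe_config_template(node)
        return deploy_utils.get_pxe_config_template(node)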
diff --git a/ironic/common/raid.py b/ironic/common/raid.py
index 3f503beb3..ab60bd603 100644
--- a/ironic/common/raid.py
+++ b/ironic/common/raid.py
@@ -114,7 +114,8 @@ def update_raid_info(node, raid_config):
if root_logical_disk:
# Update local_gb and root_device_hint
properties = node.properties
- properties['local_gb'] = root_logical_disk['size_gb']
+ if root_logical_disk['size_gb'] != 'MAX':
+ properties['local_gb'] = root_logical_disk['size_gb']
try:
properties['root_device'] = (
root_logical_disk['root_device_hint'])
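An illustrative raid_config (assumed shape) showing why the new guard matters: 'MAX' is a placeholder, not a number, so copying it into properties['local_gb'] would corrupt the property:

    raid_config = {
        'logical_disks': [
            # 'MAX' means "use all remaining space"; local_gb is
            # left untouched in this case.
            {'size_gb': 'MAX', 'raid_level': '1', 'is_root_volume': True},
        ]
    }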
diff --git a/ironic/common/release_mappings.py b/ironic/common/release_mappings.py
index bdbb98384..bc9565a98 100644
--- a/ironic/common/release_mappings.py
+++ b/ironic/common/release_mappings.py
@@ -230,12 +230,29 @@ RELEASE_MAPPING = {
'VolumeTarget': ['1.0'],
}
},
+ '15.1': {
+ 'api': '1.67',
+ 'rpc': '1.50',
+ 'objects': {
+ 'Allocation': ['1.1'],
+ 'Node': ['1.35', '1.34'],
+ 'Conductor': ['1.3'],
+ 'Chassis': ['1.3'],
+ 'DeployTemplate': ['1.1'],
+ 'Port': ['1.9'],
+ 'Portgroup': ['1.4'],
+ 'Trait': ['1.0'],
+ 'TraitList': ['1.0'],
+ 'VolumeConnector': ['1.0'],
+ 'VolumeTarget': ['1.0'],
+ }
+ },
'master': {
- 'api': '1.65',
+ 'api': '1.67',
'rpc': '1.50',
'objects': {
'Allocation': ['1.1'],
- 'Node': ['1.34'],
+ 'Node': ['1.35'],
'Conductor': ['1.3'],
'Chassis': ['1.3'],
'Deployment': ['1.0'],
@@ -265,7 +282,7 @@ RELEASE_MAPPING = {
# NOTE(mgoddard): remove Train prior to the Victoria release.
RELEASE_MAPPING['train'] = RELEASE_MAPPING['13.0']
-RELEASE_MAPPING['ussuri'] = RELEASE_MAPPING['15.0']
+RELEASE_MAPPING['ussuri'] = RELEASE_MAPPING['15.1']
# List of available versions with named versions first; 'master' is excluded.
RELEASE_VERSIONS = sorted(set(RELEASE_MAPPING) - {'master'}, reverse=True)
diff --git a/ironic/common/rpc_service.py b/ironic/common/rpc_service.py
index 9df03b917..a38582250 100644
--- a/ironic/common/rpc_service.py
+++ b/ironic/common/rpc_service.py
@@ -48,6 +48,8 @@ class RPCService(service.Service):
admin_context = context.get_admin_context()
serializer = objects_base.IronicObjectSerializer(is_server=True)
+ # Perform preparatory actions before starting the RPC listener
+ self.manager.prepare_host()
if CONF.rpc_transport == 'json-rpc':
self.rpcserver = json_rpc.WSGIService(self.manager,
serializer)
diff --git a/ironic/common/states.py b/ironic/common/states.py
index cefdbd838..567ce0eea 100644
--- a/ironic/common/states.py
+++ b/ironic/common/states.py
@@ -235,7 +235,7 @@ UNSTABLE_STATES = (DEPLOYING, DEPLOYWAIT, CLEANING, CLEANWAIT, VERIFYING,
"""States that can be changed without external request."""
STUCK_STATES_TREATED_AS_FAIL = (DEPLOYING, CLEANING, VERIFYING, INSPECTING,
- ADOPTING, RESCUING, UNRESCUING)
+ ADOPTING, RESCUING, UNRESCUING, DELETING)
"""States that cannot be resumed once a conductor dies.
If a node gets stuck with one of these states for some reason
@@ -384,7 +384,7 @@ machine.add_transition(DEPLOYWAIT, DELETING, 'delete')
machine.add_transition(DEPLOYFAIL, DELETING, 'delete')
# This state can also transition to error
-machine.add_transition(DELETING, ERROR, 'error')
+machine.add_transition(DELETING, ERROR, 'fail')
# When finished deleting, a node will begin cleaning
machine.add_transition(DELETING, CLEANING, 'clean')
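A short sketch of the renamed transition, assuming the usual automaton-based FSM API; the conductor's tear-down error path later in this patch switches to the matching process_event('fail') call:

    fsm = machine.copy()
    fsm.initialize(start_state=DELETING)
    fsm.process_event('fail')    # DELETING -> ERROR via the new event name
    assert fsm.current_state == ERROR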
diff --git a/ironic/common/utils.py b/ironic/common/utils.py
index 2d389af59..7cc0199bf 100644
--- a/ironic/common/utils.py
+++ b/ironic/common/utils.py
@@ -22,6 +22,7 @@ import contextlib
import datetime
import errno
import hashlib
+import ipaddress
import os
import re
import shutil
@@ -42,6 +43,15 @@ from ironic.conf import CONF
LOG = logging.getLogger(__name__)
+DATE_RE = r'(?P<year>-?\d{4,})-(?P<month>\d{2})-(?P<day>\d{2})'
+TIME_RE = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})' + \
+ r'(\.(?P<sec_frac>\d+))?'
+TZ_RE = r'((?P<tz_sign>[+-])(?P<tz_hour>\d{2}):(?P<tz_min>\d{2}))' + \
+ r'|(?P<tz_z>Z)'
+
+DATETIME_RE = re.compile(
+ '%sT%s(%s)?' % (DATE_RE, TIME_RE, TZ_RE))
+
warn_deprecated_extra_vif_port_id = False
@@ -567,6 +577,6 @@ def pop_node_nested_field(node, collection, field, default=None):
def wrap_ipv6(ip):
"""Wrap the address in square brackets if it's an IPv6 address."""
- if netutils.is_valid_ipv6(ip):
+ try:
+     is_ipv6 = ipaddress.ip_address(ip).version == 6
+ except ValueError:
+     # Not an IP literal (e.g. a hostname); pass it through unchanged.
+     is_ipv6 = False
+ if is_ipv6:
return "[%s]" % ip
return ip
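Usage sketch for the two utils.py additions, with the behavior assumed from the code above (wrap_ipv6() guarded against non-IP input):

    assert wrap_ipv6('192.0.2.5') == '192.0.2.5'
    assert wrap_ipv6('2001:db8::1') == '[2001:db8::1]'

    # DATETIME_RE accepts RFC 3339-style timestamps, with or without
    # fractional seconds and timezone designator.
    assert DATETIME_RE.match('2020-04-01T12:30:45.5+02:00')
    assert DATETIME_RE.match('2020-04-01T12:30:45Z')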
diff --git a/ironic/conductor/base_manager.py b/ironic/conductor/base_manager.py
index cd63d875c..d4361dfa9 100644
--- a/ironic/conductor/base_manager.py
+++ b/ironic/conductor/base_manager.py
@@ -80,6 +80,33 @@ class BaseConductorManager(object):
self._started = False
self._shutdown = None
self._zeroconf = None
+ self.dbapi = None
+
+ def prepare_host(self):
+ """Prepares host for initialization
+
+ Removes stale database entries related to node locking: target power
+ states left over for nodes in a transitory power state, and node
+ reservations presently held under this conductor's hostname.
+
+ Under normal operation, this is also when the initial database
+ connectivity is established for the conductor's normal operation.
+ """
+ # NOTE(TheJulia): We need to clear any locks the database still
+ # shows us holding early in the startup process. This must be done
+ # before we re-register our existence in the conductors table and
+ # begin accepting new requests via RPC; otherwise we may squash the
+ # *new* locks taken for new work.
+
+ if not self.dbapi:
+ LOG.debug('Initializing database client for %s.', self.host)
+ self.dbapi = dbapi.get_instance()
+ LOG.debug('Removing stale locks from the database matching '
+ 'this conductor\'s hostname: %s', self.host)
+ # clear all target_power_state with locks by this conductor
+ self.dbapi.clear_node_target_power_state(self.host)
+ # clear all locks held by this conductor before registering
+ self.dbapi.clear_node_reservations_for_conductor(self.host)
def init_host(self, admin_context=None):
"""Initialize the conductor host.
@@ -97,7 +124,8 @@ class BaseConductorManager(object):
'conductor manager'))
self._shutdown = False
- self.dbapi = dbapi.get_instance()
+ if not self.dbapi:
+ self.dbapi = dbapi.get_instance()
self._keepalive_evt = threading.Event()
"""Event for the keepalive thread."""
@@ -141,10 +169,6 @@ class BaseConductorManager(object):
self._collect_periodic_tasks(admin_context)
- # clear all target_power_state with locks by this conductor
- self.dbapi.clear_node_target_power_state(self.host)
- # clear all locks held by this conductor before registering
- self.dbapi.clear_node_reservations_for_conductor(self.host)
try:
# Register this conductor with the cluster
self.conductor = objects.Conductor.register(
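The resulting startup ordering, sketched for clarity (the exact sequence is assumed from the rpc_service.py and base_manager.py hunks; the point is that stale locks are cleared before new RPC work can create fresh ones):

    # sketch of RPCService.start():
    self.manager.prepare_host()     # DB client init + stale lock cleanup
    self.rpcserver = ...            # listener created, not yet serving
    self.manager.init_host(admin_context)  # register conductor, periodics
    self.rpcserver.start()          # only now is new RPC work accepted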
diff --git a/ironic/conductor/cleaning.py b/ironic/conductor/cleaning.py
index 351c38121..9a923abf7 100644
--- a/ironic/conductor/cleaning.py
+++ b/ironic/conductor/cleaning.py
@@ -21,6 +21,7 @@ from ironic.conductor import steps as conductor_steps
from ironic.conductor import task_manager
from ironic.conductor import utils
from ironic.conf import CONF
+from ironic.drivers import utils as driver_utils
LOG = log.getLogger(__name__)
@@ -182,6 +183,7 @@ def do_next_clean_step(task, step_index):
{'node': node.uuid, 'exc': e,
'step': node.clean_step})
LOG.exception(msg)
+ driver_utils.collect_ramdisk_logs(task.node, label='cleaning')
utils.cleaning_error_handler(task, msg)
return
@@ -206,20 +208,12 @@ def do_next_clean_step(task, step_index):
LOG.info('Node %(node)s finished clean step %(step)s',
{'node': node.uuid, 'step': step})
+ if CONF.agent.deploy_logs_collect == 'always':
+ driver_utils.collect_ramdisk_logs(task.node, label='cleaning')
+
# Clear clean_step
node.clean_step = None
- driver_internal_info = node.driver_internal_info
- driver_internal_info['clean_steps'] = None
- driver_internal_info.pop('clean_step_index', None)
- driver_internal_info.pop('cleaning_reboot', None)
- driver_internal_info.pop('cleaning_polling', None)
- driver_internal_info.pop('agent_secret_token', None)
- driver_internal_info.pop('agent_secret_token_pregenerated', None)
-
- # Remove agent_url
- if not utils.fast_track_able(task):
- driver_internal_info.pop('agent_url', None)
- node.driver_internal_info = driver_internal_info
+ utils.wipe_cleaning_internal_info(task)
node.save()
try:
task.driver.deploy.tear_down_cleaning(task)
@@ -267,15 +261,6 @@ def do_node_clean_abort(task, step_name=None):
node.last_error = last_error
node.clean_step = None
- info = node.driver_internal_info
- # Clear any leftover metadata about cleaning
- info.pop('clean_step_index', None)
- info.pop('cleaning_reboot', None)
- info.pop('cleaning_polling', None)
- info.pop('skip_current_clean_step', None)
- info.pop('agent_url', None)
- info.pop('agent_secret_token', None)
- info.pop('agent_secret_token_pregenerated', None)
- node.driver_internal_info = info
+ utils.wipe_cleaning_internal_info(task)
node.save()
LOG.info(info_message)
diff --git a/ironic/conductor/deployments.py b/ironic/conductor/deployments.py
index 3bda75b23..780b302c5 100644
--- a/ironic/conductor/deployments.py
+++ b/ironic/conductor/deployments.py
@@ -70,10 +70,6 @@ def start_deploy(task, manager, configdrive=None, event='deploy'):
:param event: event to process: deploy or rebuild.
"""
node = task.node
- # Record of any pre-existing agent_url should be removed
- # except when we are in fast track conditions.
- if not utils.is_fast_track(task):
- utils.remove_agent_url(node)
if event == 'rebuild':
# Note(gilliard) Clear these to force the driver to
@@ -102,7 +98,7 @@ def start_deploy(task, manager, configdrive=None, event='deploy'):
task.driver.power.validate(task)
task.driver.deploy.validate(task)
utils.validate_instance_info_traits(task.node)
- conductor_steps.validate_deploy_templates(task)
+ conductor_steps.validate_deploy_templates(task, skip_missing=True)
except exception.InvalidParameterValue as e:
raise exception.InstanceDeployFailure(
_("Failed to validate deploy or power info for node "
@@ -127,8 +123,7 @@ def start_deploy(task, manager, configdrive=None, event='deploy'):
def do_node_deploy(task, conductor_id=None, configdrive=None):
"""Prepare the environment and deploy a node."""
node = task.node
- utils.wipe_deploy_internal_info(node)
- utils.del_secret_token(node)
+ utils.wipe_deploy_internal_info(task)
try:
if configdrive:
if isinstance(configdrive, dict):
@@ -273,6 +268,13 @@ def do_next_deploy_step(task, step_index, conductor_id):
_("Failed to deploy. Exception: %s") % e, traceback=True)
return
+ if task.node.provision_state == states.DEPLOYFAIL:
+ # NOTE(dtantsur): some deploy steps do not raise but rather update
+ # the node and return. Take them into account.
+ LOG.debug('Node %s is in error state, not processing '
+ 'the remaining deploy steps', task.node)
+ return
+
if ind == 0:
# We've done the very first deploy step.
# Update conductor_affinity to reference this conductor's ID
@@ -291,7 +293,8 @@ def do_next_deploy_step(task, step_index, conductor_id):
LOG.info('Deploy step %(step)s on node %(node)s being '
'executed asynchronously, waiting for driver.',
{'node': node.uuid, 'step': step})
- task.process_event('wait')
+ if task.node.provision_state != states.DEPLOYWAIT:
+ task.process_event('wait')
return
elif result is not None:
# NOTE(rloo): This is an internal/dev error; shouldn't happen.
@@ -308,7 +311,7 @@ def do_next_deploy_step(task, step_index, conductor_id):
# Finished executing the steps. Clear deploy_step.
node.deploy_step = None
- utils.wipe_deploy_internal_info(node)
+ utils.wipe_deploy_internal_info(task)
node.save()
_start_console_in_deploy(task)
diff --git a/ironic/conductor/manager.py b/ironic/conductor/manager.py
index c66217336..356403bab 100644
--- a/ironic/conductor/manager.py
+++ b/ironic/conductor/manager.py
@@ -81,7 +81,8 @@ LOG = log.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)
-SYNC_EXCLUDED_STATES = (states.DEPLOYWAIT, states.CLEANWAIT, states.ENROLL)
+SYNC_EXCLUDED_STATES = (states.DEPLOYWAIT, states.CLEANWAIT, states.ENROLL,
+ states.ADOPTFAIL)
class ConductorManager(base_manager.BaseConductorManager):
@@ -209,6 +210,7 @@ class ConductorManager(base_manager.BaseConductorManager):
"updated unless it is in one of allowed "
"(%(allowed)s) states or in maintenance mode.")
updating_driver = 'driver' in delta
+ check_interfaces = updating_driver
for iface in drivers_base.ALL_INTERFACES:
interface_field = '%s_interface' % iface
if interface_field not in delta:
@@ -224,7 +226,10 @@ class ConductorManager(base_manager.BaseConductorManager):
'allowed': ', '.join(allowed_update_states),
'field': interface_field})
- driver_factory.check_and_update_node_interfaces(node_obj)
+ check_interfaces = True
+
+ if check_interfaces:
+ driver_factory.check_and_update_node_interfaces(node_obj)
# NOTE(dtantsur): if we're updating the driver from an invalid value,
# loading the old driver may be impossible. Since we only need to
@@ -602,12 +607,13 @@ class ConductorManager(base_manager.BaseConductorManager):
node_id, purpose='node rescue') as task:
node = task.node
- # Record of any pre-existing agent_url should be removed.
- utils.remove_agent_url(node)
if node.maintenance:
raise exception.NodeInMaintenance(op=_('rescuing'),
node=node.uuid)
+ # Record of any pre-existing agent_url should be removed.
+ utils.wipe_token_and_url(task)
+
# driver validation may check rescue_password, so save it on the
# node early
i_info = node.instance_info
@@ -754,6 +760,9 @@ class ConductorManager(base_manager.BaseConductorManager):
handle_failure(e,
_('Failed to unrescue. Exception: %s'),
log_func=LOG.exception)
+
+ utils.wipe_token_and_url(task)
+
if next_state == states.ACTIVE:
task.process_event('done')
else:
@@ -875,9 +884,15 @@ class ConductorManager(base_manager.BaseConductorManager):
# Agent is now running, we're ready to validate the remaining steps
if not info.get('steps_validated'):
- conductor_steps.validate_deploy_templates(task)
- conductor_steps.set_node_deployment_steps(
- task, reset_current=False)
+ try:
+ conductor_steps.validate_deploy_templates(task)
+ conductor_steps.set_node_deployment_steps(
+ task, reset_current=False)
+ except exception.IronicException as exc:
+ msg = _('Failed to validate the final deploy steps list '
+ 'for node %(node)s: %(exc)s') % {'node': node.uuid,
+ 'exc': exc}
+ return utils.deploying_error_handler(task, msg)
info['steps_validated'] = True
save_required = True
@@ -1011,7 +1026,7 @@ class ConductorManager(base_manager.BaseConductorManager):
LOG.exception('Error in tear_down of node %(node)s: %(err)s',
{'node': node.uuid, 'err': e})
node.last_error = _("Failed to tear down. Error: %s") % e
- task.process_event('error')
+ task.process_event('fail')
else:
# NOTE(tenbrae): When tear_down finishes, the deletion is done,
# cleaning will start next
@@ -1025,11 +1040,9 @@ class ConductorManager(base_manager.BaseConductorManager):
# But we do need to clear the instance-related fields.
node.instance_info = {}
node.instance_uuid = None
+ utils.wipe_deploy_internal_info(task)
driver_internal_info = node.driver_internal_info
- driver_internal_info.pop('agent_secret_token', None)
- driver_internal_info.pop('agent_secret_token_pregenerated', None)
driver_internal_info.pop('instance', None)
- driver_internal_info.pop('clean_steps', None)
driver_internal_info.pop('root_uuid_or_disk_id', None)
driver_internal_info.pop('is_whole_disk_image', None)
driver_internal_info.pop('deploy_boot_mode', None)
@@ -1220,6 +1233,8 @@ class ConductorManager(base_manager.BaseConductorManager):
error = (_('Failed to validate power driver interface for node '
'%(node)s. Error: %(msg)s') %
{'node': node.uuid, 'msg': e})
+ log_traceback = not isinstance(e, exception.IronicException)
+ LOG.error(error, exc_info=log_traceback)
else:
try:
power_state = task.driver.power.get_power_state(task)
@@ -1227,6 +1242,8 @@ class ConductorManager(base_manager.BaseConductorManager):
error = (_('Failed to get power state for node '
'%(node)s. Error: %(msg)s') %
{'node': node.uuid, 'msg': e})
+ log_traceback = not isinstance(e, exception.IronicException)
+ LOG.error(error, exc_info=log_traceback)
if error is None:
if power_state != node.power_state:
@@ -1238,7 +1255,6 @@ class ConductorManager(base_manager.BaseConductorManager):
else:
task.process_event('done')
else:
- LOG.error(error)
node.last_error = error
task.process_event('fail')
@@ -1599,7 +1615,7 @@ class ConductorManager(base_manager.BaseConductorManager):
@periodics.periodic(
spacing=CONF.conductor.check_provision_state_interval,
enabled=CONF.conductor.check_provision_state_interval > 0
- and CONF.conductor.deploy_callback_timeout != 0)
+ and CONF.conductor.deploy_callback_timeout > 0)
def _check_deploy_timeouts(self, context):
"""Periodically checks whether a deploy RPC call has timed out.
@@ -1607,8 +1623,6 @@ class ConductorManager(base_manager.BaseConductorManager):
:param context: request context.
"""
- # FIXME(rloo): If the value is < 0, it will be enabled. That doesn't
- # seem right.
callback_timeout = CONF.conductor.deploy_callback_timeout
filters = {'reserved': False,
@@ -1764,6 +1778,8 @@ class ConductorManager(base_manager.BaseConductorManager):
msg = (_('Error while attempting to adopt node %(node)s: '
'%(err)s.') % {'node': node.uuid, 'err': err})
LOG.error(msg)
+ # Wipe power state from being preserved as it is likely invalid.
+ node.power_state = states.NOSTATE
node.last_error = msg
task.process_event('fail')
@@ -1821,7 +1837,7 @@ class ConductorManager(base_manager.BaseConductorManager):
@periodics.periodic(
spacing=CONF.conductor.check_provision_state_interval,
enabled=CONF.conductor.check_provision_state_interval > 0
- and CONF.conductor.clean_callback_timeout != 0)
+ and CONF.conductor.clean_callback_timeout > 0)
def _check_cleanwait_timeouts(self, context):
"""Periodically checks for nodes being cleaned.
@@ -1830,8 +1846,6 @@ class ConductorManager(base_manager.BaseConductorManager):
:param context: request context.
"""
- # FIXME(rloo): If the value is < 0, it will be enabled. That doesn't
- # seem right.
callback_timeout = CONF.conductor.clean_callback_timeout
filters = {'reserved': False,
@@ -1994,6 +2008,7 @@ class ConductorManager(base_manager.BaseConductorManager):
# we would disallow it otherwise. That's done for recovering hopelessly
# broken nodes (e.g. with broken BMC).
with task_manager.acquire(context, node_id,
+ load_driver=False,
purpose='node deletion') as task:
node = task.node
if not node.maintenance and node.instance_uuid is not None:
@@ -2028,6 +2043,17 @@ class ConductorManager(base_manager.BaseConductorManager):
if node.console_enabled:
notify_utils.emit_console_notification(
task, 'console_set', fields.NotificationStatus.START)
+
+ try:
+ task.load_driver()
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception('Could not load the driver for node %s '
+ 'to shut down its console', node.uuid)
+ notify_utils.emit_console_notification(
+ task, 'console_set',
+ fields.NotificationStatus.ERROR)
+
try:
task.driver.console.stop_console(task)
except Exception as err:
@@ -2056,23 +2082,21 @@ class ConductorManager(base_manager.BaseConductorManager):
:raises: NodeLocked if node is locked by another conductor.
:raises: NodeNotFound if the node associated with the port does not
exist.
+ :raises: InvalidState if a vif is still attached to the port.
"""
LOG.debug('RPC destroy_port called for port %(port)s',
{'port': port.uuid})
with task_manager.acquire(context, port.node_id,
purpose='port deletion') as task:
- node = task.node
- vif = task.driver.network.get_current_vif(task, port)
- if ((node.provision_state == states.ACTIVE or node.instance_uuid)
- and not node.maintenance and vif):
- msg = _("Cannot delete the port %(port)s as node "
- "%(node)s is active or has "
- "instance UUID assigned or port is bound "
- "to vif %(vif)s")
- raise exception.InvalidState(msg % {'node': node.uuid,
- 'port': port.uuid,
- 'vif': vif})
+ vif, vif_use = utils.get_attached_vif(port)
+ if vif and not task.node.maintenance:
+ msg = _("Cannot delete the port %(port)s as it is bound "
+ "to VIF %(vif)s for %(use)s use.")
+ raise exception.InvalidState(
+ msg % {'port': port.uuid,
+ 'vif': vif,
+ 'use': vif_use})
port.destroy()
LOG.info('Successfully deleted port %(port)s. '
'The node associated with the port was %(node)s',
@@ -2982,15 +3006,13 @@ class ConductorManager(base_manager.BaseConductorManager):
@periodics.periodic(
spacing=CONF.conductor.check_provision_state_interval,
enabled=CONF.conductor.check_provision_state_interval > 0
- and CONF.conductor.inspect_wait_timeout != 0)
+ and CONF.conductor.inspect_wait_timeout > 0)
def _check_inspect_wait_timeouts(self, context):
"""Periodically checks inspect_wait_timeout and fails upon reaching it.
:param context: request context
"""
- # FIXME(rloo): If the value is < 0, it will be enabled. That doesn't
- # seem right.
callback_timeout = CONF.conductor.inspect_wait_timeout
filters = {'reserved': False,
@@ -3074,6 +3096,7 @@ class ConductorManager(base_manager.BaseConductorManager):
return raid_iface.get_logical_disk_properties()
@METRICS.timer('ConductorManager.heartbeat')
+ @messaging.expected_exceptions(exception.InvalidParameterValue)
@messaging.expected_exceptions(exception.NoFreeConductorWorker)
def heartbeat(self, context, node_id, callback_url, agent_version=None,
agent_token=None):
@@ -3089,6 +3112,7 @@ class ConductorManager(base_manager.BaseConductorManager):
agent_version, in these cases assume agent v3.0.0 (the last release
before sending agent_version was introduced).
:param callback_url: URL to reach back to the ramdisk.
+ :param agent_token: randomly generated validation token.
:raises: NoFreeConductorWorker if there are no conductors to process
this heartbeat request.
"""
@@ -3661,7 +3685,8 @@ def do_sync_power_state(task, count):
handle_sync_power_state_max_retries_exceeded(task, power_state)
return count
- if CONF.conductor.force_power_state_during_sync:
+ if (CONF.conductor.force_power_state_during_sync
+ and task.driver.power.supports_power_sync(task)):
LOG.warning("During sync_power_state, node %(node)s state "
"'%(actual)s' does not match expected state. "
"Changing hardware state to '%(state)s'.",
diff --git a/ironic/conductor/rpcapi.py b/ironic/conductor/rpcapi.py
index 4b1929b02..8752914ac 100644
--- a/ironic/conductor/rpcapi.py
+++ b/ironic/conductor/rpcapi.py
@@ -905,7 +905,9 @@ class ConductorAPI(object):
:param node_id: node ID or UUID.
:param callback_url: URL to reach back to the ramdisk.
:param topic: RPC topic. Defaults to self.topic.
+ :param agent_token: randomly generated validation token.
:param agent_version: the version of the agent that is heartbeating
+ :raises: InvalidParameterValue if an invalid agent token is received.
"""
new_kws = {}
version = '1.34'
diff --git a/ironic/conductor/task_manager.py b/ironic/conductor/task_manager.py
index 5fb42f247..b4089d618 100644
--- a/ironic/conductor/task_manager.py
+++ b/ironic/conductor/task_manager.py
@@ -253,6 +253,10 @@ class TaskManager(object):
self.fsm.initialize(start_state=self.node.provision_state,
target_state=self.node.target_provision_state)
+ def load_driver(self):
+ if self.driver is None:
+ self.driver = driver_factory.build_driver_for_task(self)
+
def _lock(self):
self._debug_timer.restart()
@@ -332,6 +336,19 @@ class TaskManager(object):
self._on_error_args = args
self._on_error_kwargs = kwargs
+ def downgrade_lock(self):
+ """Downgrade the lock to a shared one."""
+ if self.node is None:
+ raise RuntimeError("Cannot downgrade an already released lock")
+
+ if not self.shared:
+ objects.Node.release(self.context, CONF.host, self.node.id)
+ self.shared = True
+ self.node.refresh()
+ LOG.debug("Successfully downgraded lock for %(purpose)s "
+ "on node %(node)s",
+ {'purpose': self._purpose, 'node': self.node.uuid})
+
def release_resources(self):
"""Unlock a node and release resources.
diff --git a/ironic/conductor/utils.py b/ironic/conductor/utils.py
index 836409dad..b30fdee5e 100644
--- a/ironic/conductor/utils.py
+++ b/ironic/conductor/utils.py
@@ -284,6 +284,13 @@ def node_power_action(task, new_state, timeout=None):
driver_internal_info = node.driver_internal_info
driver_internal_info['last_power_state_change'] = str(
timeutils.utcnow().isoformat())
+ # NOTE(dtantsur): wipe token on shutting down, otherwise a reboot in
+ # fast-track (or an accidentally booted agent) will cause subsequent
+ # actions to fail.
+ if target_state in (states.POWER_OFF, states.SOFT_POWER_OFF,
+ states.REBOOT, states.SOFT_REBOOT):
+ if not is_agent_token_pregenerated(node):
+ driver_internal_info.pop('agent_secret_token', None)
node.driver_internal_info = driver_internal_info
node.save()
@@ -444,12 +451,23 @@ def cleaning_error_handler(task, msg, tear_down_cleaning=True,
task.process_event('fail', target_state=target_state)
-def wipe_deploy_internal_info(node):
- """Remove temporary deployment fields from driver_internal_info."""
- info = node.driver_internal_info
+def wipe_token_and_url(task):
+ """Remove agent URL and token from the task."""
+ info = task.node.driver_internal_info
info.pop('agent_secret_token', None)
info.pop('agent_secret_token_pregenerated', None)
+ # Remove agent_url since it will be re-asserted
+ # upon the next deployment attempt.
+ info.pop('agent_url', None)
+ task.node.driver_internal_info = info
+
+
+def wipe_deploy_internal_info(task):
+ """Remove temporary deployment fields from driver_internal_info."""
+ if not fast_track_able(task):
+ wipe_token_and_url(task)
# Clear any leftover metadata about deployment.
+ info = task.node.driver_internal_info
info['deploy_steps'] = None
info.pop('agent_cached_deploy_steps', None)
info.pop('deploy_step_index', None)
@@ -457,10 +475,22 @@ def wipe_deploy_internal_info(node):
info.pop('deployment_polling', None)
info.pop('skip_current_deploy_step', None)
info.pop('steps_validated', None)
- # Remove agent_url since it will be re-asserted
- # upon the next deployment attempt.
- info.pop('agent_url', None)
- node.driver_internal_info = info
+ task.node.driver_internal_info = info
+
+
+def wipe_cleaning_internal_info(task):
+ """Remove temporary cleaning fields from driver_internal_info."""
+ if not fast_track_able(task):
+ wipe_token_and_url(task)
+ info = task.node.driver_internal_info
+ info['clean_steps'] = None
+ info.pop('agent_cached_clean_steps', None)
+ info.pop('clean_step_index', None)
+ info.pop('cleaning_reboot', None)
+ info.pop('cleaning_polling', None)
+ info.pop('skip_current_clean_step', None)
+ info.pop('steps_validated', None)
+ task.node.driver_internal_info = info
def deploying_error_handler(task, logmsg, errmsg=None, traceback=False,
@@ -503,7 +533,7 @@ def deploying_error_handler(task, logmsg, errmsg=None, traceback=False,
# Clear deploy step; we leave the list of deploy steps
# in node.driver_internal_info for debugging purposes.
node.deploy_step = {}
- wipe_deploy_internal_info(node)
+ wipe_deploy_internal_info(task)
if cleanup_err:
node.last_error = cleanup_err
@@ -938,6 +968,21 @@ def value_within_timeout(value, timeout):
return last_valid <= last
+def agent_is_alive(node, timeout=None):
+ """Check that the agent is likely alive.
+
+ The method checks the last agent heartbeat, and if it occurred
+ within the timeout set by [deploy]fast_track_timeout, the agent is
+ presumed alive.
+
+ :param node: A node object.
+ :param timeout: Heartbeat timeout, defaults to `fast_track_timeout`.
+ """
+ return value_within_timeout(
+ node.driver_internal_info.get('agent_last_heartbeat'),
+ timeout or CONF.deploy.fast_track_timeout)
+
+
def is_fast_track(task):
"""Checks a fast track is available.
@@ -954,11 +999,23 @@ def is_fast_track(task):
:returns: True if the last heartbeat that was recorded was within
the [deploy]fast_track_timeout setting.
"""
- return (fast_track_able(task)
- and value_within_timeout(
- task.node.driver_internal_info.get('agent_last_heartbeat'),
- CONF.deploy.fast_track_timeout)
- and task.driver.power.get_power_state(task) == states.POWER_ON)
+ if (not fast_track_able(task)
+ or task.driver.power.get_power_state(task) != states.POWER_ON):
+ if task.node.last_error:
+ LOG.debug('Node %(node)s is not fast-track-able because it has '
+ 'an error: %(error)s',
+ {'node': task.node.uuid, 'error': task.node.last_error})
+ return False
+
+ if agent_is_alive(task.node):
+ return True
+ else:
+ LOG.debug('Node %(node)s should be fast-track-able, but the agent '
+ 'doesn\'t seem to be running. Last heartbeat: %(last)s',
+ {'node': task.node.uuid,
+ 'last': task.node.driver_internal_info.get(
+ 'agent_last_heartbeat')})
+ return False
def remove_agent_url(node):
@@ -1047,19 +1104,6 @@ def add_secret_token(node, pregenerated=False):
node.driver_internal_info = i_info
-def del_secret_token(node):
- """Deletes the IPA agent secret token.
-
- Removes the agent token secret from the driver_internal_info field
- from the Node object.
-
- :param node: Node object
- """
- i_info = node.driver_internal_info
- i_info.pop('agent_secret_token', None)
- node.driver_internal_info = i_info
-
-
def is_agent_token_present(node):
"""Determines if an agent token is present upon a node.
@@ -1144,3 +1188,33 @@ def hash_password(password=''):
:param password: Password to be hashed
"""
return crypt.crypt(password, make_salt())
+
+
+def get_attached_vif(port):
+ """Get any attached vif ID for the port
+
+ :param port: The port object upon which to check for a vif
+ record.
+ :returns: A tuple of (vif, use), where vif is the attached vif ID
+ (or None if none is attached) and use is one of the strings
+ 'tenant', 'cleaning', 'provisioning', 'rescuing' or
+ 'inspecting'. Returns (None, None) if no vif is attached.
+ """
+
+ tenant_vif = port.internal_info.get('tenant_vif_port_id')
+ if tenant_vif:
+ return (tenant_vif, 'tenant')
+ clean_vif = port.internal_info.get('cleaning_vif_port_id')
+ if clean_vif:
+ return (clean_vif, 'cleaning')
+ prov_vif = port.internal_info.get('provisioning_vif_port_id')
+ if prov_vif:
+ return (prov_vif, 'provisioning')
+ rescue_vif = port.internal_info.get('rescuing_vif_port_id')
+ if rescue_vif:
+ return (rescue_vif, 'rescuing')
+ inspection_vif = port.internal_info.get('inspection_vif_port_id')
+ if inspection_vif:
+ return (inspection_vif, 'inspecting')
+ return (None, None)
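Behavior sketch for get_attached_vif(), with assumed internal_info shapes:

    port.internal_info = {'provisioning_vif_port_id': 'fa2b5d3a'}
    assert get_attached_vif(port) == ('fa2b5d3a', 'provisioning')

    port.internal_info = {}
    assert get_attached_vif(port) == (None, None)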
diff --git a/ironic/conf/agent.py b/ironic/conf/agent.py
index f0cb7c820..8572a25bd 100644
--- a/ironic/conf/agent.py
+++ b/ironic/conf/agent.py
@@ -28,6 +28,7 @@ opts = [
'ramdisk.')),
cfg.IntOpt('memory_consumed_by_agent',
default=0,
+ mutable=True,
help=_('The memory size in MiB consumed by agent when it is '
'booted on a bare metal node. This is used for '
'checking if the image can be downloaded and deployed '
@@ -36,6 +37,7 @@ opts = [
'the agent ramdisk image.')),
cfg.BoolOpt('stream_raw_images',
default=True,
+ mutable=True,
help=_('Whether the agent ramdisk should stream raw images '
'directly onto the disk or not. By streaming raw '
'images directly onto the disk the agent ramdisk will '
@@ -63,6 +65,7 @@ opts = [
'failure')),
('never', _('never collect logs'))],
default='on_failure',
+ mutable=True,
help=_('Whether Ironic should collect the deployment logs on '
'deployment failure (on_failure), always or never.')),
cfg.StrOpt('deploy_logs_storage_backend',
@@ -70,20 +73,24 @@ opts = [
('swift', _('store the logs in Object Storage '
'service'))],
default='local',
+ mutable=True,
help=_('The name of the storage backend where the logs '
'will be stored.')),
cfg.StrOpt('deploy_logs_local_path',
default='/var/log/ironic/deploy',
+ mutable=True,
help=_('The path to the directory where the logs should be '
'stored, used when the deploy_logs_storage_backend '
'is configured to "local".')),
cfg.StrOpt('deploy_logs_swift_container',
default='ironic_deploy_logs_container',
+ mutable=True,
help=_('The name of the Swift container to store the logs, '
'used when the deploy_logs_storage_backend is '
'configured to "swift".')),
cfg.IntOpt('deploy_logs_swift_days_to_expire',
default=30,
+ mutable=True,
help=_('Number of days before a log object is marked as '
'expired in Swift. If None, the logs will be kept '
'forever or until manually deleted. Used when the '
@@ -96,6 +103,7 @@ opts = [
'from HTTP service served at conductor '
'nodes.'))],
default='swift',
+ mutable=True,
help=_('Specifies whether direct deploy interface should try '
'to use the image source directly or if ironic should '
'cache the image on the conductor and serve it from '
@@ -104,18 +112,24 @@ opts = [
'service.')),
cfg.IntOpt('command_timeout',
default=60,
- help=_('Timeout (in seconds) for IPA commands. '
- 'Please note, the bootloader installation command '
- 'to the agent is permitted a timeout of twice the '
- 'value set here as these are IO heavy operations '
- 'depending on the configuration of the instance.')),
+ mutable=True,
+ help=_('Timeout (in seconds) for IPA commands.')),
cfg.IntOpt('max_command_attempts',
default=3,
help=_('This is the maximum number of attempts that will be '
'done for IPA commands that fail due to network '
'problems.')),
+ cfg.IntOpt('command_wait_attempts',
+ default=100,
+ help=_('Number of attempts to check for asynchronous command '
+ 'completion before timing out.')),
+ cfg.IntOpt('command_wait_interval',
+ default=6,
+ help=_('Number of seconds to wait between checks for '
+ 'asynchronous command completion.')),
cfg.IntOpt('neutron_agent_poll_interval',
default=2,
+ mutable=True,
help=_('The number of seconds Neutron agent will wait between '
'polling for device changes. This value should be '
'the same as CONF.AGENT.polling_interval in Neutron '
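Context for the many mutable=True additions in this and the following conf modules: oslo.config applies changes to mutable options on a config-file reload rather than requiring a service restart. A minimal sketch of the reload path:

    from oslo_config import cfg

    # Typically triggered by SIGHUP in the service wrapper; only options
    # registered with mutable=True pick up new values.
    cfg.CONF.mutate_config_files()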
diff --git a/ironic/conf/api.py b/ironic/conf/api.py
index bda28d55a..80a30acfc 100644
--- a/ironic/conf/api.py
+++ b/ironic/conf/api.py
@@ -28,9 +28,11 @@ opts = [
help=_('The TCP port on which ironic-api listens.')),
cfg.IntOpt('max_limit',
default=1000,
+ mutable=True,
help=_('The maximum number of items returned in a single '
'response from a collection resource.')),
cfg.StrOpt('public_endpoint',
+ mutable=True,
help=_("Public URL to use when building the links to the API "
"resources (for example, \"https://ironic.rocks:6384\")."
" If None the links will be built using the request's "
@@ -57,10 +59,12 @@ opts = [
"to set URLs in responses to the SSL terminated one.")),
cfg.BoolOpt('restrict_lookup',
default=True,
+ mutable=True,
help=_('Whether to restrict the lookup API to only nodes '
'in certain states.')),
cfg.IntOpt('ramdisk_heartbeat_timeout',
default=300,
+ mutable=True,
help=_('Maximum interval (in seconds) for agent heartbeats.')),
]
diff --git a/ironic/conf/cinder.py b/ironic/conf/cinder.py
index a67cbd72a..cbe55ea0c 100644
--- a/ironic/conf/cinder.py
+++ b/ironic/conf/cinder.py
@@ -17,14 +17,6 @@ from ironic.common.i18n import _
from ironic.conf import auth
opts = [
- cfg.URIOpt('url',
- schemes=('http', 'https'),
- deprecated_for_removal=True,
- deprecated_reason=_('Use [cinder]/endpoint_override option '
- 'to set a specific cinder API URL to '
- 'connect to.'),
- help=_('URL for connecting to cinder. If set, the value must '
- 'start with either http:// or https://.')),
cfg.IntOpt('retries',
default=3,
help=_('Client retries in the case of a failed request '
diff --git a/ironic/conf/conductor.py b/ironic/conf/conductor.py
index da98678a6..494317f8d 100644
--- a/ironic/conf/conductor.py
+++ b/ironic/conf/conductor.py
@@ -30,22 +30,12 @@ opts = [
cfg.IntOpt('heartbeat_interval',
default=10,
help=_('Seconds between conductor heart beats.')),
- cfg.URIOpt('api_url',
- schemes=('http', 'https'),
- deprecated_for_removal=True,
- deprecated_reason=_("Use [service_catalog]endpoint_override "
- "option instead if required to use "
- "a specific ironic api address, "
- "for example in noauth mode."),
- help=_('URL of Ironic API service. If not set ironic can '
- 'get the current value from the keystone service '
- 'catalog. If set, the value must start with either '
- 'http:// or https://.')),
cfg.IntOpt('heartbeat_timeout',
default=60,
# We're using timedelta which can overflow if somebody sets this
# too high, so limit to a sane value of 10 years.
max=315576000,
+ mutable=True,
help=_('Maximum time (in seconds) since the last check-in '
'of a conductor. A conductor is considered inactive '
'when this time has been exceeded.')),
@@ -70,10 +60,12 @@ opts = [
'in seconds. Set to 0 to disable checks.')),
cfg.IntOpt('deploy_callback_timeout',
default=1800,
+ min=0,
help=_('Timeout (seconds) to wait for a callback from '
'a deploy ramdisk. Set to 0 to disable timeout.')),
cfg.BoolOpt('force_power_state_during_sync',
default=True,
+ mutable=True,
help=_('During sync_power_state, should the hardware power '
'state be set to the state recorded in the database '
'(True) or should the database be updated based on '
@@ -158,10 +150,12 @@ opts = [
'configdrive_use_object_store is True.')),
cfg.IntOpt('inspect_wait_timeout',
default=1800,
+ min=0,
help=_('Timeout (seconds) for waiting for node inspection. '
'0 - unlimited.')),
cfg.BoolOpt('automated_clean',
default=True,
+ mutable=True,
help=_('Enables or disables automated cleaning. Automated '
'cleaning is a configurable set of steps, '
'such as erasing disk drives, that are performed on '
@@ -192,6 +186,7 @@ opts = [
'maintenance will make the process continue.')),
cfg.IntOpt('clean_callback_timeout',
default=1800,
+ min=0,
help=_('Timeout (seconds) to wait for a callback from the '
'ramdisk doing the cleaning. If the timeout is reached '
'the node will be put in the "clean failed" provision '
@@ -206,10 +201,12 @@ opts = [
cfg.IntOpt('soft_power_off_timeout',
default=600,
min=1,
+ mutable=True,
help=_('Timeout (in seconds) of soft reboot and soft power '
'off operation. This value always has to be positive.')),
cfg.IntOpt('power_state_change_timeout',
min=2, default=60,
+ mutable=True,
help=_('Number of seconds to wait for power operations to '
'complete, i.e., so that a baremetal node is in the '
'desired power state. If timed out, the power operation '
@@ -255,11 +252,13 @@ opts = [
cfg.StrOpt('rescue_password_hash_algorithm',
default='sha256',
choices=['sha256', 'sha512'],
+ mutable=True,
help=_('Password hash algorithm to be used for the rescue '
'password.')),
cfg.BoolOpt('require_rescue_password_hashed',
# TODO(TheJulia): Change this to True in Victoria.
default=False,
+ mutable=True,
help=_('Option to cause the conductor to not fallback to '
'an un-hashed version of the rescue password, '
'permitting rescue with older ironic-python-agent '
diff --git a/ironic/conf/console.py b/ironic/conf/console.py
index 566f3bf1f..bb3d67cca 100644
--- a/ironic/conf/console.py
+++ b/ironic/conf/console.py
@@ -47,8 +47,8 @@ opts = [
'to start.')),
cfg.IntOpt('kill_timeout',
default=1,
- help=_('Time (in seconds) to wait for the shellinabox console '
- 'subprocess to exit before sending SIGKILL signal.')),
+ help=_('Time (in seconds) to wait for the console subprocess '
+ 'to exit before sending SIGKILL signal.')),
cfg.IPOpt('socat_address',
default='$my_ip',
help=_('IP address of Socat service running on the host of '
diff --git a/ironic/conf/default.py b/ironic/conf/default.py
index 9621c2db5..b799208f0 100644
--- a/ironic/conf/default.py
+++ b/ironic/conf/default.py
@@ -17,6 +17,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import hashlib
import os
import socket
import tempfile
@@ -58,10 +59,15 @@ api_opts = [
default='keystone',
choices=[('noauth', _('no authentication')),
('keystone', _('use the Identity service for '
- 'authentication'))],
+ 'authentication')),
+ ('http_basic', _('HTTP basic authentication'))],
help=_('Authentication strategy used by ironic-api. "noauth" should '
'not be used in a production environment because all '
'authentication will be disabled.')),
+ cfg.StrOpt('http_basic_auth_user_file',
+ default='/etc/ironic/htpasswd',
+ help=_('Path to Apache format user authentication file used '
+ 'when auth_strategy=http_basic')),
cfg.BoolOpt('debug_tracebacks_in_api',
default=False,
help=_('Return server tracebacks in the API response for any '
@@ -72,6 +78,7 @@ api_opts = [
help=_('Enable pecan debug mode. WARNING: this is insecure '
'and should not be used in a production environment.')),
cfg.StrOpt('default_resource_class',
+ mutable=True,
help=_('Resource class to use for new nodes when no resource '
'class is provided in the creation request.')),
]
@@ -152,18 +159,9 @@ driver_opts = [
]
exc_log_opts = [
- cfg.BoolOpt('fatal_exception_format_errors',
- default=False,
- help=_('Used if there is a formatting error when generating '
- 'an exception message (a programming error). If True, '
- 'raise an exception; if False, use the unformatted '
- 'message.'),
- deprecated_for_removal=True,
- deprecated_reason=_('Same option in the ironic_lib section '
- 'should be used instead.')),
cfg.IntOpt('log_in_db_max_size', default=4096,
help=_('Max number of characters of any node '
- 'last_error/maintenance_reason pushed to database.')),
+ 'last_error/maintenance_reason pushed to database.'))
]
hash_opts = [
@@ -186,11 +184,21 @@ hash_opts = [
help=_('Time (in seconds) after which the hash ring is '
'considered outdated and is refreshed on the next '
'access.')),
+ cfg.StrOpt('hash_ring_algorithm',
+ default='md5',
+ advanced=True,
+ choices=hashlib.algorithms_guaranteed,
+ help=_('Hash function to use when building the hash ring. '
+ 'If running on a FIPS system, do not use md5. '
+ 'WARNING: all ironic services in a cluster MUST use '
+ 'the same algorithm at all times. Changing the '
+ 'algorithm requires an offline update.')),
]
image_opts = [
cfg.BoolOpt('force_raw_images',
default=True,
+ mutable=True,
help=_('If True, convert backing images to "raw" disk image '
'format.')),
cfg.StrOpt('isolinux_bin',
@@ -231,6 +239,7 @@ image_opts = [
img_cache_opts = [
cfg.BoolOpt('parallel_image_downloads',
default=False,
+ mutable=True,
help=_('Run image downloads and raw format conversions in '
'parallel.')),
]
@@ -304,6 +313,7 @@ path_opts = [
portgroup_opts = [
cfg.StrOpt(
'default_portgroup_mode', default='active-backup',
+ mutable=True,
help=_(
'Default mode for portgroups. Allowed values can be found in the '
'linux kernel documentation on bonding: '
@@ -340,6 +350,7 @@ service_opts = [
'conductor and API services')),
cfg.BoolOpt('require_agent_token',
default=False,
+ mutable=True,
help=_('Used to require the use of agent tokens. These '
'tokens are used to guard the api lookup endpoint and '
'conductor heartbeat processing logic to authenticate '
diff --git a/ironic/conf/deploy.py b/ironic/conf/deploy.py
index 500324f1c..cae1b123f 100644
--- a/ironic/conf/deploy.py
+++ b/ironic/conf/deploy.py
@@ -29,15 +29,18 @@ opts = [
help=_("ironic-conductor node's HTTP root path.")),
cfg.BoolOpt('enable_ata_secure_erase',
default=True,
+ mutable=True,
help=_('Whether to support the use of ATA Secure Erase '
'during the cleaning process. Defaults to True.')),
cfg.IntOpt('erase_devices_priority',
+ mutable=True,
help=_('Priority to run in-band erase devices via the Ironic '
'Python Agent ramdisk. If unset, will use the priority '
'set in the ramdisk (defaults to 10 for the '
'GenericHardwareManager). If set to 0, will not run '
'during cleaning.')),
cfg.IntOpt('erase_devices_metadata_priority',
+ mutable=True,
help=_('Priority to run in-band clean step that erases '
'metadata from devices, via the Ironic Python Agent '
'ramdisk. If unset, will use the priority set in the '
@@ -47,11 +50,13 @@ opts = [
cfg.IntOpt('shred_random_overwrite_iterations',
default=1,
min=0,
+ mutable=True,
help=_('During shred, overwrite all block devices N times with '
'random data. This is only used if a device could not '
'be ATA Secure Erased. Defaults to 1.')),
cfg.BoolOpt('shred_final_overwrite_with_zeros',
default=True,
+ mutable=True,
help=_("Whether to write zeros to a node's block devices "
"after writing random data. This will write zeros to "
"the device even when "
@@ -60,6 +65,7 @@ opts = [
"Secure Erased. Defaults to True.")),
cfg.BoolOpt('continue_if_disk_secure_erase_fails',
default=False,
+ mutable=True,
help=_('Defines what to do if an ATA secure erase operation '
'fails during cleaning in the Ironic Python Agent. '
'If False, the cleaning operation will fail and the '
@@ -69,18 +75,21 @@ opts = [
cfg.IntOpt('disk_erasure_concurrency',
default=1,
min=1,
+ mutable=True,
help=_('Defines the target pool size used by Ironic Python '
'Agent ramdisk to erase disk devices. The number of '
'threads created to erase disks will not exceed this '
'value or the number of disks to be erased.')),
cfg.BoolOpt('power_off_after_deploy_failure',
default=True,
+ mutable=True,
help=_('Whether to power off a node after deploy failure. '
'Defaults to True.')),
cfg.StrOpt('default_boot_option',
choices=[('netboot', _('boot from a network')),
('local', _('local boot'))],
default='local',
+ mutable=True,
help=_('Default boot option to use when no boot option is '
'requested in node\'s driver_info. Defaults to '
'"local". Prior to the Ussuri release, the default '
@@ -89,6 +98,7 @@ opts = [
choices=[(boot_modes.UEFI, _('UEFI boot mode')),
(boot_modes.LEGACY_BIOS, _('Legacy BIOS boot mode'))],
default=boot_modes.LEGACY_BIOS,
+ mutable=True,
help=_('Default boot mode to use when no boot mode is '
'requested in node\'s driver_info, capabilities or '
'in the `instance_info` configuration. Currently the '
@@ -103,6 +113,7 @@ opts = [
default=False,
deprecated_group='conductor',
deprecated_name='configdrive_use_swift',
+ mutable=True,
help=_('Whether to upload the config drive to object store. '
'Set this option to True to store config drive '
'in a swift endpoint.')),
@@ -115,6 +126,7 @@ opts = [
'instead of swift tempurls.')),
cfg.BoolOpt('fast_track',
default=False,
+ mutable=True,
help=_('Whether to allow deployment agents to perform lookup, '
'heartbeat operations during initial states of a '
'machine lifecycle and by-pass the normal setup '
@@ -127,6 +139,7 @@ opts = [
default=300,
min=0,
max=300,
+ mutable=True,
help=_('Seconds for which the last heartbeat event is to be '
'considered valid for the purpose of a fast '
'track sequence. This setting should generally be '
@@ -134,6 +147,17 @@ opts = [
'Test" and typical ramdisk start-up. This value should '
'not exceed the [api]ramdisk_heartbeat_timeout '
'setting.')),
+ cfg.BoolOpt('erase_skip_read_only',
+ default=False,
+ mutable=True,
+ help=_('Whether the ironic-python-agent should skip read-only '
+ 'devices when running the "erase_devices" clean step '
+ 'where block devices are zeroed out. This requires '
+ 'ironic-python-agent 6.0.0 or greater. By default '
+ 'a read-only device will cause non-metadata based '
+ 'cleaning operations to fail due to the possible '
+ 'operational security risk of data being retained '
+ 'between deployments of the bare metal node.')),
]
diff --git a/ironic/conf/ilo.py b/ironic/conf/ilo.py
index 177f31369..5baaf3d4a 100644
--- a/ironic/conf/ilo.py
+++ b/ironic/conf/ilo.py
@@ -72,7 +72,10 @@ opts = [
help=_('Interval (in seconds) between periodic erase-devices '
'status checks to determine whether the asynchronous '
'out-of-band erase-devices was successfully finished or '
- 'not.')),
+ 'not. On average, a 300GB HDD with the default pattern '
+ '"overwrite" takes approximately 9 hours, and a 300GB '
+ 'SSD with the default pattern "block" takes '
+ 'approximately 30 seconds to complete a sanitize disk '
+ 'erase.')),
cfg.StrOpt('ca_file',
help=_('CA certificate file to validate iLO.')),
cfg.StrOpt('default_boot_mode',
diff --git a/ironic/conf/ipmi.py b/ironic/conf/ipmi.py
index 9545bde17..0ef39b7b2 100644
--- a/ironic/conf/ipmi.py
+++ b/ironic/conf/ipmi.py
@@ -22,6 +22,7 @@ from ironic.common.i18n import _
opts = [
cfg.IntOpt('command_retry_timeout',
default=60,
+ mutable=True,
help=_('Maximum time in seconds to retry retryable IPMI '
'operations. (An operation is retryable, for '
'example, if the requested operation fails '
@@ -31,18 +32,29 @@ opts = [
'unresponsive BMCs.')),
cfg.IntOpt('min_command_interval',
default=5,
+ mutable=True,
help=_('Minimum time, in seconds, between IPMI operations '
'sent to a server. There is a risk with some hardware '
'that setting this too low may cause the BMC to crash. '
'Recommended setting is 5 seconds.')),
+ cfg.BoolOpt('use_ipmitool_retries',
+ default=False,
+ help=_('When set to True and the parameters are supported by '
+ 'ipmitool, the number of retries and the retry '
+ 'interval are passed to ipmitool as parameters, and '
+ 'ipmitool will do the retries. When set to False, '
+ 'ironic will retry the ipmitool commands. '
+ 'Recommended setting is False')),
cfg.BoolOpt('kill_on_timeout',
default=True,
+ mutable=True,
help=_('Kill `ipmitool` process invoked by ironic to read '
'node power state if `ipmitool` process does not exit '
'after `command_retry_timeout` timeout expires. '
'Recommended setting is True')),
cfg.BoolOpt('disable_boot_timeout',
default=True,
+ mutable=True,
help=_('Default timeout behavior whether ironic sends a raw '
'IPMI command to disable the 60 second timeout for '
'booting. Setting this option to False will NOT send '
@@ -51,10 +63,12 @@ opts = [
'option in node\'s \'driver_info\' field.')),
cfg.MultiStrOpt('additional_retryable_ipmi_errors',
default=[],
+ mutable=True,
help=_('Additional errors ipmitool may encounter, '
'specific to the environment it is run in.')),
cfg.BoolOpt('debug',
default=False,
+ mutable=True,
help=_('Enables all ipmi commands to be executed with an '
'additional debugging output. This is a separate '
'option as ipmitool can log a substantial amount '
diff --git a/ironic/conf/iscsi.py b/ironic/conf/iscsi.py
index 8b1a258e0..5e977e72c 100644
--- a/ironic/conf/iscsi.py
+++ b/ironic/conf/iscsi.py
@@ -21,9 +21,11 @@ from ironic.common.i18n import _
opts = [
cfg.PortOpt('portal_port',
default=3260,
+ mutable=True,
help=_('The port number on which the iSCSI portal listens '
'for incoming connections.')),
cfg.StrOpt('conv_flags',
+ mutable=True,
help=_('Flags that need to be sent to the dd command, '
'to control the conversion of the original file '
'when copying to the host. It can contain several '
@@ -31,6 +33,7 @@ opts = [
cfg.IntOpt('verify_attempts',
default=3,
min=1,
+ mutable=True,
help=_('Maximum attempts to verify an iSCSI connection is '
'active, sleeping 1 second between attempts. Defaults '
'to 3.')),
diff --git a/ironic/conf/json_rpc.py b/ironic/conf/json_rpc.py
index f11bb457f..3fdff21f4 100644
--- a/ironic/conf/json_rpc.py
+++ b/ironic/conf/json_rpc.py
@@ -19,9 +19,14 @@ opts = [
cfg.StrOpt('auth_strategy',
choices=[('noauth', _('no authentication')),
('keystone', _('use the Identity service for '
- 'authentication'))],
+ 'authentication')),
+ ('http_basic', _('HTTP basic authentication'))],
help=_('Authentication strategy used by JSON RPC. Defaults to '
'the global auth_strategy setting.')),
+ cfg.StrOpt('http_basic_auth_user_file',
+ default='/etc/ironic/htpasswd-json-rpc',
+ help=_('Path to Apache format user authentication file used '
+ 'when auth_strategy=http_basic')),
cfg.HostAddressOpt('host_ip',
default='::',
help=_('The IP address or hostname on which JSON RPC '
@@ -32,12 +37,24 @@ opts = [
cfg.BoolOpt('use_ssl',
default=False,
help=_('Whether to use TLS for JSON RPC')),
+ cfg.StrOpt('http_basic_username',
+ deprecated_for_removal=True,
+ deprecated_reason=_("Use username instead"),
+ help=_("Name of the user to use for HTTP Basic authentication "
+ "client requests.")),
+ cfg.StrOpt('http_basic_password',
+ deprecated_for_removal=True,
+ deprecated_reason=_("Use password instead"),
+ secret=True,
+ help=_("Password to use for HTTP Basic authentication "
+ "client requests.")),
]
def register_opts(conf):
conf.register_opts(opts, group='json_rpc')
auth.register_auth_opts(conf, 'json_rpc')
+ conf.set_default('timeout', 120, group='json_rpc')
def list_opts():
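Example for the new http_basic strategy (assumed file contents): the user file is standard Apache htpasswd output, e.g. generated with "htpasswd -nbB myName myPassword", one user:bcrypt-hash entry per line (hash truncated here):

    # /etc/ironic/htpasswd-json-rpc
    myName:$2y$05$lE3eGtyj41jZwrzS87KTqe...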
diff --git a/ironic/conf/neutron.py b/ironic/conf/neutron.py
index 377599636..d90395f52 100644
--- a/ironic/conf/neutron.py
+++ b/ironic/conf/neutron.py
@@ -23,12 +23,15 @@ opts = [
cfg.IntOpt('port_setup_delay',
default=0,
min=0,
+ mutable=True,
help=_('Delay value to wait for Neutron agents to setup '
'sufficient DHCP configuration for port.')),
cfg.IntOpt('retries',
default=3,
+ mutable=True,
help=_('Client retries in the case of a failed request.')),
cfg.StrOpt('cleaning_network',
+ mutable=True,
help=_('Neutron network UUID or name for the ramdisk to be '
'booted into for cleaning nodes. Required for "neutron" '
'network interface. It is also required if cleaning '
@@ -37,6 +40,7 @@ opts = [
'unique among all networks or cleaning will fail.'),
deprecated_name='cleaning_network_uuid'),
cfg.StrOpt('provisioning_network',
+ mutable=True,
help=_('Neutron network UUID or name for the ramdisk to be '
'booted into for provisioning nodes. Required for '
'"neutron" network interface. If a name is provided, '
@@ -45,6 +49,7 @@ opts = [
deprecated_name='provisioning_network_uuid'),
cfg.ListOpt('provisioning_network_security_groups',
default=[],
+ mutable=True,
help=_('List of Neutron Security Group UUIDs to be '
'applied during provisioning of the nodes. '
'Optional for the "neutron" network interface and not '
@@ -53,6 +58,7 @@ opts = [
'is used.')),
cfg.ListOpt('cleaning_network_security_groups',
default=[],
+ mutable=True,
help=_('List of Neutron Security Group UUIDs to be '
'applied during cleaning of the nodes. '
'Optional for the "neutron" network interface and not '
@@ -60,6 +66,7 @@ opts = [
'If not specified, default security group '
'is used.')),
cfg.StrOpt('rescuing_network',
+ mutable=True,
help=_('Neutron network UUID or name for booting the ramdisk '
'for rescue mode. This is not the network that the '
'rescue ramdisk will use post-boot -- the tenant '
@@ -70,6 +77,7 @@ opts = [
'among all networks or rescue will fail.')),
cfg.ListOpt('rescuing_network_security_groups',
default=[],
+ mutable=True,
help=_('List of Neutron Security Group UUIDs to be applied '
'during the node rescue process. Optional for the '
'"neutron" network interface and not used for the '
@@ -77,6 +85,7 @@ opts = [
'specified, the default security group is used.')),
cfg.IntOpt('request_timeout',
default=45,
+ mutable=True,
help=_('Timeout for request processing when interacting '
'with Neutron. This value should be increased if '
'neutron port action timeouts are observed as neutron '
@@ -85,18 +94,21 @@ opts = [
'client/server interactions.')),
cfg.BoolOpt('add_all_ports',
default=False,
+ mutable=True,
help=_('Option to enable transmission of all ports '
'to neutron when creating ports for provisioning, '
'cleaning, or rescue. This is done without IP '
'addresses assigned to the port, and may be useful '
'in some bonded network configurations.')),
cfg.StrOpt('inspection_network',
+ mutable=True,
help=_('Neutron network UUID or name for the ramdisk to be '
'booted into for in-band inspection of nodes. '
'If a name is provided, it must be unique among all '
'networks or inspection will fail.')),
cfg.ListOpt('inspection_network_security_groups',
default=[],
+ mutable=True,
help=_('List of Neutron Security Group UUIDs to be applied '
'during the node inspection process. Optional for the '
'"neutron" network interface and not used for the '
@@ -104,6 +116,7 @@ opts = [
'specified, the default security group is used.')),
cfg.IntOpt('dhcpv6_stateful_address_count',
default=4,
+ mutable=True,
help=_('Number of IPv6 addresses to allocate for ports created '
'for provisioning, cleaning, rescue or inspection on '
'DHCPv6-stateful networks. Different stages of the '
diff --git a/ironic/conf/nova.py b/ironic/conf/nova.py
index 9fc8c1c52..0ec1f2a77 100644
--- a/ironic/conf/nova.py
+++ b/ironic/conf/nova.py
@@ -18,6 +18,7 @@ from ironic.conf import auth
opts = [
cfg.BoolOpt('send_power_notifications',
default=True,
+ mutable=True,
help=_('When set to True, it will enable the support '
'for power state change callbacks to nova. This '
'option should be set to False in deployments '
diff --git a/ironic/conf/opts.py b/ironic/conf/opts.py
index 61e6fe895..2c4ec2d04 100644
--- a/ironic/conf/opts.py
+++ b/ironic/conf/opts.py
@@ -88,7 +88,9 @@ def update_opt_defaults():
'amqp=WARNING',
'amqplib=WARNING',
'qpid.messaging=INFO',
+        # This logger name comes in two flavors (dot and underscore)
'oslo.messaging=INFO',
+ 'oslo_messaging=INFO',
'sqlalchemy=WARNING',
'stevedore=INFO',
'eventlet.wsgi.server=INFO',
diff --git a/ironic/conf/pxe.py b/ironic/conf/pxe.py
index a54beefa6..2ddf13e76 100644
--- a/ironic/conf/pxe.py
+++ b/ironic/conf/pxe.py
@@ -23,9 +23,11 @@ from ironic.common.i18n import _
opts = [
cfg.StrOpt('pxe_append_params',
default='nofb nomodeset vga=normal',
+ mutable=True,
help=_('Additional append parameters for baremetal PXE boot.')),
cfg.StrOpt('default_ephemeral_format',
default='ext4',
+ mutable=True,
help=_('Default file system format for ephemeral partition, '
'if one is created.')),
cfg.StrOpt('images_path',
@@ -50,16 +52,26 @@ opts = [
cfg.StrOpt('pxe_config_template',
default=os.path.join(
'$pybasedir', 'drivers/modules/pxe_config.template'),
+ mutable=True,
help=_('On ironic-conductor node, template file for PXE '
- 'configuration.')),
+ 'loader configuration.')),
+ cfg.StrOpt('ipxe_config_template',
+ default=os.path.join(
+ '$pybasedir', 'drivers/modules/ipxe_config.template'),
+ mutable=True,
+ help=_('On ironic-conductor node, template file for iPXE '
+ 'operations.')),
cfg.StrOpt('uefi_pxe_config_template',
default=os.path.join(
'$pybasedir',
'drivers/modules/pxe_grub_config.template'),
+ mutable=True,
help=_('On ironic-conductor node, template file for PXE '
- 'configuration for UEFI boot loader.')),
+ 'configuration for UEFI boot loader. Generally this '
+                       'is used for GRUB-specific templates.')),
cfg.DictOpt('pxe_config_template_by_arch',
default={},
+ mutable=True,
help=_('On ironic-conductor node, template file for PXE '
'configuration per node architecture. '
'For example: '
@@ -102,10 +114,22 @@ opts = [
cfg.StrOpt('uefi_pxe_bootfile_name',
default='bootx64.efi',
help=_('Bootfile DHCP parameter for UEFI boot mode.')),
+ cfg.StrOpt('ipxe_bootfile_name',
+ default='undionly.kpxe',
+               help=_('Bootfile DHCP parameter for iPXE in legacy BIOS boot '
+                      'mode.')),
+ cfg.StrOpt('uefi_ipxe_bootfile_name',
+ default='ipxe.efi',
+ help=_('Bootfile DHCP parameter for UEFI boot mode. If you '
+ 'experience problems with booting using it, try '
+ 'snponly.efi.')),
cfg.DictOpt('pxe_bootfile_name_by_arch',
default={},
help=_('Bootfile DHCP parameter per node architecture. '
'For example: aarch64:grubaa64.efi')),
+ cfg.DictOpt('ipxe_bootfile_name_by_arch',
+ default={},
+ help=_('Bootfile DHCP parameter per node architecture. '
+ 'For example: aarch64:ipxe_aa64.efi')),
cfg.StrOpt('ipxe_boot_script',
default=os.path.join(
'$pybasedir', 'drivers/modules/boot.ipxe'),
@@ -129,10 +153,12 @@ opts = [
default='4',
choices=[('4', _('IPv4')),
('6', _('IPv6'))],
+ mutable=True,
help=_('The IP version that will be used for PXE booting. '
'Defaults to 4. EXPERIMENTAL')),
cfg.BoolOpt('ipxe_use_swift',
default=False,
+ mutable=True,
help=_("Download deploy and rescue images directly from swift "
"using temporary URLs. "
"If set to false (default), images are downloaded "
@@ -140,6 +166,15 @@ opts = [
"local HTTP server. "
"Applicable only when 'ipxe' compatible boot interface "
"is used.")),
+ cfg.BoolOpt('enable_netboot_fallback',
+ default=False,
+ mutable=True,
+ help=_('If True, generate a PXE environment even for nodes '
+ 'that use local boot. This is useful when the driver '
+ 'cannot switch nodes to local boot, e.g. with SNMP '
+ 'or with Redfish on machines that cannot do persistent '
+ 'boot. Mostly useful for standalone ironic since '
+ 'Neutron will prevent incorrect PXE boot.')),
]
diff --git a/ironic/conf/redfish.py b/ironic/conf/redfish.py
index af1b06451..7c3eef31a 100644
--- a/ironic/conf/redfish.py
+++ b/ironic/conf/redfish.py
@@ -46,6 +46,7 @@ opts = [
help=_('Redfish HTTP client authentication method.')),
cfg.BoolOpt('use_swift',
default=True,
+ mutable=True,
help=_('Upload generated ISO images for virtual media boot to '
'Swift, then pass temporary URL to BMC for booting the '
'node. If set to false, images are placed on the '
@@ -53,21 +54,32 @@ opts = [
'local HTTP server.')),
cfg.StrOpt('swift_container',
default='ironic_redfish_container',
+ mutable=True,
help=_('The Swift container to store Redfish driver data. '
'Applies only when `use_swift` is enabled.')),
cfg.IntOpt('swift_object_expiry_timeout',
default=900,
+ mutable=True,
help=_('Amount of time in seconds for Swift objects to '
'auto-expire. Applies only when `use_swift` is '
'enabled.')),
cfg.StrOpt('kernel_append_params',
default='nofb nomodeset vga=normal',
+ mutable=True,
help=_('Additional kernel parameters to pass down to the '
'instance kernel. These parameters can be consumed by '
'the kernel or by the applications by reading '
'/proc/cmdline. Mind severe cmdline size limit! Can be '
'overridden by `instance_info/kernel_append_params` '
'property.')),
+ cfg.IntOpt('file_permission',
+ default=0o644,
+               help=_('File permission for swift-less image hosting, given '
+                      'as the octal representation of the file access '
+                      'permissions. This setting defaults to ``644``, '
+                      'i.e. the octal number ``0o644`` in Python. '
+                      'The value must be written in octal notation, '
+                      'starting with ``0o``.')),
]
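A quick illustration of the octal convention that file_permission expects (standard library only):

    import stat

    # 0o644 (rw-r--r--) is the decimal value 420; the configuration
    # value must use the 0o-prefixed octal spelling.
    assert 0o644 == int('644', 8) == 420
    # With the regular-file bit (S_IFREG) set, the mode reads as:
    assert stat.filemode(0o100644) == '-rw-r--r--'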
diff --git a/ironic/db/sqlalchemy/alembic/versions/cf1a80fdb352_add_node_network_data_field.py b/ironic/db/sqlalchemy/alembic/versions/cf1a80fdb352_add_node_network_data_field.py
new file mode 100644
index 000000000..cfd0e8edc
--- /dev/null
+++ b/ironic/db/sqlalchemy/alembic/versions/cf1a80fdb352_add_node_network_data_field.py
@@ -0,0 +1,30 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Add nodes.network_data field
+
+Revision ID: cf1a80fdb352
+Revises: b2ad35726bb0
+Create Date: 2020-03-20 22:41:14.163881
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision = 'cf1a80fdb352'
+down_revision = 'b2ad35726bb0'
+
+
+def upgrade():
+ op.add_column('nodes', sa.Column('network_data', sa.Text(),
+ nullable=True))
diff --git a/ironic/db/sqlalchemy/api.py b/ironic/db/sqlalchemy/api.py
index e3f62335f..374898bdb 100644
--- a/ironic/db/sqlalchemy/api.py
+++ b/ironic/db/sqlalchemy/api.py
@@ -155,6 +155,13 @@ def add_port_filter_by_node_owner(query, value):
return query.filter(models.Node.owner == value)
+def add_port_filter_by_node_project(query, value):
+ query = query.join(models.Node,
+ models.Port.node_id == models.Node.id)
+ return query.filter((models.Node.owner == value)
+ | (models.Node.lessee == value))
+
+
def add_portgroup_filter(query, value):
"""Adds a portgroup-specific filter to a query.
@@ -564,7 +571,7 @@ class Connection(api.Connection):
@oslo_db_api.retry_on_deadlock
def destroy_node(self, node_id):
- with _session_for_write():
+ with _session_for_write() as session:
query = model_query(models.Node)
query = add_identity_filter(query, node_id)
@@ -573,6 +580,11 @@ class Connection(api.Connection):
except NoResultFound:
raise exception.NodeNotFound(node=node_id)
+ # Orphan allocation, if any. On the API level this is only allowed
+ # with maintenance on.
+ node_ref.allocation_id = None
+ node_ref.save(session)
+
# Get node ID, if an UUID was supplied. The ID is
# required for deleting all ports, attached to the node.
if uuidutils.is_uuid_like(node_id):
@@ -682,38 +694,49 @@ class Connection(api.Connection):
except NoResultFound:
raise exception.PortNotFound(port=port_uuid)
- def get_port_by_address(self, address, owner=None):
+ def get_port_by_address(self, address, owner=None, project=None):
query = model_query(models.Port).filter_by(address=address)
if owner:
query = add_port_filter_by_node_owner(query, owner)
+ elif project:
+ query = add_port_filter_by_node_project(query, project)
try:
return query.one()
except NoResultFound:
raise exception.PortNotFound(port=address)
def get_port_list(self, limit=None, marker=None,
- sort_key=None, sort_dir=None, owner=None):
+ sort_key=None, sort_dir=None, owner=None,
+ project=None):
query = model_query(models.Port)
if owner:
query = add_port_filter_by_node_owner(query, owner)
+ elif project:
+ query = add_port_filter_by_node_project(query, project)
return _paginate_query(models.Port, limit, marker,
sort_key, sort_dir, query)
def get_ports_by_node_id(self, node_id, limit=None, marker=None,
- sort_key=None, sort_dir=None, owner=None):
+ sort_key=None, sort_dir=None, owner=None,
+ project=None):
query = model_query(models.Port)
query = query.filter_by(node_id=node_id)
if owner:
query = add_port_filter_by_node_owner(query, owner)
+ elif project:
+ query = add_port_filter_by_node_project(query, project)
return _paginate_query(models.Port, limit, marker,
sort_key, sort_dir, query)
def get_ports_by_portgroup_id(self, portgroup_id, limit=None, marker=None,
- sort_key=None, sort_dir=None, owner=None):
+ sort_key=None, sort_dir=None, owner=None,
+ project=None):
query = model_query(models.Port)
query = query.filter_by(portgroup_id=portgroup_id)
if owner:
query = add_port_filter_by_node_owner(query, owner)
+ elif project:
+ query = add_port_filter_by_node_project(query, project)
return _paginate_query(models.Port, limit, marker,
sort_key, sort_dir, query)
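For context, a self-contained sketch of the owner-or-lessee predicate the new helper builds (model definitions reduced to the relevant columns):

    from sqlalchemy import Column, ForeignKey, Integer, String, or_
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Node(Base):
        __tablename__ = 'nodes'
        id = Column(Integer, primary_key=True)
        owner = Column(String(255))
        lessee = Column(String(255))

    class Port(Base):
        __tablename__ = 'ports'
        id = Column(Integer, primary_key=True)
        node_id = Column(Integer, ForeignKey('nodes.id'))

    def add_port_filter_by_node_project(query, value):
        # Match ports on nodes either owned or leased by the project.
        query = query.join(Node, Port.node_id == Node.id)
        return query.filter(or_(Node.owner == value, Node.lessee == value))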
@@ -1515,12 +1538,12 @@ class Connection(api.Connection):
per-node trait limit.
"""
if num_traits > MAX_TRAITS_PER_NODE:
- msg = _("Could not modify traits for node %(node_id)s as it would "
- "exceed the maximum number of traits per node "
- "(%(num_traits)d vs. %(max_traits)d)")
- raise exception.InvalidParameterValue(
- msg, node_id=node_id, num_traits=num_traits,
- max_traits=MAX_TRAITS_PER_NODE)
+ msg = (_("Could not modify traits for node %(node_id)s as it "
+ "would exceed the maximum number of traits per node "
+ "(%(num_traits)d vs. %(max_traits)d)")
+ % {'node_id': node_id, 'num_traits': num_traits,
+ 'max_traits': MAX_TRAITS_PER_NODE})
+ raise exception.InvalidParameterValue(err=msg)
@oslo_db_api.retry_on_deadlock
def set_node_traits(self, node_id, traits, version):
diff --git a/ironic/db/sqlalchemy/models.py b/ironic/db/sqlalchemy/models.py
index fa56c2611..68b366f21 100644
--- a/ironic/db/sqlalchemy/models.py
+++ b/ironic/db/sqlalchemy/models.py
@@ -197,6 +197,7 @@ class Node(Base):
retired = Column(Boolean, nullable=True, default=False,
server_default=false())
retired_reason = Column(Text, nullable=True)
+ network_data = Column(db_types.JsonEncodedDict)
storage_interface = Column(String(255), nullable=True)
power_interface = Column(String(255), nullable=True)
vendor_interface = Column(String(255), nullable=True)
diff --git a/ironic/dhcp/neutron.py b/ironic/dhcp/neutron.py
index 4b0210cb3..372858742 100644
--- a/ironic/dhcp/neutron.py
+++ b/ironic/dhcp/neutron.py
@@ -19,7 +19,6 @@ import time
from neutronclient.common import exceptions as neutron_client_exc
from oslo_log import log as logging
-from oslo_utils import netutils
from ironic.common import exception
from ironic.common.i18n import _
@@ -187,12 +186,18 @@ class NeutronDHCPApi(base.BaseDHCP):
ip_address = fixed_ips[0].get('ip_address', None)
if ip_address:
- if netutils.is_valid_ipv4(ip_address):
- return ip_address
- else:
- LOG.error("Neutron returned invalid IPv4 "
- "address %(ip_address)s on port %(port_uuid)s.",
- {'ip_address': ip_address, 'port_uuid': port_uuid})
+ try:
+ if ipaddress.ip_address(ip_address).version == 4:
+ return ip_address
+ else:
+ LOG.error("Neutron returned invalid IPv4 "
+ "address %(ip_address)s on port %(port_uuid)s.",
+ {'ip_address': ip_address,
+ 'port_uuid': port_uuid})
+ raise exception.InvalidIPv4Address(ip_address=ip_address)
+ except ValueError as exc:
+ LOG.error("An Invalid IP address was supplied and failed "
+ "basic validation: %s", exc)
raise exception.InvalidIPv4Address(ip_address=ip_address)
else:
LOG.error("No IP address assigned to Neutron port %s.",
diff --git a/ironic/drivers/base.py b/ironic/drivers/base.py
index 525342a6e..c33ad05b4 100644
--- a/ironic/drivers/base.py
+++ b/ironic/drivers/base.py
@@ -616,6 +616,18 @@ class PowerInterface(BaseInterface):
"""
return [states.POWER_ON, states.POWER_OFF, states.REBOOT]
+ def supports_power_sync(self, task):
+ """Check if power sync is supported for the given node.
+
+ If ``False``, the conductor will simply store whatever
+ ``get_power_state`` returns in the database instead of trying
+ to force the expected power state.
+
+ :param task: A TaskManager instance containing the node to act on.
+ :returns: boolean, whether power sync is supported.
+ """
+ return True
+
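Hypothetical driver code (not part of this patch) showing how a power interface would opt out of power-state enforcement:

    from ironic.common import states
    from ironic.drivers import base

    class ExamplePower(base.PowerInterface):
        def get_properties(self):
            return {}

        def validate(self, task):
            pass

        def get_power_state(self, task):
            return states.POWER_ON

        def set_power_state(self, task, power_state, timeout=None):
            pass

        def reboot(self, task, timeout=None):
            pass

        def supports_power_sync(self, task):
            # The conductor records whatever get_power_state() returns
            # instead of forcing the expected state.
            return False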
class ConsoleInterface(BaseInterface):
"""Interface for console-related actions."""
@@ -1543,6 +1555,26 @@ class NetworkInterface(BaseInterface):
"""
return False
+ def get_node_network_data(self, task):
+ """Return network configuration for node NICs.
+
+ Gather L2 and L3 network settings from ironic port/portgroups
+ objects and underlying network provider, then put together
+ collected data in form of Nova network metadata (`network_data.json`)
+ dict.
+
+ Ironic would eventually pass network configuration to the node
+ being managed out-of-band.
+
+ :param task: A TaskManager instance.
+ :raises: InvalidParameterValue, if the network interface configuration
+ is invalid.
+ :raises: MissingParameterValue, if some parameters are missing.
+        :returns: a dict holding network configuration information adhering
+            to the Nova network metadata layout (`network_data.json`).
+ """
+ return {}
+
class StorageInterface(BaseInterface, metaclass=abc.ABCMeta):
"""Base class for storage interfaces."""
diff --git a/ironic/drivers/drac.py b/ironic/drivers/drac.py
index 430105eae..453f17876 100644
--- a/ironic/drivers/drac.py
+++ b/ironic/drivers/drac.py
@@ -25,7 +25,6 @@ from ironic.drivers.modules.drac import management
from ironic.drivers.modules.drac import power
from ironic.drivers.modules.drac import raid
from ironic.drivers.modules.drac import vendor_passthru
-from ironic.drivers.modules import inspector
from ironic.drivers.modules import ipxe
from ironic.drivers.modules import noop
from ironic.drivers.modules import pxe
@@ -69,13 +68,14 @@ class IDRACHardware(generic.GenericHardware):
# if it is enabled by an operator (implying that the service is
# installed).
return [drac_inspect.DracWSManInspect, drac_inspect.DracInspect,
- drac_inspect.DracRedfishInspect, inspector.Inspector,
- noop.NoInspect]
+ drac_inspect.DracRedfishInspect] + super(
+ IDRACHardware, self).supported_inspect_interfaces
@property
def supported_raid_interfaces(self):
"""List of supported raid interfaces."""
- return [raid.DracWSManRAID, raid.DracRAID, noop.NoRAID]
+ return [raid.DracWSManRAID, raid.DracRAID] + super(
+ IDRACHardware, self).supported_raid_interfaces
@property
def supported_vendor_interfaces(self):
diff --git a/ironic/drivers/generic.py b/ironic/drivers/generic.py
index 1e7a83c4b..599e1139c 100644
--- a/ironic/drivers/generic.py
+++ b/ironic/drivers/generic.py
@@ -18,6 +18,7 @@ Generic hardware types.
from ironic.drivers import hardware_type
from ironic.drivers.modules import agent
+from ironic.drivers.modules import agent_power
from ironic.drivers.modules.ansible import deploy as ansible_deploy
from ironic.drivers.modules import fake
from ironic.drivers.modules import inspector
@@ -102,7 +103,7 @@ class ManualManagementHardware(GenericHardware):
@property
def supported_power_interfaces(self):
"""List of supported power interfaces."""
- return [fake.FakePower]
+ return [agent_power.AgentPower, fake.FakePower]
@property
def supported_vendor_interfaces(self):
diff --git a/ironic/drivers/hardware_type.py b/ironic/drivers/hardware_type.py
index 0ec71210e..df5f43782 100644
--- a/ironic/drivers/hardware_type.py
+++ b/ironic/drivers/hardware_type.py
@@ -42,19 +42,23 @@ class AbstractHardwareType(object, metaclass=abc.ABCMeta):
# Required hardware interfaces
- @abc.abstractproperty
+ @property
+ @abc.abstractmethod
def supported_boot_interfaces(self):
"""List of supported boot interfaces."""
- @abc.abstractproperty
+ @property
+ @abc.abstractmethod
def supported_deploy_interfaces(self):
"""List of supported deploy interfaces."""
- @abc.abstractproperty
+ @property
+ @abc.abstractmethod
def supported_management_interfaces(self):
"""List of supported management interfaces."""
- @abc.abstractproperty
+ @property
+ @abc.abstractmethod
def supported_power_interfaces(self):
"""List of supported power interfaces."""
diff --git a/ironic/drivers/ibmc.py b/ironic/drivers/ibmc.py
index 0f9ae5a5b..5f37d520e 100644
--- a/ironic/drivers/ibmc.py
+++ b/ironic/drivers/ibmc.py
@@ -18,6 +18,7 @@ CH121 V5.
from ironic.drivers import generic
from ironic.drivers.modules.ibmc import management as ibmc_mgmt
from ironic.drivers.modules.ibmc import power as ibmc_power
+from ironic.drivers.modules.ibmc import raid as ibmc_raid
from ironic.drivers.modules.ibmc import vendor as ibmc_vendor
from ironic.drivers.modules import noop
@@ -39,3 +40,8 @@ class IBMCHardware(generic.GenericHardware):
def supported_vendor_interfaces(self):
"""List of supported vendor interfaces."""
return [ibmc_vendor.IBMCVendor, noop.NoVendor]
+
+ @property
+ def supported_raid_interfaces(self):
+ """List of supported raid interfaces."""
+ return [ibmc_raid.IbmcRAID, noop.NoRAID]
diff --git a/ironic/drivers/ilo.py b/ironic/drivers/ilo.py
index 87fb19cba..4b824fffc 100644
--- a/ironic/drivers/ilo.py
+++ b/ironic/drivers/ilo.py
@@ -24,7 +24,6 @@ from ironic.drivers.modules.ilo import management
from ironic.drivers.modules.ilo import power
from ironic.drivers.modules.ilo import raid
from ironic.drivers.modules.ilo import vendor
-from ironic.drivers.modules import inspector
from ironic.drivers.modules import noop
@@ -53,8 +52,8 @@ class IloHardware(generic.GenericHardware):
@property
def supported_inspect_interfaces(self):
"""List of supported inspect interfaces."""
- return [inspect.IloInspect, inspector.Inspector,
- noop.NoInspect]
+ return [inspect.IloInspect] + super(
+ IloHardware, self).supported_inspect_interfaces
@property
def supported_management_interfaces(self):
@@ -81,7 +80,8 @@ class Ilo5Hardware(IloHardware):
@property
def supported_raid_interfaces(self):
"""List of supported raid interfaces."""
- return [raid.Ilo5RAID, noop.NoRAID]
+ return [raid.Ilo5RAID] + super(
+ Ilo5Hardware, self).supported_raid_interfaces
@property
def supported_management_interfaces(self):
diff --git a/ironic/drivers/modules/agent.py b/ironic/drivers/modules/agent.py
index 9a9bb94b3..1466cb765 100644
--- a/ironic/drivers/modules/agent.py
+++ b/ironic/drivers/modules/agent.py
@@ -19,7 +19,6 @@ from oslo_log import log
from oslo_utils import excutils
from oslo_utils import units
-from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
@@ -60,6 +59,21 @@ OPTIONAL_PROPERTIES = {
'``image_https_proxy`` are not specified. Optional.'),
}
+_RAID_APPLY_CONFIGURATION_ARGSINFO = {
+ "raid_config": {
+ "description": "The RAID configuration to apply.",
+ "required": True,
+ },
+ "delete_existing": {
+ "description": (
+ "Setting this to 'True' indicates to delete existing RAID "
+ "configuration prior to creating the new configuration. "
+ "Default value is 'True'."
+ ),
+ "required": False,
+ }
+}
+
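Illustratively, the raid_config argument accepted by the new deploy step follows ironic's target RAID configuration schema, e.g.:

    raid_config = {
        'logical_disks': [
            {'size_gb': 100,
             'raid_level': '1',
             'is_root_volume': True},
        ]
    }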
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
COMMON_PROPERTIES.update(agent_base.VENDOR_PROPERTIES)
@@ -71,11 +85,12 @@ PARTITION_IMAGE_LABELS = ('kernel', 'ramdisk', 'root_gb', 'root_mb', 'swap_mb',
@METRICS.timer('check_image_size')
-def check_image_size(task, image_source):
+def check_image_size(task, image_source, image_disk_format=None):
"""Check if the requested image is larger than the ram size.
:param task: a TaskManager instance containing the node to act on.
:param image_source: href of the image.
+    :param image_disk_format: the disk format of the image, if provided.
:raises: InvalidParameterValue if size of the image is greater than
the available ram size.
"""
@@ -88,7 +103,8 @@ def check_image_size(task, image_source):
return
image_show = images.image_show(task.context, image_source)
- if CONF.agent.stream_raw_images and image_show.get('disk_format') == 'raw':
+ if CONF.agent.stream_raw_images and (image_show.get('disk_format') == 'raw'
+ or image_disk_format == 'raw'):
LOG.debug('Skip the image size check since the image is going to be '
'streamed directly onto the disk for node %s', node.uuid)
return
@@ -168,38 +184,14 @@ def validate_http_provisioning_configuration(node):
class AgentDeployMixin(agent_base.AgentDeployMixin):
- @METRICS.timer('AgentDeployMixin.deploy_has_started')
- def deploy_has_started(self, task):
- commands = self._client.get_commands_status(task.node)
-
- for command in commands:
- if command['command_name'] == 'prepare_image':
- # deploy did start at some point
- return True
- return False
-
- @METRICS.timer('AgentDeployMixin.deploy_is_done')
- def deploy_is_done(self, task):
- commands = self._client.get_commands_status(task.node)
- if not commands:
- return False
-
- last_command = commands[-1]
-
- if last_command['command_name'] != 'prepare_image':
- # catches race condition where prepare_image is still processing
- # so deploy hasn't started yet
- return False
-
- if last_command['command_status'] != 'RUNNING':
- return True
+ has_decomposed_deploy_steps = True
- return False
-
- @METRICS.timer('AgentDeployMixin.continue_deploy')
+ @METRICS.timer('AgentDeployMixin.write_image')
+ @base.deploy_step(priority=80)
@task_manager.require_exclusive_lock
- def continue_deploy(self, task):
- task.process_event('resume')
+ def write_image(self, task):
+ if not task.driver.storage.should_write_image(task):
+ return
node = task.node
image_source = node.instance_info.get('image_source')
LOG.debug('Continuing deploy for node %(node)s with image %(img)s',
@@ -253,11 +245,35 @@ class AgentDeployMixin(agent_base.AgentDeployMixin):
if disk_label is not None:
image_info['disk_label'] = disk_label
- # Tell the client to download and write the image with the given args
- self._client.prepare_image(node, image_info)
-
- task.process_event('wait')
-
+ has_write_image = agent_base.find_step(
+ task, 'deploy', 'deploy', 'write_image') is not None
+ if not has_write_image:
+ LOG.warning('The agent on node %s does not have the deploy '
+ 'step deploy.write_image, using the deprecated '
+ 'synchronous fall-back', task.node.uuid)
+
+ if self.has_decomposed_deploy_steps and has_write_image:
+ configdrive = node.instance_info.get('configdrive')
+ # Now switch into the corresponding in-band deploy step and let the
+ # result be polled normally.
+ new_step = {'interface': 'deploy',
+ 'step': 'write_image',
+ 'args': {'image_info': image_info,
+ 'configdrive': configdrive}}
+ return agent_base.execute_step(task, new_step, 'deploy',
+ client=self._client)
+ else:
+ # TODO(dtantsur): remove in W
+ command = self._client.prepare_image(node, image_info, wait=True)
+ if command['command_status'] == 'FAILED':
+ # TODO(jimrollenhagen) power off if using neutron dhcp to
+ # align with pxe driver?
+ msg = (_('node %(node)s command status errored: %(error)s') %
+ {'node': node.uuid, 'error': command['command_error']})
+ LOG.error(msg)
+ deploy_utils.set_failed_state(task, msg)
+
+ # TODO(dtantsur): remove in W
def _get_uuid_from_result(self, task, type_uuid):
command = self._client.get_commands_status(task.node)[-1]
@@ -279,29 +295,18 @@ class AgentDeployMixin(agent_base.AgentDeployMixin):
return
return result
- @METRICS.timer('AgentDeployMixin.check_deploy_success')
- def check_deploy_success(self, node):
- # should only ever be called after we've validated that
- # the prepare_image command is complete
- command = self._client.get_commands_status(node)[-1]
- if command['command_status'] == 'FAILED':
- return command['command_error']
-
- @METRICS.timer('AgentDeployMixin.reboot_to_instance')
- def reboot_to_instance(self, task):
- task.process_event('resume')
+ @METRICS.timer('AgentDeployMixin.prepare_instance_boot')
+ @base.deploy_step(priority=60)
+ @task_manager.require_exclusive_lock
+ def prepare_instance_boot(self, task):
+ if not task.driver.storage.should_write_image(task):
+ task.driver.boot.prepare_instance(task)
+ # Move straight to the final steps
+ return
+
node = task.node
iwdi = task.node.driver_internal_info.get('is_whole_disk_image')
cpu_arch = task.node.properties.get('cpu_arch')
- error = self.check_deploy_success(node)
- if error is not None:
- # TODO(jimrollenhagen) power off if using neutron dhcp to
- # align with pxe driver?
- msg = (_('node %(node)s command status errored: %(error)s') %
- {'node': node.uuid, 'error': error})
- LOG.error(msg)
- deploy_utils.set_failed_state(task, msg)
- return
# If `boot_option` is set to `netboot`, PXEBoot.prepare_instance()
# would need root_uuid of the whole disk image to add it into the
@@ -317,39 +322,47 @@ class AgentDeployMixin(agent_base.AgentDeployMixin):
# ppc64* hardware we need to provide the 'PReP_Boot_partition_uuid' to
# direct where the bootloader should be installed.
driver_internal_info = task.node.driver_internal_info
- root_uuid = self._get_uuid_from_result(task, 'root_uuid')
+ try:
+ partition_uuids = self._client.get_partition_uuids(node).get(
+ 'command_result') or {}
+ root_uuid = partition_uuids.get('root uuid')
+ except exception.AgentAPIError:
+ # TODO(dtantsur): remove in W
+ LOG.warning('Old ironic-python-agent detected, please update '
+ 'to Victoria or newer')
+ partition_uuids = None
+ root_uuid = self._get_uuid_from_result(task, 'root_uuid')
+
if root_uuid:
driver_internal_info['root_uuid_or_disk_id'] = root_uuid
task.node.driver_internal_info = driver_internal_info
task.node.save()
- elif iwdi and CONF.agent.manage_agent_boot:
- # IPA version less than 3.1.0 will not return root_uuid for
- # whole disk image. Also IPA version introduced a requirement
- # for hexdump utility that may not be always available. Need to
- # fall back to older behavior for the same.
- LOG.warning("With the deploy ramdisk based on Ironic Python Agent "
- "version 3.1.0 and beyond, the drivers using "
- "`direct` deploy interface performs `netboot` or "
- "`local` boot for whole disk image based on value "
- "of boot option setting. When you upgrade Ironic "
- "Python Agent in your deploy ramdisk, ensure that "
- "boot option is set appropriately for the node %s. "
- "The boot option can be set using configuration "
- "`[deploy]/default_boot_option` or as a `boot_option` "
- "capability in node's `properties['capabilities']`. "
- "Also please note that this functionality requires "
- "`hexdump` command in the ramdisk.", node.uuid)
+ elif not iwdi:
+ LOG.error('No root UUID returned from the ramdisk for node '
+            LOG.error('No root UUID returned from the ramdisk for node '
+                      '%(node)s; the deploy will likely fail. Partition '
+                      'UUIDs are %(uuids)s',
+                      {'node': node.uuid, 'uuids': partition_uuids})
efi_sys_uuid = None
if not iwdi:
if boot_mode_utils.get_boot_mode(node) == 'uefi':
- efi_sys_uuid = (self._get_uuid_from_result(task,
- 'efi_system_partition_uuid'))
+ # TODO(dtantsur): remove in W
+ if partition_uuids is None:
+ efi_sys_uuid = (self._get_uuid_from_result(task,
+ 'efi_system_partition_uuid'))
+ else:
+ efi_sys_uuid = partition_uuids.get(
+ 'efi system partition uuid')
prep_boot_part_uuid = None
if cpu_arch is not None and cpu_arch.startswith('ppc64'):
- prep_boot_part_uuid = (self._get_uuid_from_result(task,
- 'PReP_Boot_partition_uuid'))
+ # TODO(dtantsur): remove in W
+ if partition_uuids is None:
+ prep_boot_part_uuid = (self._get_uuid_from_result(task,
+ 'PReP_Boot_partition_uuid'))
+ else:
+ prep_boot_part_uuid = partition_uuids.get(
+ 'PReP Boot partition uuid')
LOG.info('Image successfully written to node %s', node.uuid)
@@ -369,11 +382,9 @@ class AgentDeployMixin(agent_base.AgentDeployMixin):
if CONF.agent.image_download_source == 'http':
deploy_utils.remove_http_instance_symlink(task.node.uuid)
- LOG.debug('Rebooting node %s to instance', node.uuid)
- self.reboot_and_finish_deploy(task)
-
-class AgentDeploy(AgentDeployMixin, base.DeployInterface):
+class AgentDeploy(AgentDeployMixin, agent_base.AgentBaseMixin,
+ base.DeployInterface):
"""Interface for deploy-related actions."""
def get_properties(self):
@@ -383,6 +394,10 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface):
"""
return COMMON_PROPERTIES
+ def should_manage_boot(self, task):
+ """Whether agent boot is managed by ironic."""
+ return CONF.agent.manage_agent_boot
+
@METRICS.timer('AgentDeploy.validate')
def validate(self, task):
"""Validate the driver-specific Node deployment info.
@@ -417,6 +432,7 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface):
params = {}
image_source = node.instance_info.get('image_source')
image_checksum = node.instance_info.get('image_checksum')
+ image_disk_format = node.instance_info.get('image_disk_format')
os_hash_algo = node.instance_info.get('image_os_hash_algo')
os_hash_value = node.instance_info.get('image_os_hash_value')
@@ -449,7 +465,7 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface):
validate_http_provisioning_configuration(node)
- check_image_size(task, image_source)
+ check_image_size(task, image_source, image_disk_format)
# Validate the root device hints
deploy_utils.get_root_device_for_deploy(node)
validate_image_proxies(node)
@@ -469,12 +485,10 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface):
:returns: status of the deploy. One of ironic.common.states.
"""
if manager_utils.is_fast_track(task):
+ # NOTE(mgoddard): For fast track we can skip this step and proceed
+ # immediately to the next deploy step.
LOG.debug('Performing a fast track deployment for %(node)s.',
{'node': task.node.uuid})
- # Update the database for the API and the task tracking resumes
- # the state machine state going from DEPLOYWAIT -> DEPLOYING
- task.process_event('wait')
- self.continue_deploy(task)
elif task.driver.storage.should_write_image(task):
# Check if the driver has already performed a reboot in a previous
# deploy step.
@@ -485,44 +499,6 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface):
task.node.driver_internal_info = info
task.node.save()
return states.DEPLOYWAIT
- else:
- # TODO(TheJulia): At some point, we should de-dupe this code
- # as it is nearly identical to the iscsi deploy interface.
- # This is not being done now as it is expected to be
- # refactored in the near future.
- manager_utils.node_power_action(task, states.POWER_OFF)
- with manager_utils.power_state_for_network_configuration(task):
- task.driver.network.remove_provisioning_network(task)
- task.driver.network.configure_tenant_networks(task)
- task.driver.boot.prepare_instance(task)
- manager_utils.node_power_action(task, states.POWER_ON)
- LOG.info('Deployment to node %s done', task.node.uuid)
- return None
-
- @METRICS.timer('AgentDeploy.tear_down')
- @task_manager.require_exclusive_lock
- def tear_down(self, task):
- """Tear down a previous deployment on the task's node.
-
- :param task: a TaskManager instance.
- :returns: status of the deploy. One of ironic.common.states.
- :raises: NetworkError if the cleaning ports cannot be removed.
- :raises: InvalidParameterValue when the wrong power state is specified
- or the wrong driver info is specified for power management.
- :raises: StorageError when the storage interface attached volumes fail
- to detach.
- :raises: other exceptions by the node's power driver if something
- wrong occurred during the power action.
- """
- manager_utils.node_power_action(task, states.POWER_OFF)
- task.driver.storage.detach_volumes(task)
- deploy_utils.tear_down_storage_configuration(task)
- with manager_utils.power_state_for_network_configuration(task):
- task.driver.network.unconfigure_tenant_networks(task)
- # NOTE(mgoddard): If the deployment was unsuccessful the node may
- # have ports on the provisioning network which were not deleted.
- task.driver.network.remove_provisioning_network(task)
- return states.DELETED
@METRICS.timer('AgentDeploy.prepare')
@task_manager.require_exclusive_lock
@@ -649,46 +625,10 @@ class AgentDeploy(AgentDeployMixin, base.DeployInterface):
:param task: a TaskManager instance.
"""
- if CONF.agent.manage_agent_boot:
- task.driver.boot.clean_up_ramdisk(task)
- task.driver.boot.clean_up_instance(task)
- provider = dhcp_factory.DHCPFactory()
- provider.clean_dhcp(task)
+ super(AgentDeploy, self).clean_up(task)
if CONF.agent.image_download_source == 'http':
deploy_utils.destroy_http_instance_images(task.node)
- def take_over(self, task):
- """Take over management of this node from a dead conductor.
-
- :param task: a TaskManager instance.
- """
- pass
-
- @METRICS.timer('AgentDeploy.prepare_cleaning')
- def prepare_cleaning(self, task):
- """Boot into the agent to prepare for cleaning.
-
- :param task: a TaskManager object containing the node
- :raises: NodeCleaningFailure, NetworkError if the previous cleaning
- ports cannot be removed or if new cleaning ports cannot be created.
- :raises: InvalidParameterValue if cleaning network UUID config option
- has an invalid value.
- :returns: states.CLEANWAIT to signify an asynchronous prepare
- """
- return deploy_utils.prepare_inband_cleaning(
- task, manage_boot=CONF.agent.manage_agent_boot)
-
- @METRICS.timer('AgentDeploy.tear_down_cleaning')
- def tear_down_cleaning(self, task):
- """Clean up the PXE and DHCP files after cleaning.
-
- :param task: a TaskManager object containing the node
- :raises: NodeCleaningFailure, NetworkError if the cleaning ports cannot
- be removed
- """
- deploy_utils.tear_down_inband_cleaning(
- task, manage_boot=CONF.agent.manage_agent_boot)
-
class AgentRAID(base.RAIDInterface):
"""Implementation of RAIDInterface which uses agent ramdisk."""
@@ -697,6 +637,37 @@ class AgentRAID(base.RAIDInterface):
"""Return the properties of the interface."""
return {}
+ @METRICS.timer('AgentRAID.get_deploy_steps')
+ def get_deploy_steps(self, task):
+ """Get the list of deploy steps from the agent.
+
+ :param task: a TaskManager object containing the node
+ :raises InstanceDeployFailure: if the deploy steps are not yet
+ available (cached), for example, when a node has just been
+ enrolled and has not been deployed yet.
+ :returns: A list of deploy step dictionaries
+ """
+ return agent_base.get_steps(task, 'deploy', interface='raid')
+
+ @METRICS.timer('AgentRAID.apply_configuration')
+ @base.deploy_step(priority=0,
+ argsinfo=_RAID_APPLY_CONFIGURATION_ARGSINFO)
+ def apply_configuration(self, task, raid_config,
+ delete_existing=True):
+ """Applies RAID configuration on the given node.
+
+ :param task: A TaskManager instance.
+ :param raid_config: The RAID configuration to apply.
+        :param delete_existing: whether to delete the existing RAID
+            configuration before creating the new one.
+ :raises: InvalidParameterValue, if the RAID configuration is invalid.
+ :returns: states.DEPLOYWAIT if RAID configuration is in progress
+ asynchronously or None if it is complete.
+ """
+ self.validate_raid_config(task, raid_config)
+ step = task.node.deploy_step
+ return agent_base.execute_step(task, step, 'deploy')
+
@METRICS.timer('AgentRAID.create_configuration')
@base.clean_step(priority=0)
def create_configuration(self, task,
@@ -745,6 +716,8 @@ class AgentRAID(base.RAIDInterface):
@staticmethod
@agent_base.post_clean_step_hook(
interface='raid', step='create_configuration')
+ @agent_base.post_deploy_step_hook(
+ interface='raid', step='apply_configuration')
def _create_configuration_final(task, command):
"""Clean step hook after a RAID configuration was created.
@@ -762,15 +735,21 @@ class AgentRAID(base.RAIDInterface):
the 'command' argument passed.
"""
try:
- clean_result = command['command_result']['clean_result']
+ if task.node.provision_state == states.DEPLOYWAIT:
+ result = command['command_result']['deploy_result']
+ else:
+ result = command['command_result']['clean_result']
except KeyError:
+ result = None
+
+ if not result:
raise exception.IronicException(
_("Agent ramdisk didn't return a proper command result while "
- "cleaning %(node)s. It returned '%(result)s' after command "
- "execution.") % {'node': task.node.uuid,
- 'result': command})
+ "building RAID on %(node)s. It returned '%(result)s' after "
+ "command execution.") % {'node': task.node.uuid,
+ 'result': command})
- raid.update_raid_info(task.node, clean_result)
+ raid.update_raid_info(task.node, result)
@METRICS.timer('AgentRAID.delete_configuration')
@base.clean_step(priority=0)
diff --git a/ironic/drivers/modules/agent_base.py b/ironic/drivers/modules/agent_base.py
index 367802093..ee9753dcb 100644
--- a/ironic/drivers/modules/agent_base.py
+++ b/ironic/drivers/modules/agent_base.py
@@ -25,14 +25,17 @@ from oslo_utils import timeutils
import retrying
from ironic.common import boot_devices
+from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import image_service
from ironic.common import states
from ironic.common import utils
from ironic.conductor import steps as conductor_steps
+from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
+from ironic.drivers import base
from ironic.drivers.modules import agent_client
from ironic.drivers.modules import boot_mode_utils
from ironic.drivers.modules import deploy_utils
@@ -69,7 +72,14 @@ VENDOR_PROPERTIES = {
'deploy_forces_oob_reboot': _(
'Whether Ironic should force a reboot of the Node via the out-of-band '
'channel after deployment is complete. Provides compatibility with '
- 'older deploy ramdisks. Defaults to False. Optional.')
+ 'older deploy ramdisks. Defaults to False. Optional.'),
+    'agent_verify_ca': _(
+        'Either a Boolean value, a path to a CA_BUNDLE file or a directory '
+        'with certificates of trusted CAs. If set to True, ironic will '
+        'verify the agent\'s certificate; if set to False, certificate '
+        'verification is skipped. If it\'s a path, the driver will use the '
+        'specified certificate or one of the certificates in the '
+        'directory. Defaults to True. Optional.'),
}
__HEARTBEAT_RECORD_ONLY = (states.ENROLL, states.MANAGEABLE, states.AVAILABLE,
@@ -333,6 +343,13 @@ def get_steps(task, step_type, interface=None, override_priorities=None):
return steps
+def find_step(task, step_type, interface, name):
+ """Find the given in-band step."""
+ steps = get_steps(task, step_type, interface)
+ return conductor_steps.find_step(
+ steps, {'interface': interface, 'step': name})
+
+
def _raise(step_type, msg):
assert step_type in ('clean', 'deploy')
exc = (exception.NodeCleaningFailure if step_type == 'clean'
@@ -340,18 +357,20 @@ def _raise(step_type, msg):
raise exc(msg)
-def execute_step(task, step, step_type):
+def execute_step(task, step, step_type, client=None):
"""Execute a clean or deploy step asynchronously on the agent.
:param task: a TaskManager object containing the node
:param step: a step dictionary to execute
:param step_type: 'clean' or 'deploy'
+ :param client: agent client (if available)
:raises: NodeCleaningFailure (clean step) or InstanceDeployFailure (deploy
step) if the agent does not return a command status.
:returns: states.CLEANWAIT/DEPLOYWAIT to signify the step will be
completed async
"""
- client = _get_client()
+ if client is None:
+ client = _get_client()
ports = objects.Port.list_by_node_id(
task.context, task.node.id)
call = getattr(client, 'execute_%s_step' % step_type)
@@ -359,8 +378,7 @@ def execute_step(task, step, step_type):
if not result.get('command_status'):
_raise(step_type, _(
'Agent on node %(node)s returned bad command result: '
- '%(result)s') % {'node': task.node.uuid,
- 'result': result.get('command_error')})
+ '%(result)s') % {'node': task.node.uuid, 'result': result})
return states.CLEANWAIT if step_type == 'clean' else states.DEPLOYWAIT
@@ -369,11 +387,33 @@ def execute_clean_step(task, step):
return execute_step(task, step, 'clean')
+def _step_failure_handler(task, msg, step_type):
+ driver_utils.collect_ramdisk_logs(
+ task.node, label='cleaning' if step_type == 'clean' else None)
+ if step_type == 'clean':
+ manager_utils.cleaning_error_handler(task, msg)
+ else:
+ manager_utils.deploying_error_handler(task, msg)
+
+
class HeartbeatMixin(object):
"""Mixin class implementing heartbeat processing."""
+ has_decomposed_deploy_steps = False
+ """Whether the driver supports decomposed deploy steps.
+
+ Previously (since Rocky), drivers used a single 'deploy' deploy step on
+ the deploy interface. Some additional steps were added for the 'direct'
+ and 'iscsi' deploy interfaces in the Ussuri cycle, which means that
+ more of the deployment flow is driven by deploy steps.
+ """
+
def __init__(self):
self._client = _get_client()
+ if not self.has_decomposed_deploy_steps:
+ LOG.warning('%s does not support decomposed deploy steps. This '
+ 'is deprecated and will stop working in a future '
+ 'release', self.__class__.__name__)
def continue_deploy(self, task):
"""Continues the deployment of baremetal node.
@@ -491,8 +531,12 @@ class HeartbeatMixin(object):
# are currently in the core deploy.deploy step. Other deploy steps
# may cause the agent to boot, but we should not trigger deployment
# at that point if the driver is polling for completion of a step.
- if self.in_core_deploy_step(task):
+ if (not self.has_decomposed_deploy_steps
+ and self.in_core_deploy_step(task)):
msg = _('Failed checking if deploy is done')
+ # NOTE(mgoddard): support backwards compatibility for
+ # drivers which do not implement continue_deploy and
+ # reboot_to_instance as deploy steps.
if not self.deploy_has_started(task):
msg = _('Node failed to deploy')
self.continue_deploy(task)
@@ -637,7 +681,7 @@ class HeartbeatMixin(object):
# handler.
fail_reason = (_('Agent returned bad result for command '
'finalize_rescue: %(result)s') %
- {'result': result.get('command_error')})
+ {'result': agent_client.get_command_error(result)})
raise exception.InstanceRescueFailure(node=node.uuid,
instance=node.instance_uuid,
reason=fail_reason)
@@ -648,7 +692,142 @@ class HeartbeatMixin(object):
task.process_event('done')
-class AgentDeployMixin(HeartbeatMixin):
+class AgentBaseMixin(object):
+ """Mixin with base methods not relying on any deploy steps."""
+
+ def should_manage_boot(self, task):
+ """Whether agent boot is managed by ironic."""
+ return True
+
+ @METRICS.timer('AgentBaseMixin.tear_down')
+ @task_manager.require_exclusive_lock
+ def tear_down(self, task):
+ """Tear down a previous deployment on the task's node.
+
+ Power off the node. All actual clean-up is done in the clean_up()
+ method which should be called separately.
+
+ :param task: a TaskManager instance containing the node to act on.
+ :returns: deploy state DELETED.
+ :raises: NetworkError if the cleaning ports cannot be removed.
+ :raises: InvalidParameterValue when the wrong state is specified
+ or the wrong driver info is specified.
+ :raises: StorageError when volume detachment fails.
+ :raises: other exceptions by the node's power driver if something
+ wrong occurred during the power action.
+ """
+ manager_utils.node_power_action(task, states.POWER_OFF)
+ task.driver.storage.detach_volumes(task)
+ deploy_utils.tear_down_storage_configuration(task)
+ with manager_utils.power_state_for_network_configuration(task):
+ task.driver.network.unconfigure_tenant_networks(task)
+ # NOTE(mgoddard): If the deployment was unsuccessful the node may
+ # have ports on the provisioning network which were not deleted.
+ task.driver.network.remove_provisioning_network(task)
+ return states.DELETED
+
+ @METRICS.timer('AgentBaseMixin.clean_up')
+ def clean_up(self, task):
+ """Clean up the deployment environment for the task's node.
+
+ Unlinks TFTP and instance images and triggers image cache cleanup.
+ Removes the TFTP configuration files for this node.
+
+ :param task: a TaskManager instance containing the node to act on.
+ """
+ if self.should_manage_boot(task):
+ task.driver.boot.clean_up_ramdisk(task)
+ task.driver.boot.clean_up_instance(task)
+ provider = dhcp_factory.DHCPFactory()
+ provider.clean_dhcp(task)
+
+ def take_over(self, task):
+ """Take over management of this node from a dead conductor.
+
+ :param task: a TaskManager instance.
+ """
+ pass
+
+ @METRICS.timer('AgentDeployMixin.prepare_cleaning')
+ def prepare_cleaning(self, task):
+ """Boot into the agent to prepare for cleaning.
+
+ :param task: a TaskManager object containing the node
+ :raises: NodeCleaningFailure, NetworkError if the previous cleaning
+ ports cannot be removed or if new cleaning ports cannot be created.
+ :raises: InvalidParameterValue if cleaning network UUID config option
+ has an invalid value.
+ :returns: states.CLEANWAIT to signify an asynchronous prepare
+ """
+ return deploy_utils.prepare_inband_cleaning(
+ task, manage_boot=self.should_manage_boot(task))
+
+ @METRICS.timer('AgentDeployMixin.tear_down_cleaning')
+ def tear_down_cleaning(self, task):
+ """Clean up the PXE and DHCP files after cleaning.
+
+ :param task: a TaskManager object containing the node
+ :raises: NodeCleaningFailure, NetworkError if the cleaning ports cannot
+ be removed
+ """
+ deploy_utils.tear_down_inband_cleaning(
+ task, manage_boot=self.should_manage_boot(task))
+
+
+class AgentOobStepsMixin(object):
+ """Mixin with out-of-band deploy steps."""
+
+ @METRICS.timer('AgentDeployMixin.switch_to_tenant_network')
+ @base.deploy_step(priority=30)
+ @task_manager.require_exclusive_lock
+ def switch_to_tenant_network(self, task):
+ """Deploy step to switch the node to the tenant network.
+
+ :param task: a TaskManager object containing the node
+ """
+ try:
+ with manager_utils.power_state_for_network_configuration(task):
+ task.driver.network.remove_provisioning_network(task)
+ task.driver.network.configure_tenant_networks(task)
+ except Exception as e:
+ msg = (_('Error changing node %(node)s to tenant networks after '
+ 'deploy. %(cls)s: %(error)s') %
+ {'node': task.node.uuid, 'cls': e.__class__.__name__,
+ 'error': e})
+ # NOTE(mgoddard): Don't collect logs since the node has been
+ # powered off.
+ log_and_raise_deployment_error(task, msg, collect_logs=False,
+ exc=e)
+
+ @METRICS.timer('AgentDeployMixin.boot_instance')
+ @base.deploy_step(priority=20)
+ @task_manager.require_exclusive_lock
+ def boot_instance(self, task):
+ """Deploy step to boot the final instance.
+
+ :param task: a TaskManager object containing the node
+ """
+ can_power_on = (states.POWER_ON in
+ task.driver.power.get_supported_power_states(task))
+ try:
+ if can_power_on:
+ manager_utils.node_power_action(task, states.POWER_ON)
+ else:
+ LOG.debug('Not trying to power on node %s that does not '
+ 'support powering on, assuming already running',
+ task.node.uuid)
+ except Exception as e:
+ msg = (_('Error booting node %(node)s after deploy. '
+ '%(cls)s: %(error)s') %
+ {'node': task.node.uuid, 'cls': e.__class__.__name__,
+ 'error': e})
+ # NOTE(mgoddard): Don't collect logs since the node has been
+ # powered off.
+ log_and_raise_deployment_error(task, msg, collect_logs=False,
+ exc=e)
+
+
+class AgentDeployMixin(HeartbeatMixin, AgentOobStepsMixin):
"""Mixin with deploy methods."""
@METRICS.timer('AgentDeployMixin.get_clean_steps')
@@ -713,10 +892,15 @@ class AgentDeployMixin(HeartbeatMixin):
'steps': previous_steps})
call = getattr(self._client, 'get_%s_steps' % step_type)
- # TODO(dtantsur): remove the error handling in the V release.
try:
agent_result = call(node, task.ports).get('command_result', {})
except exception.AgentAPIError as exc:
+ if 'agent is busy' in str(exc):
+ LOG.debug('Agent is busy with a command, will refresh steps '
+ 'on the next heartbeat')
+ return
+
+ # TODO(dtantsur): change to just 'raise'
if step_type == 'clean':
raise
else:
@@ -918,11 +1102,11 @@ class AgentDeployMixin(HeartbeatMixin):
msg = (_('Agent returned error for %(type)s step %(step)s on node '
'%(node)s : %(err)s.') %
{'node': node.uuid,
- 'err': command.get('command_error'),
+ 'err': agent_client.get_command_error(command),
'step': current_step,
'type': step_type})
LOG.error(msg)
- return manager_utils.cleaning_error_handler(task, msg)
+ return _step_failure_handler(task, msg, step_type)
# NOTE(dtantsur): VERSION_MISMATCH is a new alias for
# CLEAN_VERSION_MISMATCH, remove the old one after IPA removes it.
elif command.get('command_status') in ('CLEAN_VERSION_MISMATCH',
@@ -950,10 +1134,7 @@ class AgentDeployMixin(HeartbeatMixin):
'step': current_step,
'type': step_type})
LOG.exception(msg)
- if step_type == 'clean':
- return manager_utils.cleaning_error_handler(task, msg)
- else:
- return manager_utils.deploying_error_handler(task, msg)
+ return _step_failure_handler(task, msg, step_type)
if current_step.get('reboot_requested'):
_post_step_reboot(task, step_type)
@@ -971,21 +1152,15 @@ class AgentDeployMixin(HeartbeatMixin):
'step': current_step,
'type': step_type})
LOG.error(msg)
- if step_type == 'clean':
- return manager_utils.cleaning_error_handler(task, msg)
- else:
- return manager_utils.deploying_error_handler(task, msg)
+ return _step_failure_handler(task, msg, step_type)
- @METRICS.timer('AgentDeployMixin.reboot_and_finish_deploy')
- def reboot_and_finish_deploy(self, task):
- """Helper method to trigger reboot on the node and finish deploy.
-
- This method initiates a reboot on the node. On success, it
- marks the deploy as complete. On failure, it logs the error
- and marks deploy as failure.
+ @METRICS.timer('AgentDeployMixin.tear_down_agent')
+ @base.deploy_step(priority=40)
+ @task_manager.require_exclusive_lock
+ def tear_down_agent(self, task):
+ """A deploy step to tear down the agent.
:param task: a TaskManager object containing the node
- :raises: InstanceDeployFailure, if node reboot failed.
"""
wait = CONF.agent.post_deploy_get_power_state_retry_interval * 1000
attempts = CONF.agent.post_deploy_get_power_state_retries + 1
@@ -1007,21 +1182,38 @@ class AgentDeployMixin(HeartbeatMixin):
# in-band methods
oob_power_off = strutils.bool_from_string(
node.driver_info.get('deploy_forces_oob_reboot', False))
+ can_power_on = (states.POWER_ON in
+ task.driver.power.get_supported_power_states(task))
try:
- if not oob_power_off:
+ if not can_power_on:
+ LOG.info('Power interface of node %(node)s does not support '
+ 'power on, using reboot to switch to the instance',
+ node.uuid)
+ self._client.sync(node)
+ manager_utils.node_power_action(task, states.REBOOT)
+ elif not oob_power_off:
try:
self._client.power_off(node)
- _wait_until_powered_off(task)
except Exception as e:
- LOG.warning('Failed to soft power off node %(node_uuid)s '
- 'in at least %(timeout)d seconds. '
+ LOG.warning('Failed to soft power off node %(node_uuid)s. '
'%(cls)s: %(error)s',
{'node_uuid': node.uuid,
- 'timeout': (wait * (attempts - 1)) / 1000,
'cls': e.__class__.__name__, 'error': e},
exc_info=not isinstance(
e, exception.IronicException))
+
+ # NOTE(dtantsur): in rare cases it may happen that the power
+ # off request comes through but we never receive the response.
+ # Check the power state before trying to force off.
+ try:
+ _wait_until_powered_off(task)
+ except Exception:
+ LOG.warning('Failed to soft power off node %(node_uuid)s '
+ 'in at least %(timeout)d seconds. Forcing '
+ 'hard power off and proceeding.',
+ {'node_uuid': node.uuid,
+ 'timeout': (wait * (attempts - 1)) / 1000})
manager_utils.node_power_action(task, states.POWER_OFF)
else:
# Flush the file system prior to hard rebooting the node
@@ -1045,23 +1237,21 @@ class AgentDeployMixin(HeartbeatMixin):
'error': e})
log_and_raise_deployment_error(task, msg, exc=e)
- try:
- with manager_utils.power_state_for_network_configuration(task):
- task.driver.network.remove_provisioning_network(task)
- task.driver.network.configure_tenant_networks(task)
- manager_utils.node_power_action(task, states.POWER_ON)
- except Exception as e:
- msg = (_('Error rebooting node %(node)s after deploy. '
- '%(cls)s: %(error)s') %
- {'node': node.uuid, 'cls': e.__class__.__name__,
- 'error': e})
- # NOTE(mgoddard): Don't collect logs since the node has been
- # powered off.
- log_and_raise_deployment_error(task, msg, collect_logs=False,
- exc=e)
+ # TODO(dtantsur): remove in W
+ @METRICS.timer('AgentDeployMixin.reboot_and_finish_deploy')
+ def reboot_and_finish_deploy(self, task):
+ """Helper method to trigger reboot on the node and finish deploy.
- # TODO(dtantsur): remove these two calls when this function becomes a
- # real deploy step.
+ This method initiates a reboot on the node. On success, it
+ marks the deploy as complete. On failure, it logs the error
+ and marks deploy as failure.
+
+ :param task: a TaskManager object containing the node
+ :raises: InstanceDeployFailure, if node reboot failed.
+ """
+ # NOTE(dtantsur): do nothing here, the new deploy steps tear_down_agent
+ # and boot_instance will be picked up and finish the deploy (even for
+ # legacy deploy interfaces without decomposed steps).
task.process_event('wait')
manager_utils.notify_conductor_resume_deploy(task)
@@ -1182,7 +1372,7 @@ class AgentDeployMixin(HeartbeatMixin):
msg = (_("Failed to install a bootloader when "
"deploying node %(node)s. Error: %(error)s") %
{'node': node.uuid,
- 'error': result['command_error']})
+ 'error': agent_client.get_command_error(result)})
log_and_raise_deployment_error(task, msg)
else:
# Its possible the install will fail if the IPA image
@@ -1190,7 +1380,7 @@ class AgentDeployMixin(HeartbeatMixin):
LOG.info('Could not install bootloader for whole disk '
'image for node %(node)s, Error: %(error)s"',
{'node': node.uuid,
- 'error': result['command_error']})
+ 'error': agent_client.get_command_error(result)})
return
try:
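Reading the priorities off the decorators in this change, the decomposed agent deploy flow now runs in this order after the core deploy.deploy step (higher priority first); raid.apply_configuration has priority 0 and only runs when requested through a deploy template:

    # deploy.write_image               priority 80  (in-band, runs in IPA)
    # deploy.prepare_instance_boot     priority 60
    # deploy.tear_down_agent           priority 40
    # deploy.switch_to_tenant_network  priority 30
    # deploy.boot_instance             priority 20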
diff --git a/ironic/drivers/modules/agent_client.py b/ironic/drivers/modules/agent_client.py
index eba9e6de8..090007d8f 100644
--- a/ironic/drivers/modules/agent_client.py
+++ b/ironic/drivers/modules/agent_client.py
@@ -30,6 +30,26 @@ METRICS = metrics_utils.get_metrics_logger(__name__)
DEFAULT_IPA_PORTAL_PORT = 3260
+REBOOT_COMMAND = 'run_image'
+
+
+def get_command_error(command):
+ """Extract an error string from the command result.
+
+ :param command: Command information from the agent.
+ :return: Error string.
+ """
+ error = command.get('command_error')
+ if error is None:
+ LOG.error('Agent returned invalid response: missing command_error in '
+ '%s', command)
+ return _('Invalid agent response')
+
+ if isinstance(error, dict):
+ return error.get('details') or error.get('message') or str(error)
+ else:
+ return error
+
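# Minimal standalone sketch of the dict-vs-string handling in
# get_command_error() above; the payloads are hypothetical and logging
# and i18n are omitted for brevity.
def _error_string(command):
    error = command.get('command_error')
    if error is None:
        return 'Invalid agent response'
    if isinstance(error, dict):
        # Prefer the detailed text, fall back to the short message.
        return error.get('details') or error.get('message') or str(error)
    return error

assert _error_string({'command_error': 'disk not found'}) == 'disk not found'
assert _error_string({'command_error': {'message': 'boom'}}) == 'boom'
assert _error_string({}) == 'Invalid agent response'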
class AgentClient(object):
"""Client for interacting with nodes via a REST API."""
@@ -42,9 +62,9 @@ class AgentClient(object):
"""Get URL endpoint for agent command request"""
agent_url = node.driver_internal_info.get('agent_url')
if not agent_url:
- raise exception.IronicException(_('Agent driver requires '
- 'agent_url in '
- 'driver_internal_info'))
+ raise exception.AgentConnectionFailed(_('Agent driver requires '
+ 'agent_url in '
+ 'driver_internal_info'))
return ('%(agent_url)s/%(api_version)s/commands/' %
{'agent_url': agent_url,
'api_version': CONF.agent.agent_api_version})
@@ -56,13 +76,68 @@ class AgentClient(object):
'params': params,
})
+ def _get_verify(self, node):
+ return node.driver_info.get('agent_verify_ca', True)
+
+ def _raise_if_typeerror(self, result, node, method):
+ error = result.get('command_error')
+ if error and error.get('type') == 'TypeError':
+ LOG.error('Agent command %(method)s for node %(node)s failed. '
+ 'Internal TypeError detected: Error %(error)s',
+ {'method': method, 'node': node.uuid, 'error': error})
+ raise exception.AgentAPIError(node=node.uuid,
+ status=error.get('code'),
+ error=get_command_error(result))
+
+ @METRICS.timer('AgentClient._wait_for_command')
+ @retrying.retry(
+ retry_on_exception=(
+ lambda e: isinstance(e, exception.AgentCommandTimeout)),
+ stop_max_attempt_number=CONF.agent.command_wait_attempts,
+ wait_fixed=CONF.agent.command_wait_interval * 1000)
+ def _wait_for_command(self, node, method):
+ """Wait for a command to complete.
+
+ :param node: A Node object.
+ :param method: A string representing the command executed by the agent.
+ :raises: AgentCommandTimeout if timeout is reached.
+ """
+ try:
+ method = method.split('.', 1)[1]
+ except IndexError:
+ pass
+
+ # NOTE(dtantsur): this function raises AgentCommandTimeout on every
+ # failure; unless the overall timeout is reached, the exception is
+ # caught and retried by the @retry decorator above.
+
+ commands = self.get_commands_status(node)
+ try:
+ result = next(c for c in reversed(commands)
+ if c.get('command_name') == method)
+ except StopIteration:
+ LOG.debug('Command %(cmd)s is not in the executing commands list '
+ 'for node %(node)s',
+ {'cmd': method, 'node': node.uuid})
+ raise exception.AgentCommandTimeout(command=method, node=node.uuid)
+
+ if result.get('command_status') == 'RUNNING':
+ LOG.debug('Command %(cmd)s has not finished yet for node %(node)s',
+ {'cmd': method, 'node': node.uuid})
+ raise exception.AgentCommandTimeout(command=method, node=node.uuid)
+ else:
+ LOG.debug('Command %(cmd)s has finished for node %(node)s with '
+ 'result %(result)s',
+ {'cmd': method, 'node': node.uuid, 'result': result})
+ self._raise_if_typeerror(result, node, method)
+ return result
+
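# Standalone sketch of the polling pattern used by _wait_for_command()
# above: raise a sentinel exception while the command is still running
# and let the retrying decorator re-invoke the check. The attempt count
# and interval are hypothetical stand-ins for [agent]command_wait_attempts
# and [agent]command_wait_interval.
import retrying

class StillRunning(Exception):
    pass

@retrying.retry(
    retry_on_exception=lambda e: isinstance(e, StillRunning),
    stop_max_attempt_number=10,
    wait_fixed=6 * 1000)  # retrying counts in milliseconds
def wait_for(check):
    result = check()
    if result is None:  # not finished yet, retry
        raise StillRunning()
    return result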
@METRICS.timer('AgentClient._command')
@retrying.retry(
retry_on_exception=(
lambda e: isinstance(e, exception.AgentConnectionFailed)),
stop_max_attempt_number=CONF.agent.max_command_attempts)
- def _command(self, node, method, params, wait=False,
- command_timeout_factor=1):
+ def _command(self, node, method, params, wait=False, poll=False):
"""Sends command to agent.
:param node: A Node object.
@@ -72,19 +147,16 @@ class AgentClient(object):
body.
:param wait: True to wait for the command to finish executing, False
otherwise.
- :param command_timeout_factor: An integer, default 1, by which to
- multiply the [agent]command_timeout
- value. This is intended for use with
- extremely long running commands to
- the agent ramdisk where a general
- timeout value should not be extended
- in all cases.
+ :param poll: Whether to poll the command until completion. Provides
+ a better alternative to `wait` for long-running commands.
:raises: IronicException when failed to issue the request or there was
a malformed response from the agent.
:raises: AgentAPIError when agent failed to execute specified command.
:returns: A dict containing command result from agent, see
get_commands_status for a sample.
"""
+ assert not (wait and poll)
+
url = self._get_command_url(node)
body = self._get_command_body(method, params)
request_params = {
@@ -99,7 +171,8 @@ class AgentClient(object):
try:
response = self.session.post(
url, params=request_params, data=body,
- timeout=CONF.agent.command_timeout * command_timeout_factor)
+ verify=self._get_verify(node),
+ timeout=CONF.agent.command_timeout)
except (requests.ConnectionError, requests.Timeout) as e:
msg = (_('Failed to connect to the agent running on node %(node)s '
'for invoking command %(method)s. Error: %(error)s') %
@@ -128,12 +201,6 @@ class AgentClient(object):
raise exception.IronicException(msg)
error = result.get('command_error')
- exc_type = None
- if error:
- # if an error, we should see if a type field exists. This type
- # field may signal an exception that is compatability based.
- exc_type = error.get('type')
-
LOG.debug('Agent command %(method)s for node %(node)s returned '
'result %(res)s, error %(error)s, HTTP status code %(code)d',
{'node': node.uuid, 'method': method,
@@ -149,26 +216,23 @@ class AgentClient(object):
raise exception.AgentAPIError(node=node.uuid,
status=response.status_code,
error=result.get('faultstring'))
- if exc_type == 'TypeError':
- LOG.error('Agent command %(method)s for node %(node)s failed. '
- 'Internal %(exc_type)s error detected: Error %(error)s',
- {'method': method, 'node': node.uuid,
- 'exc_type': exc_type, 'error': error})
- raise exception.AgentAPIError(node=node.uuid,
- status=error.get('code'),
- error=result.get('faultstring'))
+
+ self._raise_if_typeerror(result, node, method)
+
+ if poll:
+ result = self._wait_for_command(node, method)
return result
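# Usage contrast for the two modes above: wait=True asks the agent to
# execute synchronously, holding the HTTP request open until the command
# finishes (risking request timeouts), while poll=True lets the HTTP call
# return quickly and then polls get_commands_status() via
# _wait_for_command(), which suits long-running commands such as
# image.install_bootloader.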
@METRICS.timer('AgentClient.get_commands_status')
- @retrying.retry(
- retry_on_exception=(
- lambda e: isinstance(e, exception.AgentConnectionFailed)),
- stop_max_attempt_number=CONF.agent.max_command_attempts)
- def get_commands_status(self, node):
+ def get_commands_status(self, node, retry_connection=True,
+ expect_errors=False):
"""Get command status from agent.
:param node: A Node object.
+ :param retry_connection: Whether to retry connection problems.
+ :param expect_errors: If True, do not log connection problems as
+ errors.
:return: A list of command results, each result relating to a
command issued to the agent. A typical result can be:
@@ -197,17 +261,28 @@ class AgentClient(object):
"""
url = self._get_command_url(node)
LOG.debug('Fetching status of agent commands for node %s', node.uuid)
- try:
- resp = self.session.get(url, timeout=CONF.agent.command_timeout)
- except (requests.ConnectionError, requests.Timeout) as e:
- msg = (_('Failed to connect to the agent running on node %(node)s '
- 'to collect commands status. '
- 'Error: %(error)s') %
- {'node': node.uuid, 'error': e})
- LOG.error(msg)
- raise exception.AgentConnectionFailed(reason=msg)
- result = resp.json()['commands']
+ def _get():
+ try:
+ return self.session.get(url,
+ verify=self._get_verify(node),
+ timeout=CONF.agent.command_timeout)
+ except (requests.ConnectionError, requests.Timeout) as e:
+ msg = (_('Failed to connect to the agent running on node '
+ '%(node)s to collect commands status. '
+ 'Error: %(error)s') %
+ {'node': node.uuid, 'error': e})
+ logging_call = LOG.debug if expect_errors else LOG.error
+ logging_call(msg)
+ raise exception.AgentConnectionFailed(reason=msg)
+
+ if retry_connection:
+ _get = retrying.retry(
+ retry_on_exception=(
+ lambda e: isinstance(e, exception.AgentConnectionFailed)),
+ stop_max_attempt_number=CONF.agent.max_command_attempts)(_get)
+
+ result = _get().json()['commands']
status = '; '.join('%(cmd)s: result "%(res)s", error "%(err)s"' %
{'cmd': r.get('command_name'),
'res': r.get('command_result'),
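# Standalone sketch of the conditional retry wrapping above: the
# decorator is applied at call time, and only when the caller asked for
# retries. The names and limits here are hypothetical.
import retrying

def fetch(get_once, retry_connection=True, attempts=3):
    call = get_once
    if retry_connection:
        call = retrying.retry(
            retry_on_exception=lambda e: isinstance(e, ConnectionError),
            stop_max_attempt_number=attempts)(call)
    return call()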
@@ -245,7 +320,7 @@ class AgentClient(object):
return self._command(node=node,
method='standby.prepare_image',
params=params,
- wait=wait)
+ poll=wait)
@METRICS.timer('AgentClient.start_iscsi_target')
def start_iscsi_target(self, node, iqn,
@@ -313,8 +388,7 @@ class AgentClient(object):
return self._command(node=node,
method='image.install_bootloader',
params=params,
- wait=True,
- command_timeout_factor=2)
+ poll=True)
except exception.AgentAPIError:
# NOTE(arne_wiebalck): If for software RAID and 'uefi' as the boot
# mode, we find that the IPA does not yet support the additional
@@ -338,8 +412,7 @@ class AgentClient(object):
return self._command(node=node,
method='image.install_bootloader',
params=params,
- wait=True,
- command_timeout_factor=2)
+ poll=True)
@METRICS.timer('AgentClient.get_clean_steps')
def get_clean_steps(self, node, ports):
@@ -467,6 +540,22 @@ class AgentClient(object):
method='deploy.execute_deploy_step',
params=params)
+ @METRICS.timer('AgentClient.get_partition_uuids')
+ def get_partition_uuids(self, node):
+ """Get deploy steps from agent.
+
+ :param node: A node object.
+ :raises: IronicException when failed to issue the request or there was
+ a malformed response from the agent.
+ :raises: AgentAPIError when agent failed to execute specified command.
+ :returns: A dict containing command response from agent.
+
+ """
+ return self._command(node=node,
+ method='standby.get_partition_uuids',
+ params={},
+ wait=True)
+
@METRICS.timer('AgentClient.power_off')
def power_off(self, node):
"""Soft powers off the bare metal node by shutting down ramdisk OS.
@@ -482,6 +571,21 @@ class AgentClient(object):
method='standby.power_off',
params={})
+ @METRICS.timer('AgentClient.reboot')
+ def reboot(self, node):
+ """Soft reboots the bare metal node by shutting down ramdisk OS.
+
+ :param node: A Node object.
+ :raises: IronicException when failed to issue the request or there was
+ a malformed response from the agent.
+ :raises: AgentAPIError when agent failed to execute specified command.
+ :returns: A dict containing command response from agent.
+ See :func:`get_commands_status` for a command result sample.
+ """
+ return self._command(node=node,
+ method='standby.%s' % REBOOT_COMMAND,
+ params={})
+
@METRICS.timer('AgentClient.sync')
def sync(self, node):
"""Flush file system buffers forcing changed blocks to disk.
diff --git a/ironic/drivers/modules/agent_power.py b/ironic/drivers/modules/agent_power.py
new file mode 100644
index 000000000..11ef5711a
--- /dev/null
+++ b/ironic/drivers/modules/agent_power.py
@@ -0,0 +1,220 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+The agent power interface.
+"""
+
+import time
+
+from oslo_config import cfg
+from oslo_log import log
+import retrying
+
+from ironic.common import exception
+from ironic.common.i18n import _
+from ironic.common import states
+from ironic.conductor import utils as cond_utils
+from ironic.drivers import base
+from ironic.drivers.modules import agent_client
+
+
+CONF = cfg.CONF
+
+LOG = log.getLogger(__name__)
+
+_POWER_WAIT = 30
+
+
+class AgentPower(base.PowerInterface):
+ """Power interface using the running agent for power actions."""
+
+ def __init__(self):
+ super(AgentPower, self).__init__()
+ if not CONF.deploy.fast_track:
+ raise exception.InvalidParameterValue(
+ _('[deploy]fast_track must be True to enable the agent '
+ 'power interface'))
+ self._client = agent_client.AgentClient()
+
+ def get_properties(self):
+ """Return the properties of the interface.
+
+ :returns: dictionary of <property name>:<property description> entries.
+ """
+ return {}
+
+ def validate(self, task):
+ """Validate the driver-specific Node deployment info.
+
+ :param task: A TaskManager instance containing the node to act on.
+ :raises: InvalidParameterValue on malformed parameter(s)
+ """
+ # NOTE(dtantsur): the fast_track option is mutable, so we have to check
+ # it again on validation.
+ if not CONF.deploy.fast_track:
+ raise exception.InvalidParameterValue(
+ _('[deploy]fast_track must be True to enable the agent '
+ 'power interface'))
+ # TODO(dtantsur): support ACTIVE nodes
+ if not cond_utils.agent_is_alive(task.node):
+ raise exception.InvalidParameterValue(
+ _('Agent seems offline for node %s, the agent power interface '
+ 'cannot be used') % task.node.uuid)
+
+ def supports_power_sync(self, task):
+ """Check if power sync is supported for the given node.
+
+ Not supported by the agent power interface, since it cannot power
+ nodes on or off.
+
+ :param task: A TaskManager instance containing the node to act on
+ with a **shared** lock.
+ :returns: boolean, whether power sync is supported.
+ """
+ return False
+
+ def get_supported_power_states(self, task):
+ """Get a list of the supported power states.
+
+ Only contains REBOOT and SOFT_REBOOT.
+
+ :param task: A TaskManager instance containing the node to act on.
+ :returns: A list with the supported power states defined
+ in :mod:`ironic.common.states`.
+ """
+ return [states.REBOOT, states.SOFT_REBOOT]
+
+ def get_power_state(self, task):
+ """Return the power state of the task's node.
+
+ Essentially, the only detectable state is POWER_ON; everything
+ else is an error (or, more precisely, ``None``).
+
+ :param task: A TaskManager instance containing the node to act on.
+ :returns: A power state. One of :mod:`ironic.common.states`.
+ """
+ # TODO(dtantsur): support ACTIVE nodes
+ if cond_utils.agent_is_alive(task.node):
+ return states.POWER_ON
+ else:
+ LOG.error('Node %s is not fast-track-able, cannot determine '
+ 'its power state via the "agent" power interface',
+ task.node.uuid)
+ return None
+
+ def set_power_state(self, task, power_state, timeout=None):
+ """Set the power state of the task's node.
+
+ :param task: A TaskManager instance containing the node to act on.
+ :param power_state: Power state from :mod:`ironic.common.states`.
+ Only REBOOT and SOFT_REBOOT are supported and are synonymous.
+ :param timeout: timeout (in seconds) positive integer (> 0) for any
+ power state. ``None`` indicates to use default timeout.
+ :raises: PowerStateFailure on non-supported power state.
+ """
+ if power_state in (states.REBOOT, states.SOFT_REBOOT):
+ return self.reboot(task)
+ else:
+ LOG.error('Power state %(state)s is not implemented for node '
+ '%(node)s using the "agent" power interface',
+ {'node': task.node.uuid, 'state': power_state})
+ raise exception.PowerStateFailure(pstate=power_state)
+
+ def reboot(self, task, timeout=None):
+ """Perform a reboot of the task's node.
+
+ Only soft reboot is implemented.
+
+ :param task: A TaskManager instance containing the node to act on.
+ :param timeout: timeout (in seconds) positive integer (> 0) for any
+ power state. ``None`` indicates to use default timeout.
+ """
+ node = task.node
+
+ self._client.reboot(node)
+
+ info = node.driver_internal_info
+ # NOTE(dtantsur): wipe the agent token, otherwise the rebooted agent
+ # won't be able to heartbeat. This is mostly a precaution since the
+ # calling code in conductor is expected to handle it.
+ if not info.get('agent_secret_token_pregenerated'):
+ info.pop('agent_secret_token', None)
+ # NOTE(dtantsur): the URL may change on reboot, wipe it as well (but
+ # only after we call reboot).
+ info.pop('agent_url', None)
+ node.driver_internal_info = info
+ node.save()
+
+ LOG.debug('Requested reboot of node %(node)s via the agent, waiting '
+ '%(wait)d seconds for the node to power down',
+ {'node': task.node.uuid, 'wait': _POWER_WAIT})
+ time.sleep(_POWER_WAIT)
+
+ if (node.provision_state in (states.DEPLOYING, states.CLEANING)
+ and (node.driver_internal_info.get('deployment_reboot')
+ or node.driver_internal_info.get('cleaning_reboot'))):
+ # NOTE(dtantsur): we need to downgrade the lock otherwise
+ # heartbeats won't be processed. It should not have side effects
+ # for nodes in DEPLOYING/CLEANING.
+ task.downgrade_lock()
+
+ try:
+ self._wait_for_reboot(task, timeout)
+ finally:
+ # The caller probably expects a lock, so re-acquire it
+ task.upgrade_lock()
+
+ def _wait_for_reboot(self, task, timeout):
+ wait = CONF.agent.post_deploy_get_power_state_retry_interval
+ if not timeout:
+ timeout = CONF.agent.post_deploy_get_power_state_retries * wait
+
+ @retrying.retry(
+ stop_max_delay=timeout * 1000,
+ retry_on_result=lambda result: not result,
+ retry_on_exception=(
+ lambda e: isinstance(e, exception.AgentConnectionFailed)),
+ wait_fixed=wait * 1000
+ )
+ def _wait_until_rebooted(task):
+ try:
+ status = self._client.get_commands_status(
+ task.node, retry_connection=False, expect_errors=True)
+ except exception.AgentConnectionFailed:
+ LOG.debug('Still waiting for the agent to come back on the '
+ 'node %s', task.node.uuid)
+ raise
+
+ if any(cmd['command_name'] == agent_client.REBOOT_COMMAND
+ for cmd in status):
+ LOG.debug('Still waiting for the agent to power off on the '
+ 'node %s', task.node.uuid)
+ return False
+
+ return True
+
+ try:
+ _wait_until_rebooted(task)
+ except exception.AgentConnectionFailed as exc:
+ msg = _('Agent failed to come back on %(node)s with the "agent" '
+ 'power interface: %(exc)s') % {
+ 'node': task.node.uuid, 'exc': exc}
+ LOG.error(msg)
+ raise exception.PowerStateFailure(msg)
+ except Exception as exc:
+ LOG.error('Could not reboot node %(node)s with the "agent" power '
+ 'interface: %(exc)s',
+ {'node': task.node.uuid, 'exc': exc})
+ raise exception.PowerStateFailure(
+ _('Unexpected error when rebooting through the agent: %s')
+ % exc)
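# Standalone sketch of the wait loop in _wait_for_reboot() above: retry
# both on a False result (the pre-reboot agent is still answering) and on
# connection errors (the node is down mid-reboot) until a deadline. The
# numbers are hypothetical; retrying counts delays in milliseconds.
import retrying

@retrying.retry(
    stop_max_delay=600 * 1000,
    retry_on_result=lambda done: not done,
    retry_on_exception=lambda e: isinstance(e, ConnectionError),
    wait_fixed=8 * 1000)
def wait_until_rebooted(check_agent):
    # check_agent() should raise ConnectionError while the node is down
    # and return True once a freshly booted agent responds again.
    return check_agent()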
diff --git a/ironic/drivers/modules/ansible/deploy.py b/ironic/drivers/modules/ansible/deploy.py
index cbecdc976..d4186741f 100644
--- a/ironic/drivers/modules/ansible/deploy.py
+++ b/ironic/drivers/modules/ansible/deploy.py
@@ -375,9 +375,13 @@ def _get_clean_steps(node, interface=None, override_priorities=None):
return steps
-class AnsibleDeploy(agent_base.HeartbeatMixin, base.DeployInterface):
+class AnsibleDeploy(agent_base.HeartbeatMixin,
+ agent_base.AgentOobStepsMixin,
+ base.DeployInterface):
"""Interface for deploy-related actions."""
+ has_decomposed_deploy_steps = True
+
def __init__(self):
super(AnsibleDeploy, self).__init__()
# NOTE(pas-ha) overriding agent creation as we won't be
@@ -442,12 +446,22 @@ class AnsibleDeploy(agent_base.HeartbeatMixin, base.DeployInterface):
manager_utils.node_power_action(task, states.REBOOT)
return states.DEPLOYWAIT
+ def process_next_step(self, task, step_type):
+ """Start the next clean/deploy step if the previous one is complete.
+
+ :param task: a TaskManager instance
+ :param step_type: "clean" or "deploy"
+ """
+ # Run the next step as soon as agent heartbeats in deploy.deploy
+ if step_type == 'deploy' and self.in_core_deploy_step(task):
+ manager_utils.notify_conductor_resume_deploy(task)
+
@staticmethod
def _required_image_info(task):
"""Gather and save needed image info while the context is good.
Gather image info that will be needed later, during the
- continue_deploy execution, where the context won't be the same
+ write_image execution, where the context won't be the same
anymore, since coming from the server's heartbeat.
"""
node = task.node
@@ -586,35 +600,30 @@ class AnsibleDeploy(agent_base.HeartbeatMixin, base.DeployInterface):
manager_utils.restore_power_state_if_needed(
task, power_state_to_restore)
- @METRICS.timer('AnsibleDeploy.continue_deploy')
- def continue_deploy(self, task):
+ @METRICS.timer('AnsibleDeploy.write_image')
+ @base.deploy_step(priority=80)
+ def write_image(self, task):
# NOTE(pas-ha) the lock should be already upgraded in heartbeat,
# just setting its purpose for better logging
task.upgrade_lock(purpose='deploy')
- task.process_event('resume')
# NOTE(pas-ha) this method is called from heartbeat processing only,
# so we are sure we need this particular method, not the general one
node_address = _get_node_ip(task)
self._ansible_deploy(task, node_address)
- self.reboot_to_instance(task)
-
- @METRICS.timer('AnsibleDeploy.reboot_to_instance')
- def reboot_to_instance(self, task):
- node = task.node
- LOG.info('Ansible complete deploy on node %s', node.uuid)
-
- LOG.debug('Rebooting node %s to instance', node.uuid)
+ LOG.info('Ansible deploy is complete on node %s', task.node.uuid)
manager_utils.node_set_boot_device(task, 'disk', persistent=True)
- self.reboot_and_finish_deploy(task)
- task.driver.boot.clean_up_ramdisk(task)
- # TODO(dtantsur): remove these two calls when this function becomes a
- # real deploy step.
- task.process_event('wait')
- manager_utils.notify_conductor_resume_deploy(task)
+ @METRICS.timer('AnsibleDeploy.tear_down_agent')
+ @base.deploy_step(priority=40)
+ @task_manager.require_exclusive_lock
+ def tear_down_agent(self, task):
+ """A deploy step to tear down the agent.
+
+ Shuts down the machine and removes it from the provisioning
+ network.
- @METRICS.timer('AnsibleDeploy.reboot_and_finish_deploy')
- def reboot_and_finish_deploy(self, task):
+ :param task: a TaskManager object containing the node
+ """
wait = CONF.ansible.post_deploy_get_power_state_retry_interval * 1000
attempts = CONF.ansible.post_deploy_get_power_state_retries + 1
@@ -652,13 +661,6 @@ class AnsibleDeploy(agent_base.HeartbeatMixin, base.DeployInterface):
manager_utils.node_power_action(task, states.POWER_OFF)
else:
manager_utils.node_power_action(task, states.POWER_OFF)
- power_state_to_restore = (
- manager_utils.power_on_node_if_needed(task))
- task.driver.network.remove_provisioning_network(task)
- task.driver.network.configure_tenant_networks(task)
- manager_utils.restore_power_state_if_needed(
- task, power_state_to_restore)
- manager_utils.node_power_action(task, states.POWER_ON)
except Exception as e:
msg = (_('Error rebooting node %(node)s after deploy. '
'Error: %(error)s') %
diff --git a/ironic/drivers/modules/console_utils.py b/ironic/drivers/modules/console_utils.py
index d137bbf40..6e08b6712 100644
--- a/ironic/drivers/modules/console_utils.py
+++ b/ironic/drivers/modules/console_utils.py
@@ -21,6 +21,7 @@ Ironic console utilities.
import errno
import fcntl
+import ipaddress
import os
import signal
import socket
@@ -32,7 +33,6 @@ from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import fileutils
-from oslo_utils import netutils
import psutil
from ironic.common import exception
@@ -162,11 +162,24 @@ def _get_port_range():
return start, stop
-def _verify_port(port):
+def _verify_port(port, host=None):
"""Check whether specified port is in use."""
- s = socket.socket()
+ ip_version = None
+ if host is not None:
+ try:
+ ip_version = ipaddress.ip_address(host).version
+ except ValueError:
+ # Assume it's a hostname
+ pass
+ else:
+ host = CONF.host
+ if ip_version == 6:
+ s = socket.socket(socket.AF_INET6)
+ else:
+ s = socket.socket()
+
try:
- s.bind((CONF.host, port))
+ s.bind((host, port))
except socket.error:
raise exception.Conflict()
finally:
@@ -174,7 +187,7 @@ def _verify_port(port):
@lockutils.synchronized(SERIAL_LOCK)
-def acquire_port():
+def acquire_port(host=None):
"""Returns a free TCP port on current host.
Finds and returns a free TCP port in the range
@@ -187,7 +200,7 @@ def acquire_port():
if port in ALLOCATED_PORTS:
continue
try:
- _verify_port(port)
+ _verify_port(port, host=host)
ALLOCATED_PORTS.add(port)
return port
except exception.Conflict:
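# Minimal sketch of the address-family selection in _verify_port() above:
# bind an AF_INET6 socket for IPv6 literals, AF_INET otherwise, and treat
# a bind failure as "port in use". Names are hypothetical.
import ipaddress
import socket

def port_is_free(host, port):
    try:
        family = (socket.AF_INET6
                  if ipaddress.ip_address(host).version == 6
                  else socket.AF_INET)
    except ValueError:  # not an IP literal, assume a hostname
        family = socket.AF_INET
    s = socket.socket(family)
    try:
        s.bind((host, port))
        return True
    except OSError:
        return False
    finally:
        s.close()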
@@ -402,7 +415,7 @@ def start_socat_console(node_uuid, port, console_cmd):
args.append('-L%s' % pid_file)
console_host = CONF.console.socat_address
- if netutils.is_valid_ipv6(console_host):
+ if ipaddress.ip_address(console_host).version == 6:
arg = ('TCP6-LISTEN:%(port)s,bind=[%(host)s],reuseaddr,fork,'
'max-children=1')
else:
diff --git a/ironic/drivers/modules/deploy_utils.py b/ironic/drivers/modules/deploy_utils.py
index cb0af75c8..648a38d0b 100644
--- a/ironic/drivers/modules/deploy_utils.py
+++ b/ironic/drivers/modules/deploy_utils.py
@@ -67,7 +67,6 @@ RESCUE_LIKE_STATES = (states.RESCUING, states.RESCUEWAIT, states.RESCUEFAIL,
DISK_LAYOUT_PARAMS = ('root_gb', 'swap_mb', 'ephemeral_gb')
-
# All functions are called from deploy() directly or indirectly.
# They are split for stub-out.
@@ -92,10 +91,6 @@ def get_ironic_api_url():
CONF.set_override('auth_type', 'none', group='service_catalog')
adapter_opts['auth'] = keystone.get_auth('service_catalog')
- # TODO(pas-ha) remove in Rocky
- # NOTE(pas-ha) if both set, the new options win
- if CONF.conductor.api_url and not CONF.service_catalog.endpoint_override:
- adapter_opts['endpoint_override'] = CONF.conductor.api_url
try:
ironic_api = keystone.get_endpoint('service_catalog', **adapter_opts)
except (exception.KeystoneFailure,
@@ -183,7 +178,7 @@ def switch_pxe_config(path, root_uuid_or_disk_id, boot_mode,
:param ipxe_enabled: A default False boolean value to tell the method
if the caller is using iPXE.
"""
- if not ramdisk_boot:
+ if not ramdisk_boot and root_uuid_or_disk_id is not None:
if not is_whole_disk_image:
_replace_root_uuid(path, root_uuid_or_disk_id)
else:
@@ -316,6 +311,7 @@ def agent_add_clean_params(task):
secure_erase = CONF.deploy.enable_ata_secure_erase
info['agent_enable_ata_secure_erase'] = secure_erase
info['disk_erasure_concurrency'] = CONF.deploy.disk_erasure_concurrency
+ info['agent_erase_skip_read_only'] = CONF.deploy.erase_skip_read_only
task.node.driver_internal_info = info
task.node.save()
@@ -383,6 +379,54 @@ def get_pxe_boot_file(node):
return boot_file
+def get_ipxe_boot_file(node):
+ """Return the iPXE boot file name requested for deploy.
+
+ This method returns the iPXE boot file name to be used for deploy.
+ An architecture-specific boot file is searched for first; the
+ BIOS/UEFI boot file is used if no valid one is found.
+
+ If no valid value is found, the default reverts to the
+ ``get_pxe_boot_file`` method and thus the
+ ``[pxe]pxe_bootfile_name`` and ``[pxe]uefi_pxe_bootfile_name``
+ settings.
+
+ :param node: A single Node.
+ :returns: The iPXE boot file name.
+ """
+ cpu_arch = node.properties.get('cpu_arch')
+ boot_file = CONF.pxe.ipxe_bootfile_name_by_arch.get(cpu_arch)
+ if boot_file is None:
+ if boot_mode_utils.get_boot_mode(node) == 'uefi':
+ boot_file = CONF.pxe.uefi_ipxe_bootfile_name
+ else:
+ boot_file = CONF.pxe.ipxe_bootfile_name
+
+ if boot_file is None:
+ boot_file = get_pxe_boot_file(node)
+
+ return boot_file
+
+
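# The three-level fallback of get_ipxe_boot_file() in isolation
# (per-arch -> per-boot-mode -> PXE defaults); the values are
# hypothetical stand-ins for the [pxe] settings.
def pick_boot_file(cpu_arch, boot_mode, by_arch,
                   uefi_default, bios_default, pxe_fallback):
    boot_file = by_arch.get(cpu_arch)
    if boot_file is None:
        boot_file = uefi_default if boot_mode == 'uefi' else bios_default
    if boot_file is None:
        boot_file = pxe_fallback()
    return boot_file

assert pick_boot_file('aarch64', 'uefi', {'aarch64': 'ipxe-arm64.efi'},
                      'ipxe.efi', 'undionly.kpxe',
                      lambda: 'pxelinux.0') == 'ipxe-arm64.efi'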
+def get_ipxe_config_template(node):
+ """Return the iPXE config template file name requested of deploy.
+
+ This method returns the iPXE configuration template file.
+
+ :param node: A single Node.
+ :returns: The iPXE config template file name.
+ """
+ # NOTE(TheJulia): iPXE configuration files don't change based upon the
+ # architecture, and we're not trying to support multiple different boot
+ # loaders by architecture, as they are all consistent; whereas PXE
+ # could need grub for one arch and PXELINUX for another.
+ configured_template = CONF.pxe.ipxe_config_template
+ override_template = node.driver_info.get('pxe_template')
+ if override_template:
+ configured_template = override_template
+ return configured_template or get_pxe_config_template(node)
+
+
def get_pxe_config_template(node):
"""Return the PXE config template file name requested for deploy.
@@ -467,7 +511,14 @@ def validate_image_properties(ctx, deploy_info, properties):
:raises: MissingParameterValue if the image doesn't contain
the mentioned properties.
"""
- image_href = deploy_info['image_source']
+ image_href = deploy_info.get('image_source')
+ boot_iso = deploy_info.get('boot_iso')
+ if image_href and boot_iso:
+ raise exception.InvalidParameterValue(_(
+ "An 'image_source' and 'boot_iso' parameter may not be "
+ "specified at the same time."))
+ if not image_href:
+ image_href = boot_iso
try:
img_service = image_service.get_image_service(image_href, context=ctx)
image_props = img_service.show(image_href)['properties']
@@ -654,11 +705,20 @@ def get_image_instance_info(node):
instance_info. Also raises same exception if kernel/ramdisk is
missing in instance_info for non-glance images.
"""
+ # TODO(TheJulia): We seem to have a lack of direct unit testing of this
+ # method, but that is likely okay. If memory serves we test this at
+ # a few different levels. That being said, it would be good for some
+ # more explicit unit testing to exist.
info = {}
- info['image_source'] = node.instance_info.get('image_source')
is_whole_disk_image = node.driver_internal_info.get('is_whole_disk_image')
- if not is_whole_disk_image:
+ boot_iso = node.instance_info.get('boot_iso')
+ if not boot_iso:
+ info['image_source'] = node.instance_info.get('image_source')
+ else:
+ info['boot_iso'] = boot_iso
+
+ if not is_whole_disk_image and not boot_iso:
if not service_utils.is_glance_image(info['image_source']):
info['kernel'] = node.instance_info.get('kernel')
info['ramdisk'] = node.instance_info.get('ramdisk')
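# Illustrative outcomes of the boot_iso handling above, with hypothetical
# instance_info values:
#   {'image_source': 'glance://...'}   -> info carries 'image_source'
#       (plus 'kernel'/'ramdisk' for non-glance, non-whole-disk images)
#   {'boot_iso': 'http://host/my.iso'} -> info carries 'boot_iso' only;
#       the kernel/ramdisk lookup is skipped for ISO boot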
@@ -838,11 +898,11 @@ class InstanceImageCache(image_cache.ImageCache):
@METRICS.timer('cache_instance_image')
-def cache_instance_image(ctx, node, force_raw=CONF.force_raw_images):
+def cache_instance_image(ctx, node, force_raw=None):
"""Fetch the instance's image from Glance
- This method pulls the AMI and writes them to the appropriate place
- on local disk.
+ This method pulls the disk image and writes it to the appropriate
+ place on the local disk.
:param ctx: context
:param node: an ironic node object
@@ -850,6 +910,10 @@ def cache_instance_image(ctx, node, force_raw=CONF.force_raw_images):
:returns: a tuple containing the uuid of the image and the path in
the filesystem where image is cached.
"""
+ # NOTE(dtantsur): applying the default here to make the option mutable
+ if force_raw is None:
+ force_raw = CONF.force_raw_images
+
i_info = parse_instance_info(node)
fileutils.ensure_tree(_get_image_dir_path(node.uuid))
image_path = _get_image_file_path(node.uuid)
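# The mutable-default pattern above in isolation: read the option at call
# time rather than in the function signature, so runtime configuration
# changes are honoured. CONFIG is a stand-in for oslo.config's CONF.
CONFIG = {'force_raw_images': True}

def cache_image(force_raw=None):
    if force_raw is None:
        # Evaluated on every call; a default argument would be frozen at
        # import time, as in the removed signature above.
        force_raw = CONFIG['force_raw_images']
    return force_raw

assert cache_image() is True
CONFIG['force_raw_images'] = False  # simulate a runtime override
assert cache_image() is False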
diff --git a/ironic/drivers/modules/drac/bios.py b/ironic/drivers/modules/drac/bios.py
index b8d61c93d..cb306f7fb 100644
--- a/ironic/drivers/modules/drac/bios.py
+++ b/ironic/drivers/modules/drac/bios.py
@@ -315,7 +315,7 @@ class DracWSManBIOS(base.BIOSInterface):
commit_job_id = client.commit_pending_lifecycle_changes(
reboot=reboot_needed)
except drac_exceptions.BaseClientException as exc:
- LOG.error('Failed to commit BIOS reset on node'
+ LOG.error('Failed to commit BIOS reset on node '
'%(node_uuid)s. Reason: %(error)s.', {
'node_uuid': node.uuid,
'error': exc})
diff --git a/ironic/drivers/modules/drac/raid.py b/ironic/drivers/modules/drac/raid.py
index c7cc08610..5d2d02341 100644
--- a/ironic/drivers/modules/drac/raid.py
+++ b/ironic/drivers/modules/drac/raid.py
@@ -42,6 +42,11 @@ LOG = logging.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)
+_CURRENT_RAID_CONTROLLER_MODE = "RAIDCurrentControllerMode"
+_REQUESTED_RAID_CONTROLLER_MODE = "RAIDRequestedControllerMode"
+_EHBA_MODE = "Enhanced HBA"
+_RAID_MODE = "RAID"
+
RAID_LEVELS = {
'0': {
'min_disks': 1,
@@ -310,6 +315,70 @@ def clear_foreign_config(node, raid_controller):
raise exception.DracOperationError(error=exc)
+def set_raid_settings(node, controller_fqdd, settings):
+ """Sets the RAID configuration
+
+ It sets the pending_value parameter for each of the attributes
+ passed in. For the values to be applied, a config job must
+ be created.
+
+ :param node: an ironic node object.
+ :param controller_fqdd: the ID of the RAID controller.
+ :param settings: a dictionary containing the proposed values, with
+ each key being the name of attribute and the value
+ being the proposed value.
+ :returns: a dictionary containing:
+ - The is_commit_required key with a boolean value indicating
+ whether a config job must be created for the values to be
+ applied.
+ - The is_reboot_required key with a RebootRequired enumerated
+ value indicating whether the server must be rebooted for the
+ values to be applied. Possible values are true and false.
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
+ """
+ try:
+ drac_job.validate_job_queue(node)
+
+ client = drac_common.get_drac_client(node)
+ return client.set_raid_settings(controller_fqdd, settings)
+ except drac_exceptions.BaseClientException as exc:
+ LOG.error('DRAC driver failed to set raid settings '
+ 'on %(raid_controller_fqdd)s '
+ 'for node %(node_uuid)s. '
+ 'Reason: %(error)s.',
+ {'raid_controller_fqdd': controller_fqdd,
+ 'node_uuid': node.uuid,
+ 'error': exc})
+ raise exception.DracOperationError(error=exc)
+
+
+def list_raid_settings(node):
+ """List the RAID configuration settings
+
+ :param node: an ironic node object.
+ :returns: a dictionary with the RAID settings using InstanceID as the
+ key. The attributes are RAIDEnumerableAttribute,
+ RAIDStringAttribute and RAIDIntegerAttribute objects.
+ :raises: DRACOperationFailed on error reported back by the DRAC
+ interface
+ """
+ try:
+ drac_job.validate_job_queue(node)
+
+ client = drac_common.get_drac_client(node)
+ return client.list_raid_settings()
+ except drac_exceptions.BaseClientException as exc:
+ LOG.error('DRAC driver failed to list raid settings '
+ 'for node %(node_uuid)s. '
+ 'Reason: %(error)s.',
+ {'node_uuid': node.uuid,
+ 'error': exc})
+ raise exception.DracOperationError(error=exc)
+
+
def change_physical_disk_state(node, mode=None,
controllers_to_physical_disk_ids=None):
"""Convert disks RAID status
@@ -874,6 +943,36 @@ def _validate_volume_size(node, logical_disks):
return logical_disks
+def _switch_to_raid_mode(node, controller_fqdd):
+ """Convert the controller mode from Enhanced HBA to RAID mode
+
+ :param node: an ironic node object
+ :param controller_fqdd: the ID of the RAID controller.
+ :returns: a dictionary containing
+ - The raid_controller key with the ID of the
+ RAID controller.
+ - The is_commit_required key with a
+ boolean value indicating whether a config job must be created
+ for the values to be applied.
+ - The is_reboot_required key with a RebootRequired enumerated
+ value indicating whether the server must be rebooted to
+ switch the controller mode to RAID.
+ """
+ # wait for pending jobs to complete
+ drac_job.wait_for_job_completion(node)
+
+ raid_attr = "{}:{}".format(controller_fqdd,
+ _REQUESTED_RAID_CONTROLLER_MODE)
+ settings = {raid_attr: _RAID_MODE}
+ settings_results = set_raid_settings(
+ node, controller_fqdd, settings)
+ controller = {
+ 'raid_controller': controller_fqdd,
+ 'is_reboot_required': settings_results['is_reboot_required'],
+ 'is_commit_required': settings_results['is_commit_required']}
+ return controller
+
+
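# Shape of the settings payload built above (the FQDD is hypothetical):
#   {'RAID.Integrated.1-1:RAIDRequestedControllerMode': 'RAID'}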
def _commit_to_controllers(node, controllers, substep="completed"):
"""Commit changes to RAID controllers on the node.
@@ -918,8 +1017,17 @@ def _commit_to_controllers(node, controllers, substep="completed"):
driver_internal_info['raid_config_job_ids'] = []
optional = drac_constants.RebootRequired.optional
- all_realtime = all(cntlr['is_reboot_required'] == optional
- for cntlr in controllers)
+
+ # all realtime controllers
+ all_realtime = all(
+ (cntlr['is_reboot_required'] == optional)
+ and not cntlr.get('is_ehba_mode')
+ for cntlr in controllers)
+
+ # check any controller with ehba mode
+ any_ehba_controllers = any(
+ cntrl.get('is_ehba_mode') is True for cntrl in controllers)
+
raid_config_job_ids = []
raid_config_parameters = []
if all_realtime:
@@ -931,6 +1039,35 @@ def _commit_to_controllers(node, controllers, substep="completed"):
raid_config_job_ids=raid_config_job_ids,
raid_config_parameters=raid_config_parameters)
+ elif any_ehba_controllers:
+ commit_to_ehba_controllers = []
+ for controller in controllers:
+ if controller.get('is_ehba_mode'):
+ job_details = _create_config_job(
+ node, controller=controller['raid_controller'],
+ reboot=False, realtime=True,
+ raid_config_job_ids=raid_config_job_ids,
+ raid_config_parameters=raid_config_parameters)
+
+ ehba_controller = _switch_to_raid_mode(
+ node, controller['raid_controller'])
+ commit_to_ehba_controllers.append(
+ ehba_controller['raid_controller'])
+ else:
+ job_details = _create_config_job(
+ node, controller=controller['raid_controller'],
+ reboot=False, realtime=False,
+ raid_config_job_ids=raid_config_job_ids,
+ raid_config_parameters=raid_config_parameters)
+
+ for controller in commit_to_ehba_controllers:
+ LOG.debug("Create job with Reboot to apply configuration "
+ "changes for ehba controllers")
+ job_details = _create_config_job(
+ node, controller=controller,
+ reboot=(controller == commit_to_ehba_controllers[-1]),
+ realtime=False, raid_config_job_ids=raid_config_job_ids,
+ raid_config_parameters=raid_config_parameters)
else:
for controller in controllers:
mix_controller = controller['raid_controller']
@@ -996,6 +1133,23 @@ def _create_virtual_disks(task, node):
return _commit_to_controllers(node, controllers)
+def _controller_in_hba_mode(raid_settings, controller_fqdd):
+ controller_mode = raid_settings.get(
+ '{}:{}'.format(controller_fqdd, _CURRENT_RAID_CONTROLLER_MODE))
+
+ return _EHBA_MODE in controller_mode.current_value
+
+
+def _controller_supports_ehba_mode(settings, controller_fqdd):
+ raid_cntrl_attr = "{}:{}".format(controller_fqdd,
+ _CURRENT_RAID_CONTROLLER_MODE)
+ current_cntrl_mode = settings.get(raid_cntrl_attr)
+ if not current_cntrl_mode:
+ return False
+ else:
+ return _EHBA_MODE in current_cntrl_mode.possible_values
+
+
def _get_disk_free_size_mb(disk, pending_delete):
"""Return the size of free space on the disk in MB.
@@ -1017,7 +1171,7 @@ class DracWSManRAID(base.RAIDInterface):
def apply_configuration(self, task, raid_config, create_root_volume=True,
create_nonroot_volumes=False,
delete_existing=True):
- return super(DracRAID, self).apply_configuration(
+ return super(DracWSManRAID, self).apply_configuration(
task, raid_config, create_root_volume=create_root_volume,
create_nonroot_volumes=create_nonroot_volumes,
delete_existing=delete_existing)
@@ -1363,9 +1517,15 @@ class DracWSManRAID(base.RAIDInterface):
node = task.node
controllers = list()
drac_raid_controllers = list_raid_controllers(node)
+ drac_raid_settings = list_raid_settings(node)
for cntrl in drac_raid_controllers:
if _is_raid_controller(node, cntrl.id, drac_raid_controllers):
controller = dict()
+ if _controller_supports_ehba_mode(
+ drac_raid_settings,
+ cntrl.id) and _controller_in_hba_mode(
+ drac_raid_settings, cntrl.id):
+ controller['is_ehba_mode'] = True
controller_cap = _reset_raid_config(node, cntrl.id)
controller["raid_controller"] = cntrl.id
controller["is_reboot_required"] = controller_cap[
diff --git a/ironic/drivers/modules/ibmc/management.py b/ironic/drivers/modules/ibmc/management.py
index 3596b391b..672501a7b 100644
--- a/ironic/drivers/modules/ibmc/management.py
+++ b/ironic/drivers/modules/ibmc/management.py
@@ -34,8 +34,6 @@ LOG = log.getLogger(__name__)
class IBMCManagement(base.ManagementInterface):
- supported = False
-
def __init__(self):
"""Initialize the iBMC management interface
diff --git a/ironic/drivers/modules/ibmc/power.py b/ironic/drivers/modules/ibmc/power.py
index 0750ffa41..6bb15ee22 100644
--- a/ironic/drivers/modules/ibmc/power.py
+++ b/ironic/drivers/modules/ibmc/power.py
@@ -40,8 +40,6 @@ EXPECT_POWER_STATE_MAP = {
class IBMCPower(base.PowerInterface):
- supported = False
-
def __init__(self):
"""Initialize the iBMC power interface.
diff --git a/ironic/drivers/modules/ibmc/raid.py b/ironic/drivers/modules/ibmc/raid.py
new file mode 100644
index 000000000..886329ab9
--- /dev/null
+++ b/ironic/drivers/modules/ibmc/raid.py
@@ -0,0 +1,199 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+iBMC RAID configuration specific methods
+"""
+
+from ironic_lib import metrics_utils
+from oslo_log import log as logging
+from oslo_utils import importutils
+
+from ironic.common.i18n import _
+from ironic.common import raid
+from ironic import conf
+from ironic.drivers import base
+from ironic.drivers.modules.ibmc import utils
+
+constants = importutils.try_import('ibmc_client.constants')
+ibmc_client = importutils.try_import('ibmc_client')
+ibmc_error = importutils.try_import('ibmc_client.exceptions')
+
+CONF = conf.CONF
+LOG = logging.getLogger(__name__)
+METRICS = metrics_utils.get_metrics_logger(__name__)
+
+
+class IbmcRAID(base.RAIDInterface):
+ """Implementation of RAIDInterface for iBMC."""
+
+ RAID_APPLY_CONFIGURATION_ARGSINFO = {
+ "raid_config": {
+ "description": "The RAID configuration to apply.",
+ "required": True,
+ },
+ "create_root_volume": {
+ "description": (
+ "Setting this to 'False' indicates not to create root "
+ "volume that is specified in 'raid_config'. Default "
+ "value is 'True'."
+ ),
+ "required": False,
+ },
+ "create_nonroot_volumes": {
+ "description": (
+ "Setting this to 'False' indicates not to create "
+ "non-root volumes (all except the root volume) in "
+ "'raid_config'. Default value is 'True'."
+ ),
+ "required": False,
+ },
+ "delete_existing": {
+ "description": (
+ "Setting this to 'True' indicates to delete existing RAID "
+ "configuration prior to creating the new configuration. "
+ "Default value is 'True'."
+ ),
+ "required": False,
+ }
+ }
+
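# A minimal example of the raid_config argument accepted above, following
# ironic's target RAID configuration schema (values are hypothetical):
#   {
#       "logical_disks": [
#           {"size_gb": 100, "raid_level": "1", "is_root_volume": True},
#           {"size_gb": "MAX", "raid_level": "5"},
#       ]
#   }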
+ def get_properties(self):
+ """Return the properties of the interface.
+
+ :returns: dictionary of <property name>:<property description> entries.
+ """
+ return utils.COMMON_PROPERTIES.copy()
+
+ @utils.handle_ibmc_exception('delete iBMC RAID configuration')
+ def _delete_raid_configuration(self, task):
+ """Delete the RAID configuration through `python-ibmcclient` lib.
+
+ :param task: a TaskManager instance containing the node to act on.
+ """
+ ibmc = utils.parse_driver_info(task.node)
+ with ibmc_client.connect(**ibmc) as conn:
+ # NOTE(qianbiao.ng): To reduce review workload, we should keep all
+ # delete logic in python-ibmcclient, and the delete RAID
+ # configuration logic should be synchronous. If async behavior is
+ # required, implement it in python-ibmcclient.
+ conn.system.storage.delete_all_raid_configuration()
+
+ @utils.handle_ibmc_exception('create iBMC RAID configuration')
+ def _create_raid_configuration(self, task, logical_disks):
+ """Create the RAID configuration through `python-ibmcclient` lib.
+
+ :param task: a TaskManager instance containing the node to act on.
+ :param logical_disks: a list of JSON dictionaries which represents
+ the logical disks to be created. The JSON dictionary should match
+ the (ironic.drivers.raid_config_schema.json) scheme.
+ """
+ ibmc = utils.parse_driver_info(task.node)
+ with ibmc_client.connect(**ibmc) as conn:
+ # NOTE(qianbiao.ng): To reduce review workload, we should keep all
+ # apply logic in python-ibmcclient, and the apply RAID
+ # configuration logic should be synchronous. If async behavior is
+ # required, implement it in python-ibmcclient.
+ conn.system.storage.apply_raid_configuration(logical_disks)
+
+ @base.deploy_step(priority=0,
+ argsinfo=RAID_APPLY_CONFIGURATION_ARGSINFO)
+ def apply_configuration(self, task, raid_config, create_root_volume=True,
+ create_nonroot_volumes=False):
+ return super(IbmcRAID, self).apply_configuration(
+ task, raid_config, create_root_volume=create_root_volume,
+ create_nonroot_volumes=create_nonroot_volumes)
+
+ @METRICS.timer('IbmcRAID.create_configuration')
+ @base.clean_step(priority=0, abortable=False, argsinfo={
+ 'create_root_volume': {
+ 'description': ('This specifies whether to create the root '
+ 'volume. Defaults to `True`.'),
+ 'required': False
+ },
+ 'create_nonroot_volumes': {
+ 'description': ('This specifies whether to create the non-root '
+ 'volumes. Defaults to `True`.'),
+ 'required': False
+ },
+ "delete_existing": {
+ "description": ("Setting this to 'True' indicates to delete "
+ "existing RAID configuration prior to creating "
+ "the new configuration. "
+ "Default value is 'False'."),
+ "required": False,
+ }
+ })
+ def create_configuration(self, task, create_root_volume=True,
+ create_nonroot_volumes=True,
+ delete_existing=False):
+ """Create a RAID configuration.
+
+ This method creates a RAID configuration on the given node.
+
+ :param task: a TaskManager instance.
+ :param create_root_volume: If True, a root volume is created
+ during RAID configuration. Otherwise, no root volume is
+ created. Default is True.
+ :param create_nonroot_volumes: If True, non-root volumes are
+ created. If False, no non-root volumes are created. Default
+ is True.
+ :param delete_existing: Setting this to True indicates to delete RAID
+ configuration prior to creating the new configuration. Default is
+ False.
+ :raises: MissingParameterValue, if node.target_raid_config is missing
+ or empty after skipping root volume and/or non-root volumes.
+ :raises: IBMCError, on failure to execute step.
+ """
+ node = task.node
+ raid_config = raid.filter_target_raid_config(
+ node, create_root_volume=create_root_volume,
+ create_nonroot_volumes=create_nonroot_volumes)
+ LOG.info(_("Invoke RAID create_configuration step for node %s(uuid). "
+ "Current provision state is: %(status)s. "
+ "Target RAID configuration is: %(config)s."),
+ {'uuid': node.uuid, 'status': node.provision_state,
+ 'target': raid_config})
+
+ # cache current raid config to node's driver_internal_info
+ node.driver_internal_info['raid_config'] = raid_config
+ node.save()
+
+ # delete exist volumes if necessary
+ if delete_existing:
+ self._delete_raid_configuration(task)
+
+ # create raid configuration
+ logical_disks = raid_config.get('logical_disks', [])
+ self._create_raid_configuration(task, logical_disks)
+ LOG.info(_("Succeed to create raid configuration on node %s."),
+ task.node.uuid)
+
+ @METRICS.timer('IbmcRAID.delete_configuration')
+ @base.clean_step(priority=0, abortable=False)
+ @base.deploy_step(priority=0)
+ def delete_configuration(self, task):
+ """Delete the RAID configuration.
+
+ :param task: a TaskManager instance containing the node to act on.
+ :returns: states.CLEANWAIT if the clean operation is in progress
+ asynchronously, states.DEPLOYWAIT if the deploy operation is
+ in progress asynchronously, or None if it is complete.
+ :raises: IBMCError, on failure to execute step.
+ """
+ node = task.node
+ LOG.info("Invoke RAID delete_configuration step for node %s(uuid). "
+ "Current provision state is: %(status)s. ",
+ {'uuid': node.uuid, 'status': node.provision_state})
+ self._delete_raid_configuration(task)
+ LOG.info(_("Succeed to delete raid configuration on node %s."),
+ task.node.uuid)
diff --git a/ironic/drivers/modules/ibmc/utils.py b/ironic/drivers/modules/ibmc/utils.py
index 0819d7874..78bf25cb3 100644
--- a/ironic/drivers/modules/ibmc/utils.py
+++ b/ironic/drivers/modules/ibmc/utils.py
@@ -152,7 +152,7 @@ def handle_ibmc_exception(action):
try:
return f(*args, **kwargs)
- except ibmc_error.ConnectionError as e:
+ except ibmc_error.IBMCConnectionError as e:
error = (_('Failed to connect to iBMC for node %(node)s, '
'Error: %(error)s')
% {'node': node.uuid, 'error': e})
diff --git a/ironic/drivers/modules/ibmc/vendor.py b/ironic/drivers/modules/ibmc/vendor.py
index 2d4f74a28..00344cd3b 100644
--- a/ironic/drivers/modules/ibmc/vendor.py
+++ b/ironic/drivers/modules/ibmc/vendor.py
@@ -29,8 +29,6 @@ LOG = log.getLogger(__name__)
class IBMCVendor(base.VendorInterface):
- supported = False
-
def __init__(self):
"""Initialize the iBMC vendor interface.
@@ -87,3 +85,24 @@ class IBMCVendor(base.VendorInterface):
system = conn.system.get()
boot_sequence = system.boot_sequence
return {'boot_up_sequence': boot_sequence}
+
+ @base.passthru(['GET'], async_call=False,
+ description=_('Returns a list of dictionaries, each '
+ 'representing a RAID controller '
+ 'summary'))
+ @utils.handle_ibmc_exception('get iBMC RAID controller summary')
+ def get_raid_controller_list(self, task, **kwargs):
+ """List RAID controllers summary info of the node.
+
+ :param task: A TaskManager instance containing the node to act on.
+ :param kwargs: Not used.
+ :raises: IBMCConnectionError when it fails to connect to iBMC
+ :raises: IBMCError when iBMC responses an error information
+ :returns: A list of dictionaries, each representing a RAID
+ controller summary of the node.
+ """
+ driver_info = utils.parse_driver_info(task.node)
+ with ibmc_client.connect(**driver_info) as conn:
+ controllers = conn.system.storage.list()
+ summaries = [ctrl.summary() for ctrl in controllers]
+ return summaries
diff --git a/ironic/drivers/modules/ilo/common.py b/ironic/drivers/modules/ilo/common.py
index 62c6bbb23..ef330c5c2 100644
--- a/ironic/drivers/modules/ilo/common.py
+++ b/ironic/drivers/modules/ilo/common.py
@@ -116,6 +116,15 @@ POST_INPOSTDISCOVERY_STATE = "InPostDiscoveryComplete"
POST_FINISHEDPOST_STATE = "FinishedPost"
""" Node is in FinishedPost post state."""
+SUPPORTED_BOOT_MODE_LEGACY_BIOS_ONLY = 'legacy bios only'
+""" Node supports only legacy BIOS boot mode."""
+
+SUPPORTED_BOOT_MODE_UEFI_ONLY = 'uefi only'
+""" Node supports only UEFI boot mode."""
+
+SUPPORTED_BOOT_MODE_LEGACY_BIOS_AND_UEFI = 'legacy bios and uefi'
+""" Node supports both legacy BIOS and UEFI boot mode."""
+
def copy_image_to_web_server(source_file_path, destination):
"""Copies the given image to the http web server.
@@ -492,6 +501,24 @@ def set_boot_mode(node, boot_mode):
{'uuid': node.uuid, 'boot_mode': boot_mode})
+def get_current_boot_mode(node):
+ """Get the current boot mode for a node.
+
+ :param node: an ironic node object.
+ :raises: IloOperationError if it failed to fetch the boot mode.
+ :raises: IloOperationNotSupported if the node does not support
+ getting the current boot mode.
+ :returns: The current boot mode, one of
+ :mod:`ironic.common.boot_modes`.
+ """
+ ilo_object = get_ilo_object(node)
+ operation = _("Get current boot mode")
+ try:
+ c_boot_mode = ilo_object.get_current_boot_mode()
+ return BOOT_MODE_ILO_TO_GENERIC[c_boot_mode.lower()]
+ except ilo_error.IloError as ilo_exception:
+ raise exception.IloOperationError(operation=operation,
+ error=ilo_exception)
+
+
def update_boot_mode(task):
"""Update instance_info with boot mode to be used for deploy.
diff --git a/ironic/drivers/modules/ilo/management.py b/ironic/drivers/modules/ilo/management.py
index 07cbe7b41..75b5934ea 100644
--- a/ironic/drivers/modules/ilo/management.py
+++ b/ironic/drivers/modules/ilo/management.py
@@ -24,6 +24,7 @@ from oslo_utils import excutils
from oslo_utils import importutils
from ironic.common import boot_devices
+from ironic.common import boot_modes
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import states
@@ -523,12 +524,43 @@ class IloManagement(base.ManagementInterface):
@base.clean_step(priority=0, abortable=False,
argsinfo=_FIRMWARE_UPDATE_SUM_ARGSINFO)
def update_firmware_sum(self, task, **kwargs):
- """Updates the firmware using Smart Update Manager (SUM).
+ """Clean step to update the firmware using Smart Update Manager (SUM)
:param task: a TaskManager object.
:raises: NodeCleaningFailure, on failure to execute the clean step.
+ :returns: states.CLEANWAIT to signify the step will be completed async
+ """
+ return self._do_update_firmware_sum(task, **kwargs)
+
+ @METRICS.timer('IloManagement.update_firmware_sum')
+ @base.deploy_step(priority=0, argsinfo=_FIRMWARE_UPDATE_SUM_ARGSINFO)
+ def flash_firmware_sum(self, task, **kwargs):
+ """Deploy step to Update the firmware using Smart Update Manager (SUM).
+
+ :param task: a TaskManager object.
+ :raises: InstanceDeployFailure, on failure to execute the deploy step.
+ :returns: states.DEPLOYWAIT to signify the step will be completed
+ async
+ """
+ return self._do_update_firmware_sum(task, **kwargs)
+
+ def _do_update_firmware_sum(self, task, **kwargs):
+ """Update the firmware using Smart Update Manager (SUM).
+
+ :param task: a TaskManager object.
+ :raises: NodeCleaningFailure or InstanceDeployFailure, on failure to
+ execute the clean or deploy step, respectively.
+ :returns: states.CLEANWAIT or states.DEPLOYWAIT to signify the step
+ will be completed async for clean or deploy step respectively.
"""
node = task.node
+ if node.provision_state == states.DEPLOYING:
+ step = node.deploy_step
+ step_type = 'deploy'
+ else:
+ step = node.clean_step
+ step_type = 'clean'
+
# The arguments are validated and sent to the ProliantHardwareManager
# to perform SUM based firmware update clean step.
firmware_processor.get_and_validate_firmware_image_info(kwargs,
@@ -537,24 +569,25 @@ class IloManagement(base.ManagementInterface):
url = kwargs['url']
if urlparse.urlparse(url).scheme == 'swift':
url = firmware_processor.get_swift_url(urlparse.urlparse(url))
- node.clean_step['args']['url'] = url
+ step['args']['url'] = url
# Insert SPP ISO into virtual media CDROM
ilo_common.attach_vmedia(node, 'CDROM', url)
- step = node.clean_step
- return agent_base.execute_clean_step(task, step)
+ return agent_base.execute_step(task, step, step_type)
@staticmethod
+ @agent_base.post_deploy_step_hook(
+ interface='management', step='flash_firmware_sum')
@agent_base.post_clean_step_hook(
interface='management', step='update_firmware_sum')
def _update_firmware_sum_final(task, command):
- """Clean step hook after SUM based firmware update operation.
+ """Deploy/Clean step hook after SUM based firmware update operation.
- This method is invoked as a post clean step hook by the Ironic
- conductor once firmware update operaion is completed. The clean logs
- are collected and stored according to the configured storage backend
- when the node is configured to collect the logs.
+ This method is invoked as a post deploy/clean step hook by the Ironic
+ conductor once the firmware update operation is completed. The
+ deploy/clean logs are collected and stored according to the
+ configured storage backend when the node is configured to collect
+ the logs.
:param task: a TaskManager instance.
:param command: A command result structure of the SUM based firmware
@@ -564,12 +597,16 @@ class IloManagement(base.ManagementInterface):
if not _should_collect_logs(command):
return
+ if task.node.provision_state == states.DEPLOYWAIT:
+ log_data = command['command_result']['deploy_result']['Log Data']
+ label = command['command_result']['deploy_step']['step']
+ else:
+ log_data = command['command_result']['clean_result']['Log Data']
+ label = command['command_result']['clean_step']['step']
+
node = task.node
try:
- driver_utils.store_ramdisk_logs(
- node,
- command['command_result']['clean_result']['Log Data'],
- label='update_firmware_sum')
+ driver_utils.store_ramdisk_logs(node, log_data, label=label)
except exception.SwiftOperationError as e:
LOG.error('Failed to store the logs from the node %(node)s '
'for "update_firmware_sum" clean step in Swift. '
@@ -684,6 +721,59 @@ class IloManagement(base.ManagementInterface):
raise exception.IloOperationError(operation=operation,
error=ilo_exception)
+ def get_supported_boot_modes(self, task):
+ """Get a list of the supported boot devices.
+
+ :param task: a task from TaskManager.
+ :raises: IloOperationError if any exception happens in proliantutils
+ :returns: A list with the supported boot devices defined
+ in :mod:`ironic.common.boot_devices`.
+ """
+ node = task.node
+ ilo_object = ilo_common.get_ilo_object(node)
+ try:
+ modes = ilo_object.get_supported_boot_mode()
+ if modes == ilo_common.SUPPORTED_BOOT_MODE_LEGACY_BIOS_ONLY:
+ return [boot_modes.LEGACY_BIOS]
+ elif modes == ilo_common.SUPPORTED_BOOT_MODE_UEFI_ONLY:
+ return [boot_modes.UEFI]
+ elif modes == ilo_common.SUPPORTED_BOOT_MODE_LEGACY_BIOS_AND_UEFI:
+ return [boot_modes.UEFI, boot_modes.LEGACY_BIOS]
+ except ilo_error.IloError as ilo_exception:
+ operation = _("Get supported boot modes")
+ raise exception.IloOperationError(operation=operation,
+ error=ilo_exception)
+
+ @task_manager.require_exclusive_lock
+ def set_boot_mode(self, task, mode):
+ """Set the boot mode for a node.
+
+ Set the boot mode to use on next reboot of the node.
+
+ :param task: A task from TaskManager.
+ :param mode: The boot mode, one of
+ :mod:`ironic.common.boot_modes`.
+ :raises: InvalidParameterValue if an invalid boot mode is
+ specified.
+ :raises: IloOperationError if setting boot mode failed.
+ """
+ if mode not in self.get_supported_boot_modes(task):
+ raise exception.InvalidParameterValue(_(
+ "The given boot mode '%s' is not supported.") % mode)
+ ilo_common.set_boot_mode(task.node, mode)
+
+ def get_boot_mode(self, task):
+ """Get the current boot mode for a node.
+
+ Provides the current boot mode of the node.
+
+ :param task: A task from TaskManager.
+ :raises: IloOperationError on an error from IloClient library.
+        :returns: The boot mode, one of :mod:`ironic.common.boot_modes` or
+ None if it is unknown.
+ """
+ return ilo_common.get_current_boot_mode(task.node)
+
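A minimal usage sketch of the new boot mode methods (task acquisition simplified, error handling omitted; not part of the patch):

    # Hedged sketch: exercise the new management-interface methods to
    # switch a node to UEFI when the hardware supports it.
    from ironic.common import boot_modes
    from ironic.conductor import task_manager

    def ensure_uefi(context, node_id):
        with task_manager.acquire(context, node_id) as task:
            supported = task.driver.management.get_supported_boot_modes(task)
            if boot_modes.UEFI in supported:
                task.driver.management.set_boot_mode(task, boot_modes.UEFI)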
class Ilo5Management(IloManagement):
diff --git a/ironic/drivers/modules/image_cache.py b/ironic/drivers/modules/image_cache.py
index b7ca62fc3..78b5e02b4 100644
--- a/ironic/drivers/modules/image_cache.py
+++ b/ironic/drivers/modules/image_cache.py
@@ -313,9 +313,10 @@ def _fetch(context, image_href, path, force_raw=False):
# Notes(yjiang5): If glance can provide the virtual size information,
# then we can firstly clean cache and then invoke images.fetch().
if force_raw:
- required_space = images.converted_size(path_tmp)
- directory = os.path.dirname(path_tmp)
- _clean_up_caches(directory, required_space)
+ if images.force_raw_will_convert(image_href, path_tmp):
+ required_space = images.converted_size(path_tmp)
+ directory = os.path.dirname(path_tmp)
+ _clean_up_caches(directory, required_space)
images.image_to_raw(image_href, path, path_tmp)
else:
os.rename(path_tmp, path)
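With the new guard, cache space is only reserved when a raw conversion will actually happen. A hedged sketch of the kind of check force_raw_will_convert implies (the real helper lives in ironic.common.images and may differ):

    # Hypothetical probe: conversion is needed only when the image on
    # disk is not already raw (qemu-img is the usual way to tell).
    from oslo_concurrency import processutils

    def _needs_raw_conversion(path):
        out, _err = processutils.execute('qemu-img', 'info', path)
        return 'file format: raw' not in out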
diff --git a/ironic/drivers/modules/inspector.py b/ironic/drivers/modules/inspector.py
index be0a5e72b..0da29c63c 100644
--- a/ironic/drivers/modules/inspector.py
+++ b/ironic/drivers/modules/inspector.py
@@ -33,6 +33,7 @@ from ironic.conductor import task_manager
from ironic.conductor import utils as cond_utils
from ironic.conf import CONF
from ironic.drivers import base
+from ironic.drivers.modules import deploy_utils
LOG = logging.getLogger(__name__)
@@ -45,7 +46,7 @@ _IRONIC_MANAGES_BOOT = 'inspector_manage_boot'
def _get_inspector_session(**kwargs):
global _INSPECTOR_SESSION
if not _INSPECTOR_SESSION:
- if CONF.auth_strategy == 'noauth':
+ if CONF.auth_strategy != 'keystone':
# NOTE(dtantsur): using set_default instead of set_override because
# the native keystoneauth option must have priority.
CONF.set_default('auth_type', 'none', group='inspector')
@@ -194,6 +195,8 @@ def _start_managed_inspection(task):
endpoint = _get_callback_endpoint(client)
params = dict(_parse_kernel_params(),
**{'ipa-inspection-callback-url': endpoint})
+ if CONF.deploy.fast_track:
+ params['ipa-api-url'] = deploy_utils.get_ironic_api_url()
cond_utils.node_power_action(task, states.POWER_OFF)
with cond_utils.power_state_for_network_configuration(task):
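With fast track enabled, the inspection ramdisk also learns the Ironic API URL so the agent can keep heartbeating after inspection. The resulting parameter dict looks roughly like this (values hypothetical):

    # Illustrative kernel parameters passed to the inspection ramdisk.
    params = {
        'ipa-inspection-callback-url': 'http://203.0.113.5:5050/v1/continue',
        'ipa-api-url': 'http://203.0.113.5:6385',
    }
    cmdline = ' '.join('%s=%s' % kv for kv in params.items())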
diff --git a/ironic/drivers/modules/ipmitool.py b/ironic/drivers/modules/ipmitool.py
index 5747d156a..a3b443b58 100644
--- a/ironic/drivers/modules/ipmitool.py
+++ b/ironic/drivers/modules/ipmitool.py
@@ -30,13 +30,13 @@ DRIVER.
"""
import contextlib
-import functools
import os
import re
import subprocess
import tempfile
import time
+from eventlet.green import subprocess as green_subprocess
from ironic_lib import metrics_utils
from ironic_lib import utils as ironic_utils
from oslo_concurrency import processutils
@@ -389,31 +389,6 @@ def _parse_driver_info(node):
}
-def _exec_ipmitool_wait(timeout, driver_info, popen_obj):
- wait_interval = min(timeout, 0.5)
-
- while timeout >= 0:
- if not popen_obj.poll():
- return
-
- time.sleep(wait_interval)
- timeout -= wait_interval
-
- LOG.warning('Killing timed out IPMI process "%(cmd)s" for node %(node)s.',
- {'node': driver_info['uuid'], 'cmd': popen_obj.cmd})
-
- popen_obj.terminate()
- time.sleep(0.5)
- if popen_obj.poll():
- popen_obj.kill()
-
- time.sleep(1)
-
- if popen_obj.poll():
- LOG.warning('Could not kill IPMI process "%(cmd)s" for node %(node)s.',
- {'node': driver_info['uuid'], 'cmd': popen_obj.cmd})
-
-
def _get_ipmitool_args(driver_info, pw_file=None):
ipmi_version = ('lanplus'
if driver_info['protocol_version'] == '2.0'
@@ -480,7 +455,10 @@ def _exec_ipmitool(driver_info, command, check_exit_code=None,
if _is_option_supported('timing'):
args.append('-R')
- args.append(str(num_tries))
+ if CONF.ipmi.use_ipmitool_retries:
+ args.append(str(num_tries))
+ else:
+ args.append('1')
args.append('-N')
args.append(str(CONF.ipmi.min_command_interval))
@@ -488,14 +466,7 @@ def _exec_ipmitool(driver_info, command, check_exit_code=None,
extra_args = {}
if kill_on_timeout:
- # NOTE(etingof): We can't trust ipmitool to terminate in time.
- # Therefore we have to kill it if it is running for longer than
- # we asked it to.
- # For that purpose we inject the time-capped `popen.wait` call
- # before the uncapped `popen.communicate` is called internally.
- # That gives us a chance to kill misbehaving `ipmitool` child.
- extra_args['on_execute'] = functools.partial(
- _exec_ipmitool_wait, timeout, driver_info)
+ extra_args['timeout'] = timeout
if check_exit_code is not None:
extra_args['check_exit_code'] = check_exit_code
@@ -530,9 +501,12 @@ def _exec_ipmitool(driver_info, command, check_exit_code=None,
IPMITOOL_RETRYABLE_FAILURES
+ CONF.ipmi.additional_retryable_ipmi_errors)
if x in str(e)]
+ # If Ironic is doing retries then retry all errors
+ retry_failures = (err_list
+ or not CONF.ipmi.use_ipmitool_retries)
if ((time.time() > end_time)
or (num_tries == 0)
- or not err_list):
+ or not retry_failures):
LOG.error('IPMI Error while attempting "%(cmd)s" '
'for node %(node)s. Error: %(error)s',
{'node': driver_info['uuid'],
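Condensed, the retry decision above reduces to the following sketch (not the full function): when Ironic owns the retries, ipmitool runs with -R 1 and every error is treated as retryable until the deadline.

    import time

    def _should_retry(err_list, num_tries, end_time, use_ipmitool_retries):
        # With use_ipmitool_retries disabled, Ironic retries all
        # failures itself until end_time is reached.
        retry_failures = bool(err_list) or not use_ipmitool_retries
        return (time.time() <= end_time
                and num_tries > 0
                and retry_failures)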
@@ -582,7 +556,10 @@ def _set_and_wait(task, power_action, driver_info, timeout=None):
try:
_exec_ipmitool(driver_info, cmd)
except (exception.PasswordFileFailedToCreate,
- processutils.ProcessExecutionError) as e:
+ processutils.ProcessExecutionError,
+ subprocess.TimeoutExpired,
+ # https://github.com/eventlet/eventlet/issues/624
+ green_subprocess.TimeoutExpired) as e:
LOG.warning("IPMI power action %(cmd)s failed for node %(node_id)s "
"with error: %(error)s.",
{'node_id': driver_info['uuid'], 'cmd': cmd, 'error': e})
@@ -830,10 +807,10 @@ def _constructor_checks(driver):
_check_temp_dir()
-def _allocate_port(task):
+def _allocate_port(task, host=None):
node = task.node
dii = node.driver_internal_info or {}
- allocated_port = console_utils.acquire_port()
+ allocated_port = console_utils.acquire_port(host=host)
dii['allocated_ipmi_terminal_port'] = allocated_port
node.driver_internal_info = dii
node.save()
@@ -1434,7 +1411,8 @@ class IPMISocatConsole(IPMIConsole):
"""
driver_info = _parse_driver_info(task.node)
if not driver_info['port']:
- driver_info['port'] = _allocate_port(task)
+ driver_info['port'] = _allocate_port(
+ task, host=CONF.console.socat_address)
try:
self._exec_stop_console(driver_info)
diff --git a/ironic/drivers/modules/ipxe_config.template b/ironic/drivers/modules/ipxe_config.template
index 84b01d104..f8bfc19e9 100644
--- a/ironic/drivers/modules/ipxe_config.template
+++ b/ironic/drivers/modules/ipxe_config.template
@@ -33,9 +33,14 @@ boot
:boot_ramdisk
imgfree
+{%- if pxe_options.boot_iso_url %}
+sanboot {{ pxe_options.boot_iso_url }}
+{%- else %}
kernel {% if pxe_options.ipxe_timeout > 0 %}--timeout {{ pxe_options.ipxe_timeout }} {% endif %}{{ pxe_options.aki_path }} root=/dev/ram0 text {{ pxe_options.pxe_append_params|default("", true) }} {{ pxe_options.ramdisk_opts|default('', true) }} initrd=ramdisk || goto boot_ramdisk
initrd {% if pxe_options.ipxe_timeout > 0 %}--timeout {{ pxe_options.ipxe_timeout }} {% endif %}{{ pxe_options.ari_path }} || goto boot_ramdisk
boot
+{%- endif %}
+
{%- if pxe_options.boot_from_volume %}
:boot_iscsi
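Rendered with a boot ISO URL set, the new branch bypasses the kernel/initrd pair entirely and chain-boots the ISO. A small jinja2 sketch of the effect (URL hypothetical, template condensed):

    import jinja2

    tmpl = jinja2.Template(
        "{% if boot_iso_url %}sanboot {{ boot_iso_url }}"
        "{% else %}kernel {{ aki_path }}{% endif %}")
    print(tmpl.render(boot_iso_url='http://203.0.113.1/boot.iso'))
    # -> sanboot http://203.0.113.1/boot.iso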
diff --git a/ironic/drivers/modules/irmc/boot.py b/ironic/drivers/modules/irmc/boot.py
index e23ee4961..f36df670a 100644
--- a/ironic/drivers/modules/irmc/boot.py
+++ b/ironic/drivers/modules/irmc/boot.py
@@ -87,7 +87,7 @@ COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
def _is_image_href_ordinary_file_name(image_href):
- """Check if image_href is a ordinary file name.
+ """Check if image_href is an ordinary file name.
This method judges if image_href is an ordinary file name or not,
which is a file supposed to be stored in share file system.
diff --git a/ironic/drivers/modules/iscsi_deploy.py b/ironic/drivers/modules/iscsi_deploy.py
index c76d9e154..8a4843136 100644
--- a/ironic/drivers/modules/iscsi_deploy.py
+++ b/ironic/drivers/modules/iscsi_deploy.py
@@ -25,7 +25,6 @@ from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
-from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import states
@@ -283,6 +282,8 @@ def deploy_partition_image(
NOTE: If key exists but value is None, it means partition doesn't
exist.
"""
+ # NOTE(dtantsur): CONF.default_boot_option is mutable, don't use it in
+ # the function signature!
boot_option = boot_option or deploy_utils.get_default_boot_option()
image_mb = disk_utils.get_image_mb(image_path)
if image_mb > root_mb:
@@ -597,40 +598,12 @@ def validate(task):
deploy_utils.parse_instance_info(task.node)
-class AgentDeployMixin(agent_base.AgentDeployMixin):
-
- @METRICS.timer('AgentDeployMixin.continue_deploy')
- @task_manager.require_exclusive_lock
- def continue_deploy(self, task):
- """Method invoked when deployed using iSCSI.
-
- This method is invoked during a heartbeat from an agent when
- the node is in wait-call-back state. This deploys the image on
- the node and then configures the node to boot according to the
- desired boot option (netboot or localboot).
-
- :param task: a TaskManager object containing the node.
- :param kwargs: the kwargs passed from the heartbeat method.
- :raises: InstanceDeployFailure, if it encounters some error during
- the deploy.
- """
- task.process_event('resume')
- node = task.node
- LOG.debug('Continuing the deployment on node %s', node.uuid)
-
- uuid_dict_returned = do_agent_iscsi_deploy(task, self._client)
- root_uuid = uuid_dict_returned.get('root uuid')
- efi_sys_uuid = uuid_dict_returned.get('efi system partition uuid')
- prep_boot_part_uuid = uuid_dict_returned.get(
- 'PrEP Boot partition uuid')
- self.prepare_instance_to_boot(task, root_uuid, efi_sys_uuid,
- prep_boot_part_uuid=prep_boot_part_uuid)
- self.reboot_and_finish_deploy(task)
-
-
-class ISCSIDeploy(AgentDeployMixin, base.DeployInterface):
+class ISCSIDeploy(agent_base.AgentDeployMixin, agent_base.AgentBaseMixin,
+ base.DeployInterface):
"""iSCSI Deploy Interface for deploy-related actions."""
+ has_decomposed_deploy_steps = True
+
def get_properties(self):
return agent_base.VENDOR_PROPERTIES
@@ -676,14 +649,12 @@ class ISCSIDeploy(AgentDeployMixin, base.DeployInterface):
"""
node = task.node
if manager_utils.is_fast_track(task):
+ # NOTE(mgoddard): For fast track we can mostly skip this step and
+ # proceed to the next step (i.e. write_image).
LOG.debug('Performing a fast track deployment for %(node)s.',
{'node': task.node.uuid})
deploy_utils.cache_instance_image(task.context, node)
check_image_size(task)
- # Update the database for the API and the task tracking resumes
- # the state machine state going from DEPLOYWAIT -> DEPLOYING
- task.process_event('wait')
- self.continue_deploy(task)
elif task.driver.storage.should_write_image(task):
# Standard deploy process
deploy_utils.cache_instance_image(task.context, node)
@@ -695,52 +666,61 @@ class ISCSIDeploy(AgentDeployMixin, base.DeployInterface):
manager_utils.node_power_action(task, states.REBOOT)
info = task.node.driver_internal_info
info.pop('deployment_reboot', None)
+ info.pop('deployment_uuids', None)
task.node.driver_internal_info = info
task.node.save()
return states.DEPLOYWAIT
- else:
- # Boot to an Storage Volume
-
- # TODO(TheJulia): At some point, we should de-dupe this code
- # as it is nearly identical to the agent deploy interface.
- # This is not being done now as it is expected to be
- # refactored in the near future.
- manager_utils.node_power_action(task, states.POWER_OFF)
- with manager_utils.power_state_for_network_configuration(task):
- task.driver.network.remove_provisioning_network(task)
- task.driver.network.configure_tenant_networks(task)
- task.driver.boot.prepare_instance(task)
- manager_utils.node_power_action(task, states.POWER_ON)
- return None
-
- @METRICS.timer('ISCSIDeploy.tear_down')
+ @METRICS.timer('ISCSIDeploy.write_image')
+ @base.deploy_step(priority=80)
@task_manager.require_exclusive_lock
- def tear_down(self, task):
- """Tear down a previous deployment on the task's node.
+ def write_image(self, task):
+ """Method invoked when deployed using iSCSI.
- Power off the node. All actual clean-up is done in the clean_up()
- method which should be called separately.
+ This method is invoked during a heartbeat from an agent when
+ the node is in wait-call-back state. This deploys the image on
+ the node and then configures the node to boot according to the
+ desired boot option (netboot or localboot).
- :param task: a TaskManager instance containing the node to act on.
- :returns: deploy state DELETED.
- :raises: NetworkError if the cleaning ports cannot be removed.
- :raises: InvalidParameterValue when the wrong state is specified
- or the wrong driver info is specified.
- :raises: StorageError when volume detachment fails.
- :raises: other exceptions by the node's power driver if something
- wrong occurred during the power action.
+ :param task: a TaskManager object containing the node.
+ :param kwargs: the kwargs passed from the heartbeat method.
+ :raises: InstanceDeployFailure, if it encounters some error during
+ the deploy.
"""
- manager_utils.node_power_action(task, states.POWER_OFF)
- task.driver.storage.detach_volumes(task)
- deploy_utils.tear_down_storage_configuration(task)
- with manager_utils.power_state_for_network_configuration(task):
- task.driver.network.unconfigure_tenant_networks(task)
- # NOTE(mgoddard): If the deployment was unsuccessful the node may
- # have ports on the provisioning network which were not deleted.
- task.driver.network.remove_provisioning_network(task)
- return states.DELETED
+ if not task.driver.storage.should_write_image(task):
+ LOG.debug('Skipping write_image for node %s', task.node.uuid)
+ return
+
+ node = task.node
+ LOG.debug('Continuing the deployment on node %s', node.uuid)
+
+ uuid_dict_returned = do_agent_iscsi_deploy(task, self._client)
+ utils.set_node_nested_field(node, 'driver_internal_info',
+ 'deployment_uuids', uuid_dict_returned)
+ node.save()
+
+ @METRICS.timer('ISCSIDeploy.prepare_instance_boot')
+ @base.deploy_step(priority=60)
+ def prepare_instance_boot(self, task):
+ if not task.driver.storage.should_write_image(task):
+ task.driver.boot.prepare_instance(task)
+ return
+
+ node = task.node
+ try:
+ uuid_dict_returned = node.driver_internal_info['deployment_uuids']
+ except KeyError:
+ raise exception.InstanceDeployFailure(
+ _('Invalid internal state: the write_image deploy step has '
+ 'not been called before prepare_instance_boot'))
+ root_uuid = uuid_dict_returned.get('root uuid')
+ efi_sys_uuid = uuid_dict_returned.get('efi system partition uuid')
+ prep_boot_part_uuid = uuid_dict_returned.get(
+ 'PrEP Boot partition uuid')
+
+ self.prepare_instance_to_boot(task, root_uuid, efi_sys_uuid,
+ prep_boot_part_uuid=prep_boot_part_uuid)
@METRICS.timer('ISCSIDeploy.prepare')
@task_manager.require_exclusive_lock
@@ -815,33 +795,7 @@ class ISCSIDeploy(AgentDeployMixin, base.DeployInterface):
:param task: a TaskManager instance containing the node to act on.
"""
deploy_utils.destroy_images(task.node.uuid)
- task.driver.boot.clean_up_ramdisk(task)
- task.driver.boot.clean_up_instance(task)
- provider = dhcp_factory.DHCPFactory()
- provider.clean_dhcp(task)
-
- def take_over(self, task):
- pass
-
- @METRICS.timer('ISCSIDeploy.prepare_cleaning')
- def prepare_cleaning(self, task):
- """Boot into the agent to prepare for cleaning.
-
- :param task: a TaskManager object containing the node
- :raises NodeCleaningFailure: if the previous cleaning ports cannot
- be removed or if new cleaning ports cannot be created
- :returns: states.CLEANWAIT to signify an asynchronous prepare.
- """
- return deploy_utils.prepare_inband_cleaning(
- task, manage_boot=True)
-
- @METRICS.timer('ISCSIDeploy.tear_down_cleaning')
- def tear_down_cleaning(self, task):
- """Clean up the PXE and DHCP files after cleaning.
-
- :param task: a TaskManager object containing the node
- :raises NodeCleaningFailure: if the cleaning ports cannot be
- removed
- """
- deploy_utils.tear_down_inband_cleaning(
- task, manage_boot=True)
+ super(ISCSIDeploy, self).clean_up(task)
+ if utils.pop_node_nested_field(task.node, 'driver_internal_info',
+ 'deployment_uuids'):
+ task.node.save()
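The interface is now built from decomposed deploy steps: steps run in order of decreasing priority, so write_image (80) executes before prepare_instance_boot (60), with driver_internal_info carrying the deployment UUIDs between them. A schematic of the ordering, with priorities taken from the decorators above:

    # Hedged schematic of the step ordering (higher priority runs first).
    steps = sorted([('write_image', 80), ('prepare_instance_boot', 60)],
                   key=lambda step: step[1], reverse=True)
    assert [name for name, _prio in steps] == [
        'write_image', 'prepare_instance_boot']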
diff --git a/ironic/drivers/modules/network/common.py b/ironic/drivers/modules/network/common.py
index 736249b69..2c3c4be0c 100644
--- a/ironic/drivers/modules/network/common.py
+++ b/ironic/drivers/modules/network/common.py
@@ -83,7 +83,7 @@ def _is_port_physnet_allowed(port, physnets):
or port.physical_network in physnets)
-def _get_free_portgroups_and_ports(task, vif_id, physnets):
+def _get_free_portgroups_and_ports(task, vif_id, physnets, vif_info={}):
"""Get free portgroups and ports.
It only returns ports or portgroups that can be used for attachment of
@@ -95,6 +95,8 @@ def _get_free_portgroups_and_ports(task, vif_id, physnets):
attached. This is governed by the segments of the VIF's network. An
empty set indicates that the ports' physical networks should be
ignored.
+ :param vif_info: dict that may contain extra information, such as
+ port_uuid
:returns: list of free ports and portgroups.
:raises: VifAlreadyAttached, if vif_id is attached to any of the
node's ports or portgroups.
@@ -109,9 +111,18 @@ def _get_free_portgroups_and_ports(task, vif_id, physnets):
# at least one port with vif already attached to it
non_usable_portgroups = set()
+ port_uuid = None
+ portgroup_uuid = None
+ if 'port_uuid' in vif_info:
+ port_uuid = vif_info['port_uuid']
+ elif 'portgroup_uuid' in vif_info:
+ portgroup_uuid = vif_info['portgroup_uuid']
+
for p in task.ports:
+        # If port_uuid is specified in vif_info, check that it matches
# Validate that port has needed information
- if not neutron.validate_port_info(task.node, p):
+ if ((port_uuid and port_uuid != p.uuid)
+ or not neutron.validate_port_info(task.node, p)):
continue
if _vif_attached(p, vif_id):
# Consider such portgroup unusable. The fact that we can have None
@@ -120,27 +131,30 @@ def _get_free_portgroups_and_ports(task, vif_id, physnets):
continue
if not _is_port_physnet_allowed(p, physnets):
continue
- if p.portgroup_id is None:
- # ports without portgroup_id are always considered candidates
+ if p.portgroup_id is None and not portgroup_uuid:
free_port_like_objs.append(p)
else:
ports_by_portgroup[p.portgroup_id].append(p)
- for pg in task.portgroups:
- if _vif_attached(pg, vif_id):
- continue
- if pg.id in non_usable_portgroups:
- # This portgroup has vifs attached to its ports, consider its
- # ports instead to avoid collisions
- free_port_like_objs.extend(ports_by_portgroup[pg.id])
- # Also ignore empty portgroups
- elif ports_by_portgroup[pg.id]:
- free_port_like_objs.append(pg)
+ if not port_uuid:
+ for pg in task.portgroups:
+            # If portgroup_uuid is specified in vif_info, check that it matches
+ if ((portgroup_uuid and portgroup_uuid != pg.uuid)
+ or _vif_attached(pg, vif_id)):
+ continue
+ if pg.id in non_usable_portgroups:
+ # This portgroup has vifs attached to its ports, consider its
+ # ports instead to avoid collisions
+ if not portgroup_uuid:
+ free_port_like_objs.extend(ports_by_portgroup[pg.id])
+ # Also ignore empty portgroups
+ elif ports_by_portgroup[pg.id]:
+ free_port_like_objs.append(pg)
return free_port_like_objs
-def get_free_port_like_object(task, vif_id, physnets):
+def get_free_port_like_object(task, vif_id, physnets, vif_info={}):
"""Find free port-like object (portgroup or port) VIF will be attached to.
Ensures that the VIF is not already attached to this node. When selecting
@@ -160,6 +174,8 @@ def get_free_port_like_object(task, vif_id, physnets):
attached. This is governed by the segments of the VIF's network. An
empty set indicates that the ports' physical networks should be
ignored.
+ :param vif_info: dict that may contain extra information, such as
+ port_uuid
:raises: VifAlreadyAttached, if VIF is already attached to the node.
:raises: NoFreePhysicalPorts, if there is no port-like object VIF can be
attached to.
@@ -167,8 +183,8 @@ def get_free_port_like_object(task, vif_id, physnets):
has ports which are not all assigned the same physical network.
:returns: port-like object VIF will be attached to.
"""
- free_port_like_objs = _get_free_portgroups_and_ports(task, vif_id,
- physnets)
+ free_port_like_objs = _get_free_portgroups_and_ports(
+ task, vif_id, physnets, vif_info)
if not free_port_like_objs:
raise exception.NoFreePhysicalPorts(vif=vif_id)
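Callers can now pin a VIF to one specific port or portgroup by passing its UUID through vif_info. A hedged usage sketch (identifiers hypothetical):

    # Only the port with the matching UUID is considered a candidate.
    vif_info = {'port_uuid': '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'}
    port_like_obj = get_free_port_like_object(
        task, 'neutron-vif-id', physnets=set(), vif_info=vif_info)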
@@ -393,6 +409,30 @@ class VIFPortIDMixin(object):
or p_obj.internal_info.get('inspection_vif_port_id')
or self._get_vif_id_by_port_like_obj(p_obj) or None)
+ def get_node_network_data(self, task):
+ """Get network configuration data for node's ports/portgroups.
+
+        Gather L2 and L3 network settings from the ironic node's
+        `network_data` field. Ironic will eventually pass this network
+        configuration to the managed node out-of-band.
+
+ :param task: A TaskManager instance.
+ :raises: InvalidParameterValue, if the network interface configuration
+ is invalid.
+ :raises: MissingParameterValue, if some parameters are missing.
+        :returns: a dict holding network configuration information adhering
+            to the Nova network metadata layout (`network_data.json`).
+ """
+ node = task.node
+
+ network_data = node.network_data
+
+ # TODO(etingof): remove or truncate `network_data` logging
+ LOG.debug('Collected network data for node %(node)s: %(data)s',
+ {'node': node.uuid, 'data': network_data})
+
+ return network_data
+
class NeutronVIFPortIDMixin(VIFPortIDMixin):
"""VIF port ID mixin class for neutron network interfaces.
@@ -552,7 +592,8 @@ class NeutronVIFPortIDMixin(VIFPortIDMixin):
raise exception.VifInvalidForAttach(
node=task.node.uuid, vif=vif_id, reason=reason)
- port_like_obj = get_free_port_like_object(task, vif_id, physnets)
+ port_like_obj = get_free_port_like_object(
+ task, vif_id, physnets, vif_info)
# Address is optional for portgroups
if port_like_obj.address:
@@ -592,3 +633,51 @@ class NeutronVIFPortIDMixin(VIFPortIDMixin):
# DELETING state.
if task.node.provision_state in [states.ACTIVE, states.DELETING]:
neutron.unbind_neutron_port(vif_id, context=task.context)
+
+ def get_node_network_data(self, task):
+ """Get network configuration data for node ports.
+
+ Pull network data from ironic node object if present, otherwise
+ collect it for Neutron VIFs.
+
+ :param task: A TaskManager instance.
+ :raises: InvalidParameterValue, if the network interface configuration
+ is invalid.
+ :raises: MissingParameterValue, if some parameters are missing.
+        :returns: a dict holding network configuration information adhering
+            to the Nova network metadata layout (`network_data.json`).
+ """
+ # NOTE(etingof): static network data takes precedence
+ network_data = (
+ super(NeutronVIFPortIDMixin, self).get_node_network_data(task))
+ if network_data:
+ return network_data
+
+ node = task.node
+
+ LOG.debug('Gathering network data from ports of node '
+ '%(node)s', {'node': node.uuid})
+
+ network_data = collections.defaultdict(list)
+
+ for port_obj in task.ports:
+ vif_port_id = self.get_current_vif(task, port_obj)
+
+ LOG.debug('Considering node %(node)s port %(port)s, VIF %(vif)s',
+ {'node': node.uuid, 'port': port_obj.uuid,
+ 'vif': vif_port_id})
+
+ if not vif_port_id:
+ continue
+
+ port_network_data = neutron.get_neutron_port_data(
+ port_obj.uuid, vif_port_id, context=task.context)
+
+ for field, field_data in port_network_data.items():
+ if field_data:
+ network_data[field].extend(field_data)
+
+ LOG.debug('Collected network data for node %(node)s: %(data)s',
+ {'node': node.uuid, 'data': network_data})
+
+ return network_data
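The returned dict follows Nova's network_data.json metadata layout; an abbreviated example of that shape (values illustrative):

    network_data = {
        'links': [{'id': 'eth0', 'type': 'phy',
                   'ethernet_mac_address': '52:54:00:12:34:56'}],
        'networks': [{'id': 'net0', 'type': 'ipv4', 'link': 'eth0',
                      'ip_address': '192.0.2.10',
                      'netmask': '255.255.255.0'}],
        'services': [{'type': 'dns', 'address': '192.0.2.1'}],
    }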
diff --git a/ironic/drivers/modules/pxe.py b/ironic/drivers/modules/pxe.py
index ce86dfa8a..3463f3543 100644
--- a/ironic/drivers/modules/pxe.py
+++ b/ironic/drivers/modules/pxe.py
@@ -24,7 +24,7 @@ from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers import base
-from ironic.drivers.modules import agent
+from ironic.drivers.modules import agent_base
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import pxe_base
LOG = logging.getLogger(__name__)
@@ -37,7 +37,10 @@ class PXEBoot(pxe_base.PXEBaseMixin, base.BootInterface):
capabilities = ['ramdisk_boot', 'pxe_boot']
-class PXERamdiskDeploy(agent.AgentDeploy):
+class PXERamdiskDeploy(agent_base.AgentBaseMixin, base.DeployInterface):
+
+ def get_properties(self, task):
+ return {}
def validate(self, task):
if 'ramdisk_boot' not in task.driver.boot.capabilities:
diff --git a/ironic/drivers/modules/pxe_base.py b/ironic/drivers/modules/pxe_base.py
index 290b005a1..1c4ecb598 100644
--- a/ironic/drivers/modules/pxe_base.py
+++ b/ironic/drivers/modules/pxe_base.py
@@ -200,7 +200,10 @@ class PXEBaseMixin(object):
if ramdisk_params.get("ipa-api-url"):
pxe_options["ipa-api-url"] = ramdisk_params["ipa-api-url"]
- pxe_config_template = deploy_utils.get_pxe_config_template(node)
+ if self.ipxe_enabled:
+ pxe_config_template = deploy_utils.get_ipxe_config_template(node)
+ else:
+ pxe_config_template = deploy_utils.get_pxe_config_template(node)
pxe_utils.create_pxe_config(task, pxe_options,
pxe_config_template,
@@ -241,7 +244,6 @@ class PXEBaseMixin(object):
boot_option = deploy_utils.get_boot_option(node)
boot_device = None
instance_image_info = {}
-
if boot_option == "ramdisk":
instance_image_info = pxe_utils.get_instance_image_info(
task, ipxe_enabled=self.ipxe_enabled)
@@ -301,11 +303,22 @@ class PXEBaseMixin(object):
ipxe_enabled=self.ipxe_enabled)
boot_device = boot_devices.PXE
else:
- # If it's going to boot from the local disk, we don't need
- # PXE config files. They still need to be generated as part
- # of the prepare() because the deployment does PXE boot the
- # deploy ramdisk
- pxe_utils.clean_up_pxe_config(task, ipxe_enabled=self.ipxe_enabled)
+            # NOTE(dtantsur): create a PXE configuration as a safety net for
+            # hardware incapable of persistent boot. If the node tries to boot
+            # from PXE on a reboot, this configuration will return it to the
+            # local disk.
+ if CONF.pxe.enable_netboot_fallback:
+ pxe_utils.build_service_pxe_config(
+ task, instance_image_info,
+ task.node.driver_internal_info.get('root_uuid_or_disk_id'),
+ ipxe_enabled=self.ipxe_enabled,
+ # PXE config for whole disk images is identical to what
+ # we need to boot from local disk, so use True even
+ # for partition images.
+ is_whole_disk_image=True)
+ else:
+ # Clean up the deployment configuration
+ pxe_utils.clean_up_pxe_config(
+ task, ipxe_enabled=self.ipxe_enabled)
boot_device = boot_devices.DISK
# NOTE(pas-ha) do not re-set boot device on ACTIVE nodes
@@ -351,6 +364,14 @@ class PXEBaseMixin(object):
{'node': node.uuid})
pxe_utils.validate_boot_parameters_for_trusted_boot(node)
+ # Check if we have invalid parameters being passed which will not work
+ # for ramdisk configurations.
+ if (node.instance_info.get('image_source')
+ and node.instance_info.get('boot_iso')):
+ raise exception.InvalidParameterValue(_(
+ "An 'image_source' and 'boot_iso' parameter may not be "
+ "specified at the same time."))
+
pxe_utils.parse_driver_info(node)
@METRICS.timer('PXEBaseMixin.validate')
@@ -379,6 +400,8 @@ class PXEBaseMixin(object):
if (node.driver_internal_info.get('is_whole_disk_image')
or deploy_utils.get_boot_option(node) == 'local'):
props = []
+ elif d_info.get('boot_iso'):
+ props = ['boot_iso']
elif service_utils.is_glance_image(d_info['image_source']):
props = ['kernel_id', 'ramdisk_id']
else:
diff --git a/ironic/drivers/modules/redfish/boot.py b/ironic/drivers/modules/redfish/boot.py
index f842a04fd..445ee0d57 100644
--- a/ironic/drivers/modules/redfish/boot.py
+++ b/ironic/drivers/modules/redfish/boot.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
+import json
import os
import shutil
import tempfile
@@ -83,524 +85,662 @@ KERNEL_RAMDISK_LABELS = {
'rescue': RESCUE_PROPERTIES
}
+IMAGE_SUBDIR = 'redfish'
+
sushy = importutils.try_import('sushy')
-class RedfishVirtualMediaBoot(base.BootInterface):
- """Virtual media boot interface over Redfish.
+def _parse_driver_info(node):
+ """Gets the driver specific Node deployment info.
- Virtual Media allows booting the system from the "virtual"
- CD/DVD drive containing the user image that BMC "inserts"
- into the drive.
+ This method validates whether the 'driver_info' property of the
+ supplied node contains the required or optional information properly
+ for this driver to deploy images to the node.
- The CD/DVD images must be in ISO format and (depending on
- BMC implementation) could be pulled over HTTP, served as
- iSCSI targets or NFS volumes.
+ :param node: a target node of the deployment
+ :returns: the driver_info values of the node.
+ :raises: MissingParameterValue, if any of the required parameters are
+ missing.
+ :raises: InvalidParameterValue, if any of the parameters have invalid
+ value.
+ """
+ d_info = node.driver_info
- The baseline boot workflow looks like this:
+ mode = deploy_utils.rescue_or_deploy_mode(node)
+ params_to_check = KERNEL_RAMDISK_LABELS[mode]
- 1. Pull kernel, ramdisk and ESP (FAT partition image with EFI boot
- loader) images (ESP is only needed for UEFI boot)
- 2. Create bootable ISO out of images (#1), push it to Glance and
- pass to the BMC as Swift temporary URL
- 3. Optionally create floppy image with desired system configuration data,
- push it to Glance and pass to the BMC as Swift temporary URL
- 4. Insert CD/DVD and (optionally) floppy images and set proper boot mode
+ deploy_info = {option: d_info.get(option)
+ for option in params_to_check}
- For building deploy or rescue ISO, redfish boot interface uses
- `deploy_kernel`/`deploy_ramdisk` or `rescue_kernel`/`rescue_ramdisk`
- properties from `[instance_info]` or `[driver_info]`.
+ if not any(deploy_info.values()):
+ # NOTE(dtantsur): avoid situation when e.g. deploy_kernel comes
+ # from driver_info but deploy_ramdisk comes from configuration,
+ # since it's a sign of a potential operator's mistake.
+ deploy_info = {k: getattr(CONF.conductor, k)
+ for k in params_to_check}
- For building boot (user) ISO, redfish boot interface seeks `kernel_id`
- and `ramdisk_id` properties in the Glance image metadata found in
- `[instance_info]image_source` node property.
- """
+ error_msg = _("Error validating Redfish virtual media. Some "
+ "parameters were missing in node's driver_info")
- IMAGE_SUBDIR = 'redfish'
+ deploy_utils.check_for_missing_params(deploy_info, error_msg)
- capabilities = ['iscsi_volume_boot', 'ramdisk_boot']
+ deploy_info.update(
+ {option: d_info.get(option, getattr(CONF.conductor, option, None))
+ for option in OPTIONAL_PROPERTIES})
- def __init__(self):
- """Initialize the Redfish virtual media boot interface.
+ deploy_info.update(redfish_utils.parse_driver_info(node))
- :raises: DriverLoadError if the driver can't be loaded due to
- missing dependencies
- """
- super(RedfishVirtualMediaBoot, self).__init__()
- if not sushy:
- raise exception.DriverLoadError(
- driver='redfish',
- reason=_('Unable to import the sushy library'))
+ return deploy_info
- @staticmethod
- def _parse_driver_info(node):
- """Gets the driver specific Node deployment info.
- This method validates whether the 'driver_info' property of the
- supplied node contains the required or optional information properly
- for this driver to deploy images to the node.
-
- :param node: a target node of the deployment
- :returns: the driver_info values of the node.
- :raises: MissingParameterValue, if any of the required parameters are
- missing.
- :raises: InvalidParameterValue, if any of the parameters have invalid
- value.
- """
- d_info = node.driver_info
+def _parse_instance_info(node):
+ """Gets the instance specific Node deployment info.
- mode = deploy_utils.rescue_or_deploy_mode(node)
- params_to_check = KERNEL_RAMDISK_LABELS[mode]
+ This method validates whether the 'instance_info' property of the
+ supplied node contains the required or optional information properly
+ for this driver to deploy images to the node.
- deploy_info = {option: d_info.get(option)
- for option in params_to_check}
+ :param node: a target node of the deployment
+ :returns: the instance_info values of the node.
+ :raises: InvalidParameterValue, if any of the parameters have invalid
+ value.
+ """
+ deploy_info = node.instance_info.copy()
- if not any(deploy_info.values()):
- # NOTE(dtantsur): avoid situation when e.g. deploy_kernel comes
- # from driver_info but deploy_ramdisk comes from configuration,
- # since it's a sign of a potential operator's mistake.
- deploy_info = {k: getattr(CONF.conductor, k)
- for k in params_to_check}
+    # NOTE(etingof): this method is currently a no-op, here for completeness
+ return deploy_info
- error_msg = _("Error validating Redfish virtual media. Some "
- "parameters were missing in node's driver_info")
- deploy_utils.check_for_missing_params(deploy_info, error_msg)
+def _append_filename_param(url, filename):
+ """Append 'filename=<file>' parameter to given URL.
- deploy_info.update(
- {option: d_info.get(option, getattr(CONF.conductor, option, None))
- for option in OPTIONAL_PROPERTIES})
+    Some BMCs seem to validate the boot image URL, requiring it to end
+    with something resembling an ISO image file name.
- deploy_info.update(redfish_utils.parse_driver_info(node))
+    This function tries to add a (hopefully meaningless) 'filename'
+    parameter to the URL's query string in the hope of making the entire
+    boot image URL look more convincing to the BMC.
- return deploy_info
+    However, a `url` containing fragments may not be cured by this hack.
- @staticmethod
- def _parse_instance_info(node):
- """Gets the instance specific Node deployment info.
+ :param url: a URL to work on
+ :param filename: name of the file to append to the URL
+ :returns: original URL with 'filename' parameter appended
+ """
+ parsed_url = urlparse.urlparse(url)
+ parsed_qs = urlparse.parse_qsl(parsed_url.query)
- This method validates whether the 'instance_info' property of the
- supplied node contains the required or optional information properly
- for this driver to deploy images to the node.
+ has_filename = [x for x in parsed_qs if x[0].lower() == 'filename']
+ if has_filename:
+ return url
- :param node: a target node of the deployment
- :returns: the instance_info values of the node.
- :raises: InvalidParameterValue, if any of the parameters have invalid
- value.
- """
- deploy_info = node.instance_info.copy()
+ parsed_qs.append(('filename', filename))
+ parsed_url = list(parsed_url)
+ parsed_url[4] = urlparse.urlencode(parsed_qs)
- # NOTE(etingof): this method is currently no-op, here for completeness
- return deploy_info
+ return urlparse.urlunparse(parsed_url)
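Illustrative input and output for the helper above (URL hypothetical):

    url = _append_filename_param(
        'https://bmc.example.com/image?token=abc', 'boot.iso')
    # -> 'https://bmc.example.com/image?token=abc&filename=boot.iso'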
- @classmethod
- def _parse_deploy_info(cls, node):
- """Gets the instance and driver specific Node deployment info.
-
- This method validates whether the 'instance_info' and 'driver_info'
- property of the supplied node contains the required information for
- this driver to deploy images to the node.
-
- :param node: a target node of the deployment
- :returns: a dict with the instance_info and driver_info values.
- :raises: MissingParameterValue, if any of the required parameters are
- missing.
- :raises: InvalidParameterValue, if any of the parameters have invalid
- value.
- """
- deploy_info = {}
- deploy_info.update(deploy_utils.get_image_instance_info(node))
- deploy_info.update(cls._parse_driver_info(node))
- deploy_info.update(cls._parse_instance_info(node))
- return deploy_info
+def _get_floppy_image_name(node):
+ """Returns the floppy image name for a given node.
- @staticmethod
- def _append_filename_param(url, filename):
- """Append 'filename=<file>' parameter to given URL.
+ :param node: the node for which image name is to be provided.
+ """
+ return "image-%s" % node.uuid
- Some BMCs seem to validate boot image URL requiring the URL to end
- with something resembling ISO image file name.
- This function tries to add, hopefully, meaningless 'filename'
- parameter to URL's query string in hope to make the entire boot image
- URL looking more convincing to the BMC.
+def _get_iso_image_name(node):
+ """Returns the boot iso image name for a given node.
- However, `url` with fragments might not get cured by this hack.
+ :param node: the node for which image name is to be provided.
+ """
+ return "boot-%s" % node.uuid
- :param url: a URL to work on
- :param filename: name of the file to append to the URL
- :returns: original URL with 'filename' parameter appended
- """
- parsed_url = urlparse.urlparse(url)
- parsed_qs = urlparse.parse_qsl(parsed_url.query)
- has_filename = [x for x in parsed_qs if x[0].lower() == 'filename']
- if has_filename:
- return url
+def _insert_vmedia(task, boot_url, boot_device):
+ """Insert bootable ISO image into virtual CD or DVD
- parsed_qs.append(('filename', filename))
- parsed_url = list(parsed_url)
- parsed_url[4] = urlparse.urlencode(parsed_qs)
+ :param task: A task from TaskManager.
+ :param boot_url: URL to a bootable ISO image
+ :param boot_device: sushy boot device e.g. `VIRTUAL_MEDIA_CD`,
+ `VIRTUAL_MEDIA_DVD` or `VIRTUAL_MEDIA_FLOPPY`
+ :raises: InvalidParameterValue, if no suitable virtual CD or DVD is
+ found on the node.
+ """
+ system = redfish_utils.get_system(task.node)
+
+ for manager in system.managers:
+ for v_media in manager.virtual_media.get_members():
+ if boot_device not in v_media.media_types:
+ continue
+
+ if v_media.inserted:
+ if v_media.image == boot_url:
+ LOG.debug("Boot media %(boot_url)s is already "
+ "inserted into %(boot_device)s for node "
+ "%(node)s", {'node': task.node.uuid,
+ 'boot_url': boot_url,
+ 'boot_device': boot_device})
+ return
+
+ continue
+
+ v_media.insert_media(boot_url, inserted=True,
+ write_protected=True)
+
+ LOG.info("Inserted boot media %(boot_url)s into "
+ "%(boot_device)s for node "
+ "%(node)s", {'node': task.node.uuid,
+ 'boot_url': boot_url,
+ 'boot_device': boot_device})
+ return
- return urlparse.urlunparse(parsed_url)
+ raise exception.InvalidParameterValue(
+ _('No suitable virtual media device found'))
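A hedged sketch of the underlying sushy calls the helper depends on (endpoint, credentials and image URL hypothetical):

    import sushy

    conn = sushy.Sushy('https://bmc.example.com/redfish/v1',
                       username='admin', password='secret')
    system = conn.get_system('/redfish/v1/Systems/1')
    for manager in system.managers:
        for v_media in manager.virtual_media.get_members():
            if sushy.VIRTUAL_MEDIA_CD in v_media.media_types:
                v_media.insert_media('http://203.0.113.1/boot.iso',
                                     inserted=True, write_protected=True)
                break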
- @classmethod
- def _publish_image(cls, image_file, object_name):
- """Make image file downloadable.
- Depending on ironic settings, pushes given file into Swift or copies
- it over to local HTTP server's document root and returns publicly
- accessible URL leading to the given file.
+def _eject_vmedia(task, boot_device=None):
+ """Eject virtual CDs and DVDs
- :param image_file: path to file to publish
- :param object_name: name of the published file
- :return: a URL to download published file
- """
+ :param task: A task from TaskManager.
+ :param boot_device: sushy boot device e.g. `VIRTUAL_MEDIA_CD`,
+ `VIRTUAL_MEDIA_DVD` or `VIRTUAL_MEDIA_FLOPPY` or `None` to
+ eject everything (default).
+ :raises: InvalidParameterValue, if no suitable virtual CD or DVD is
+ found on the node.
+ """
+ system = redfish_utils.get_system(task.node)
- if CONF.redfish.use_swift:
- container = CONF.redfish.swift_container
- timeout = CONF.redfish.swift_object_expiry_timeout
+ for manager in system.managers:
+ for v_media in manager.virtual_media.get_members():
+ if boot_device and boot_device not in v_media.media_types:
+ continue
- object_headers = {'X-Delete-After': str(timeout)}
+ inserted = v_media.inserted
- swift_api = swift.SwiftAPI()
+ if inserted:
+ v_media.eject_media()
- swift_api.create_object(container, object_name, image_file,
- object_headers=object_headers)
+ LOG.info("Boot media is%(already)s ejected from "
+ "%(boot_device)s for node %(node)s"
+ "", {'node': task.node.uuid,
+ 'already': '' if inserted else ' already',
+ 'boot_device': v_media.name})
- image_url = swift_api.get_temp_url(container, object_name, timeout)
- else:
- public_dir = os.path.join(CONF.deploy.http_root, cls.IMAGE_SUBDIR)
+def _has_vmedia_device(task, boot_device):
+ """Indicate if device exists at any of the managers
- if not os.path.exists(public_dir):
- os.mkdir(public_dir, 0x755)
+ :param task: A task from TaskManager.
+ :param boot_device: sushy boot device e.g. `VIRTUAL_MEDIA_CD`,
+ `VIRTUAL_MEDIA_DVD` or `VIRTUAL_MEDIA_FLOPPY`.
+ """
+ system = redfish_utils.get_system(task.node)
- published_file = os.path.join(public_dir, object_name)
+ for manager in system.managers:
+ for v_media in manager.virtual_media.get_members():
+ if boot_device in v_media.media_types:
+ return True
- try:
- os.link(image_file, published_file)
- except OSError as exc:
- LOG.debug(
- "Could not hardlink image file %(image)s to public "
- "location %(public)s (will copy it over): "
- "%(error)s", {'image': image_file,
- 'public': published_file,
- 'error': exc})
+def _cleanup_iso_image(task):
+ """Deletes the ISO if it was created for the instance.
- shutil.copyfile(image_file, published_file)
+ :param task: A task from TaskManager.
+ """
+ iso_object_name = _get_iso_image_name(task.node)
- image_url = os.path.join(
- CONF.deploy.http_url, cls.IMAGE_SUBDIR, object_name)
+ _unpublish_image(iso_object_name)
- image_url = cls._append_filename_param(
- image_url, os.path.basename(image_file))
- return image_url
+def _unpublish_image(object_name):
+ """Withdraw the image previously made downloadable.
- @classmethod
- def _unpublish_image(cls, object_name):
- """Withdraw the image previously made downloadable.
+ Depending on ironic settings, removes previously published file
+ from where it has been published - Swift or local HTTP server's
+ document root.
- Depending on ironic settings, removes previously published file
- from where it has been published - Swift or local HTTP server's
- document root.
+ :param object_name: name of the published file (optional)
+ """
+ if CONF.redfish.use_swift:
+ container = CONF.redfish.swift_container
- :param object_name: name of the published file (optional)
- """
- if CONF.redfish.use_swift:
- container = CONF.redfish.swift_container
+ swift_api = swift.SwiftAPI()
- swift_api = swift.SwiftAPI()
+ LOG.debug("Cleaning up image %(name)s from Swift container "
+ "%(container)s", {'name': object_name,
+ 'container': container})
- LOG.debug("Cleaning up image %(name)s from Swift container "
- "%(container)s", {'name': object_name,
- 'container': container})
+ try:
+ swift_api.delete_object(container, object_name)
+
+ except exception.SwiftOperationError as exc:
+ LOG.warning("Failed to clean up image %(image)s. Error: "
+ "%(error)s.", {'image': object_name,
+ 'error': exc})
+
+ else:
+ published_file = os.path.join(
+ CONF.deploy.http_root, IMAGE_SUBDIR, object_name)
+
+ ironic_utils.unlink_without_raise(published_file)
+
+
+def _prepare_floppy_image(task, params=None):
+ """Prepares the floppy image for passing the parameters.
+
+ This method prepares a temporary VFAT filesystem image and adds
+ a file into the image which contains parameters to be passed to
+ the ramdisk. Then this method uploads built image to Swift
+ '[redfish]swift_container', setting it to auto expire after
+ '[redfish]swift_object_expiry_timeout' seconds. Finally, a
+ temporary Swift URL is returned addressing Swift object just
+ created.
+
+ :param task: a TaskManager instance containing the node to act on.
+ :param params: a dictionary containing 'parameter name'->'value'
+ mapping to be passed to deploy or rescue image via floppy image.
+ :raises: ImageCreationFailed, if it failed while creating the floppy
+ image.
+ :raises: SwiftOperationError, if any operation with Swift fails.
+ :returns: image URL for the floppy image.
+ """
+ object_name = _get_floppy_image_name(task.node)
- try:
- swift_api.delete_object(container, object_name)
+ LOG.debug("Trying to create floppy image for node "
+ "%(node)s", {'node': task.node.uuid})
- except exception.SwiftOperationError as exc:
- LOG.warning("Failed to clean up image %(image)s. Error: "
- "%(error)s.", {'image': object_name,
- 'error': exc})
+ with tempfile.NamedTemporaryFile(
+ dir=CONF.tempdir, suffix='.img') as vfat_image_tmpfile_obj:
- else:
- published_file = os.path.join(
- CONF.deploy.http_root, cls.IMAGE_SUBDIR, object_name)
+ vfat_image_tmpfile = vfat_image_tmpfile_obj.name
+ images.create_vfat_image(vfat_image_tmpfile, parameters=params)
- ironic_utils.unlink_without_raise(published_file)
+ image_url = _publish_image(vfat_image_tmpfile, object_name)
- @staticmethod
- def _get_floppy_image_name(node):
- """Returns the floppy image name for a given node.
+ LOG.debug("Created floppy image %(name)s in Swift for node %(node)s, "
+ "exposed as temporary URL "
+ "%(url)s", {'node': task.node.uuid,
+ 'name': object_name,
+ 'url': image_url})
- :param node: the node for which image name is to be provided.
- """
- return "image-%s" % node.uuid
+ return image_url
- @classmethod
- def _cleanup_floppy_image(cls, task):
- """Deletes the floppy image if it was created for the node.
- :param task: an ironic node object.
- """
- floppy_object_name = cls._get_floppy_image_name(task.node)
+def _publish_image(image_file, object_name):
+ """Make image file downloadable.
- cls._unpublish_image(floppy_object_name)
+ Depending on ironic settings, pushes given file into Swift or copies
+ it over to local HTTP server's document root and returns publicly
+ accessible URL leading to the given file.
- @classmethod
- def _prepare_floppy_image(cls, task, params=None):
- """Prepares the floppy image for passing the parameters.
+ :param image_file: path to file to publish
+ :param object_name: name of the published file
+ :return: a URL to download published file
+ """
- This method prepares a temporary VFAT filesystem image and adds
- a file into the image which contains parameters to be passed to
- the ramdisk. Then this method uploads built image to Swift
- '[redfish]swift_container', setting it to auto expire after
- '[redfish]swift_object_expiry_timeout' seconds. Finally, a
- temporary Swift URL is returned addressing Swift object just
- created.
+ if CONF.redfish.use_swift:
+ container = CONF.redfish.swift_container
+ timeout = CONF.redfish.swift_object_expiry_timeout
- :param task: a TaskManager instance containing the node to act on.
- :param params: a dictionary containing 'parameter name'->'value'
- mapping to be passed to deploy or rescue image via floppy image.
- :raises: ImageCreationFailed, if it failed while creating the floppy
- image.
- :raises: SwiftOperationError, if any operation with Swift fails.
- :returns: image URL for the floppy image.
- """
- object_name = cls._get_floppy_image_name(task.node)
+ object_headers = {'X-Delete-After': str(timeout)}
- LOG.debug("Trying to create floppy image for node "
- "%(node)s", {'node': task.node.uuid})
+ swift_api = swift.SwiftAPI()
- with tempfile.NamedTemporaryFile(
- dir=CONF.tempdir, suffix='.img') as vfat_image_tmpfile_obj:
+ swift_api.create_object(container, object_name, image_file,
+ object_headers=object_headers)
- vfat_image_tmpfile = vfat_image_tmpfile_obj.name
- images.create_vfat_image(vfat_image_tmpfile, parameters=params)
+ image_url = swift_api.get_temp_url(container, object_name, timeout)
- image_url = cls._publish_image(vfat_image_tmpfile, object_name)
+ else:
+ public_dir = os.path.join(CONF.deploy.http_root, IMAGE_SUBDIR)
- LOG.debug("Created floppy image %(name)s in Swift for node %(node)s, "
- "exposed as temporary URL "
- "%(url)s", {'node': task.node.uuid,
- 'name': object_name,
- 'url': image_url})
+ if not os.path.exists(public_dir):
+ os.mkdir(public_dir, 0o755)
- return image_url
+ published_file = os.path.join(public_dir, object_name)
- @staticmethod
- def _get_iso_image_name(node):
- """Returns the boot iso image name for a given node.
+ try:
+ os.link(image_file, published_file)
+ os.chmod(image_file, CONF.redfish.file_permission)
- :param node: the node for which image name is to be provided.
- """
- return "boot-%s" % node.uuid
+ except OSError as exc:
+ LOG.debug(
+ "Could not hardlink image file %(image)s to public "
+ "location %(public)s (will copy it over): "
+ "%(error)s", {'image': image_file,
+ 'public': published_file,
+ 'error': exc})
- @classmethod
- def _cleanup_iso_image(cls, task):
- """Deletes the ISO if it was created for the instance.
+ shutil.copyfile(image_file, published_file)
+ os.chmod(published_file, CONF.redfish.file_permission)
- :param task: an ironic node object.
- """
- iso_object_name = cls._get_iso_image_name(task.node)
+ image_url = os.path.join(
+ CONF.deploy.http_url, IMAGE_SUBDIR, object_name)
- cls._unpublish_image(iso_object_name)
+ image_url = _append_filename_param(
+ image_url, os.path.basename(image_file))
- @classmethod
- def _prepare_iso_image(cls, task, kernel_href, ramdisk_href,
- bootloader_href=None, configdrive=None,
- root_uuid=None, params=None):
- """Prepare an ISO to boot the node.
+ return image_url
- Build bootable ISO out of `kernel_href` and `ramdisk_href` (and
- `bootloader` if it's UEFI boot), then push built image up to Swift and
- return a temporary URL.
- :param task: a TaskManager instance containing the node to act on.
- :param kernel_href: URL or Glance UUID of the kernel to use
- :param ramdisk_href: URL or Glance UUID of the ramdisk to use
- :param bootloader_href: URL or Glance UUID of the EFI bootloader
- image to use when creating UEFI bootbable ISO
- :param configdrive: URL to or a compressed blob of a ISO9660 or
- FAT-formatted OpenStack config drive image. This image will be
- written onto the built ISO image. Optional.
- :param root_uuid: optional uuid of the root partition.
- :param params: a dictionary containing 'parameter name'->'value'
- mapping to be passed to kernel command line.
- :returns: bootable ISO HTTP URL.
- :raises: MissingParameterValue, if any of the required parameters are
- missing.
- :raises: InvalidParameterValue, if any of the parameters have invalid
- value.
- :raises: ImageCreationFailed, if creating ISO image failed.
- """
- if not kernel_href or not ramdisk_href:
- raise exception.InvalidParameterValue(_(
- "Unable to find kernel or ramdisk for "
- "building ISO for %(node)s") %
- {'node': task.node.uuid})
+def _cleanup_floppy_image(task):
+ """Deletes the floppy image if it was created for the node.
+
+    :param task: a TaskManager instance.
+ """
+ floppy_object_name = _get_floppy_image_name(task.node)
+
+ _unpublish_image(floppy_object_name)
+
- i_info = task.node.instance_info
+def _parse_deploy_info(node):
+ """Gets the instance and driver specific Node deployment info.
- if deploy_utils.get_boot_option(task.node) == "ramdisk":
+ This method validates whether the 'instance_info' and 'driver_info'
+ property of the supplied node contains the required information for
+ this driver to deploy images to the node.
+
+ :param node: a target node of the deployment
+ :returns: a dict with the instance_info and driver_info values.
+ :raises: MissingParameterValue, if any of the required parameters are
+ missing.
+ :raises: InvalidParameterValue, if any of the parameters have invalid
+ value.
+ """
+ deploy_info = {}
+ deploy_info.update(deploy_utils.get_image_instance_info(node))
+ deploy_info.update(_parse_driver_info(node))
+ deploy_info.update(_parse_instance_info(node))
+
+ return deploy_info
+
+
+def _prepare_iso_image(task, kernel_href, ramdisk_href,
+ bootloader_href=None, configdrive=None,
+ root_uuid=None, params=None, base_iso=None):
+ """Prepare an ISO to boot the node.
+
+ Build bootable ISO out of `kernel_href` and `ramdisk_href` (and
+ `bootloader` if it's UEFI boot), then push built image up to Swift and
+ return a temporary URL.
+
+ If `configdrive` is specified it will be eventually written onto
+ the boot ISO image.
+
+ :param task: a TaskManager instance containing the node to act on.
+ :param kernel_href: URL or Glance UUID of the kernel to use
+ :param ramdisk_href: URL or Glance UUID of the ramdisk to use
+ :param bootloader_href: URL or Glance UUID of the EFI bootloader
+        image to use when creating a UEFI bootable ISO
+ :param configdrive: URL to or a compressed blob of a ISO9660 or
+ FAT-formatted OpenStack config drive image. This image will be
+ written onto the built ISO image. Optional.
+ :param root_uuid: optional uuid of the root partition.
+ :param params: a dictionary containing 'parameter name'->'value'
+ mapping to be passed to kernel command line.
+ :returns: bootable ISO HTTP URL.
+ :raises: MissingParameterValue, if any of the required parameters are
+ missing.
+ :raises: InvalidParameterValue, if any of the parameters have invalid
+ value.
+ :raises: ImageCreationFailed, if creating ISO image failed.
+ """
+ if (not kernel_href or not ramdisk_href) and not base_iso:
+ raise exception.InvalidParameterValue(_(
+ "Unable to find kernel, ramdisk for "
+ "building ISO, or explicit ISO for %(node)s") %
+ {'node': task.node.uuid})
+
+ i_info = task.node.instance_info
+
+ # NOTE(TheJulia): Until we support modifying a base iso, most of
+ # this logic actually does nothing in the end. But it should!
+ if deploy_utils.get_boot_option(task.node) == "ramdisk":
+ if not base_iso:
kernel_params = "root=/dev/ram0 text "
kernel_params += i_info.get("ramdisk_kernel_arguments", "")
-
else:
- kernel_params = i_info.get(
- 'kernel_append_params', CONF.redfish.kernel_append_params)
-
- if params:
- kernel_params = ' '.join(
- (kernel_params, ' '.join(
- '%s=%s' % kv for kv in params.items())))
-
- boot_mode = boot_mode_utils.get_boot_mode_for_deploy(task.node)
-
- LOG.debug("Trying to create %(boot_mode)s ISO image for node %(node)s "
- "with kernel %(kernel_href)s, ramdisk %(ramdisk_href)s, "
- "bootloader %(bootloader_href)s and kernel params %(params)s"
- "", {'node': task.node.uuid,
- 'boot_mode': boot_mode,
- 'kernel_href': kernel_href,
- 'ramdisk_href': ramdisk_href,
- 'bootloader_href': bootloader_href,
- 'params': kernel_params})
+ kernel_params = None
- with tempfile.NamedTemporaryFile(
- dir=CONF.tempdir, suffix='.iso') as boot_fileobj:
+ else:
+ kernel_params = i_info.get(
+ 'kernel_append_params', CONF.redfish.kernel_append_params)
- with tempfile.NamedTemporaryFile(
- dir=CONF.tempdir, suffix='.img') as cfgdrv_fileobj:
+ if params and not base_iso:
+ kernel_params = ' '.join(
+ (kernel_params, ' '.join(
+ '%s=%s' % kv for kv in params.items())))
- configdrive_href = configdrive
+ boot_mode = boot_mode_utils.get_boot_mode_for_deploy(task.node)
- if configdrive:
- parsed_url = urlparse.urlparse(configdrive)
- if not parsed_url.scheme:
- cfgdrv_blob = base64.decode_as_bytes(configdrive)
+ LOG.debug("Trying to create %(boot_mode)s ISO image for node %(node)s "
+ "with kernel %(kernel_href)s, ramdisk %(ramdisk_href)s, "
+ "bootloader %(bootloader_href)s and kernel params %(params)s"
+ "", {'node': task.node.uuid,
+ 'boot_mode': boot_mode,
+ 'kernel_href': kernel_href,
+ 'ramdisk_href': ramdisk_href,
+ 'bootloader_href': bootloader_href,
+ 'params': kernel_params})
- with open(cfgdrv_fileobj.name, 'wb') as f:
- f.write(cfgdrv_blob)
+ with tempfile.NamedTemporaryFile(
+ dir=CONF.tempdir, suffix='.iso') as boot_fileobj:
- configdrive_href = urlparse.urlunparse(
- ('file', '', cfgdrv_fileobj.name, '', '', ''))
+ with tempfile.NamedTemporaryFile(
+ dir=CONF.tempdir, suffix='.img') as cfgdrv_fileobj:
+
+ configdrive_href = configdrive
+
+            # FIXME(TheJulia): This is conditional on base_iso because
+            # the intent, eventually, is to support injection into the
+            # supplied image.
+
+ if configdrive and not base_iso:
+ parsed_url = urlparse.urlparse(configdrive)
+ if not parsed_url.scheme:
+ cfgdrv_blob = base64.decode_as_bytes(configdrive)
+
+ with open(cfgdrv_fileobj.name, 'wb') as f:
+ f.write(cfgdrv_blob)
+
+ configdrive_href = urlparse.urlunparse(
+ ('file', '', cfgdrv_fileobj.name, '', '', ''))
+
+ LOG.debug("Built configdrive out of configdrive blob "
+ "for node %(node)s", {'node': task.node.uuid})
+
+ boot_iso_tmp_file = boot_fileobj.name
+ images.create_boot_iso(
+ task.context, boot_iso_tmp_file,
+ kernel_href, ramdisk_href,
+ esp_image_href=bootloader_href,
+ configdrive_href=configdrive_href,
+ root_uuid=root_uuid,
+ kernel_params=kernel_params,
+ boot_mode=boot_mode,
+ base_iso=base_iso)
+
+ iso_object_name = _get_iso_image_name(task.node)
+
+ image_url = _publish_image(
+ boot_iso_tmp_file, iso_object_name)
+
+ LOG.debug("Created ISO %(name)s in object store for node %(node)s, "
+ "exposed as temporary URL "
+ "%(url)s", {'node': task.node.uuid,
+ 'name': iso_object_name,
+ 'url': image_url})
+
+ return image_url
+
+
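For reference, the kernel-parameter merging in _prepare_iso_image() reduces to a single join over the params dict; a minimal standalone sketch of the same technique (all values hypothetical):

    # Hypothetical values; only the join technique mirrors the code above.
    kernel_params = 'nofb nomodeset'
    params = {'ipa-debug': 1, 'boot_method': 'vmedia'}

    kernel_params = ' '.join(
        (kernel_params, ' '.join('%s=%s' % kv for kv in params.items())))
    # -> 'nofb nomodeset ipa-debug=1 boot_method=vmedia'
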
+def _prepare_deploy_iso(task, params, mode):
+ """Prepare deploy or rescue ISO image
+
+ Build bootable ISO out of
+ `[driver_info]/deploy_kernel`/`[driver_info]/deploy_ramdisk` or
+ `[driver_info]/rescue_kernel`/`[driver_info]/rescue_ramdisk`
+ and `[driver_info]/bootloader`, then push the built image up to Glance
+ and return a temporary Swift URL to it.
+
+ If the network interface supplies network configuration (`network_data`),
+ a new `configdrive` is created with `network_data.json` inside and
+ eventually written onto the boot ISO.
+
+ :param task: a TaskManager instance containing the node to act on.
+ :param params: a dictionary containing 'parameter name'->'value'
+ mapping to be passed to the kernel command line.
+ :param mode: either 'deploy' or 'rescue'.
+ :returns: bootable ISO HTTP URL.
+ :raises: MissingParameterValue, if any of the required parameters are
+ missing.
+ :raises: InvalidParameterValue, if any of the parameters have invalid
+ value.
+ :raises: ImageCreationFailed, if creating ISO image failed.
+ """
+ node = task.node
- LOG.info("Burning configdrive %(url)s to boot ISO image "
- "for node %(node)s", {'url': configdrive_href,
- 'node': task.node.uuid})
+ d_info = _parse_driver_info(node)
- boot_iso_tmp_file = boot_fileobj.name
- images.create_boot_iso(
- task.context, boot_iso_tmp_file,
- kernel_href, ramdisk_href,
- esp_image_href=bootloader_href,
- configdrive_href=configdrive_href,
- root_uuid=root_uuid,
- kernel_params=kernel_params,
- boot_mode=boot_mode)
+ kernel_href = d_info.get('%s_kernel' % mode)
+ ramdisk_href = d_info.get('%s_ramdisk' % mode)
+ bootloader_href = d_info.get('bootloader')
- iso_object_name = cls._get_iso_image_name(task.node)
+ # TODO(TheJulia): At some point we should support something like
+ # boot_iso for the deploy interface, perhaps when we support config
+ # injection.
+ prepare_iso_image = functools.partial(
+ _prepare_iso_image, task, kernel_href, ramdisk_href,
+ bootloader_href=bootloader_href, params=params)
- image_url = cls._publish_image(
- boot_iso_tmp_file, iso_object_name)
+ network_data = task.driver.network.get_node_network_data(task)
+ if network_data:
+ with tempfile.NamedTemporaryFile(
+ dir=CONF.tempdir, suffix='.iso') as metadata_fileobj:
- LOG.debug("Created ISO %(name)s in object store for node %(node)s, "
- "exposed as temporary URL "
- "%(url)s", {'node': task.node.uuid,
- 'name': iso_object_name,
- 'url': image_url})
+ with open(metadata_fileobj.name, 'w') as f:
+ json.dump(network_data, f, indent=2)
- return image_url
+ files_info = {
+ metadata_fileobj.name: 'openstack/latest/metadata/network_data.json'
+ }
- @classmethod
- def _prepare_deploy_iso(cls, task, params, mode):
- """Prepare deploy or rescue ISO image
+ with tempfile.NamedTemporaryFile(
+ dir=CONF.tempdir, suffix='.img') as cfgdrv_fileobj:
- Build bootable ISO out of
- `[driver_info]/deploy_kernel`/`[driver_info]/deploy_ramdisk` or
- `[driver_info]/rescue_kernel`/`[driver_info]/rescue_ramdisk`
- and `[driver_info]/bootloader`, then push built image up to Glance
- and return temporary Swift URL to the image.
+ images.create_vfat_image(cfgdrv_fileobj.name, files_info)
- :param task: a TaskManager instance containing the node to act on.
- :param params: a dictionary containing 'parameter name'->'value'
- mapping to be passed to kernel command line.
- :param mode: either 'deploy' or 'rescue'.
- :returns: bootable ISO HTTP URL.
- :raises: MissingParameterValue, if any of the required parameters are
- missing.
- :raises: InvalidParameterValue, if any of the parameters have invalid
- value.
- :raises: ImageCreationFailed, if creating ISO image failed.
- """
- node = task.node
+ configdrive_href = urlparse.urlunparse(
+ ('file', '', cfgdrv_fileobj.name, '', '', ''))
- d_info = cls._parse_driver_info(node)
+ LOG.debug("Built configdrive %(name)s out of network data "
+ "for node %(node)s", {'name': configdrive_href,
+ 'node': task.node.uuid})
- kernel_href = d_info.get('%s_kernel' % mode)
- ramdisk_href = d_info.get('%s_ramdisk' % mode)
- bootloader_href = d_info.get('bootloader')
+ return prepare_iso_image(configdrive=configdrive_href)
- return cls._prepare_iso_image(
- task, kernel_href, ramdisk_href, bootloader_href, params=params)
+ return prepare_iso_image()
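The functools.partial above pre-binds everything except the config drive, so both return paths invoke the same helper. A minimal sketch of the pattern under simplified assumptions (_prepare stands in for _prepare_iso_image):

    import functools

    def _prepare(kernel, ramdisk, configdrive=None):
        # Stand-in for _prepare_iso_image(); just echoes its arguments.
        return kernel, ramdisk, configdrive

    prepare = functools.partial(_prepare, 'kernel-url', 'ramdisk-url')

    prepare()                             # no network data supplied
    prepare(configdrive='file:///tmp/x')  # configdrive built from network data
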
- @classmethod
- def _prepare_boot_iso(cls, task, root_uuid=None):
- """Prepare boot ISO image
- Build bootable ISO out of `[instance_info]/kernel`,
- `[instance_info]/ramdisk` and `[driver_info]/bootloader` if present.
- Otherwise, read `kernel_id` and `ramdisk_id` from
- `[instance_info]/image_source` Glance image metadata.
+def _prepare_boot_iso(task, root_uuid=None):
+ """Prepare boot ISO image
- Push produced ISO image up to Glance and return temporary Swift
- URL to the image.
+ Build bootable ISO out of `[instance_info]/kernel`,
+ `[instance_info]/ramdisk` and `[driver_info]/bootloader` if present.
+ Otherwise, read `kernel_id` and `ramdisk_id` from
+ `[instance_info]/image_source` Glance image metadata.
- :param task: a TaskManager instance containing the node to act on.
- :returns: bootable ISO HTTP URL.
- :raises: MissingParameterValue, if any of the required parameters are
- missing.
- :raises: InvalidParameterValue, if any of the parameters have invalid
- value.
- :raises: ImageCreationFailed, if creating ISO image failed.
- """
- node = task.node
+ Push the produced ISO image up to Glance and return a temporary Swift
+ URL to it.
- d_info = cls._parse_deploy_info(node)
+ :param task: a TaskManager instance containing the node to act on.
+ :returns: bootable ISO HTTP URL.
+ :raises: MissingParameterValue, if any of the required parameters are
+ missing.
+ :raises: InvalidParameterValue, if any of the parameters have invalid
+ value.
+ :raises: ImageCreationFailed, if creating ISO image failed.
+ """
+ node = task.node
+
+ d_info = _parse_deploy_info(node)
- kernel_href = node.instance_info.get('kernel')
- ramdisk_href = node.instance_info.get('ramdisk')
+ kernel_href = node.instance_info.get('kernel')
+ ramdisk_href = node.instance_info.get('ramdisk')
+ base_iso = node.instance_info.get('boot_iso')
- if not kernel_href or not ramdisk_href:
+ if (not kernel_href or not ramdisk_href) and not base_iso:
- image_href = d_info['image_source']
+ image_href = d_info['image_source']
- image_properties = (
- images.get_image_properties(
- task.context, image_href, ['kernel_id', 'ramdisk_id']))
+ image_properties = (
+ images.get_image_properties(
+ task.context, image_href, ['kernel_id', 'ramdisk_id']))
- if not kernel_href:
- kernel_href = image_properties.get('kernel_id')
+ if not kernel_href:
+ kernel_href = image_properties.get('kernel_id')
- if not ramdisk_href:
- ramdisk_href = image_properties.get('ramdisk_id')
+ if not ramdisk_href:
+ ramdisk_href = image_properties.get('ramdisk_id')
- if not kernel_href or not ramdisk_href:
+ if (not kernel_href or not ramdisk_href):
raise exception.InvalidParameterValue(_(
"Unable to find kernel or ramdisk for "
"to generate boot ISO for %(node)s") %
{'node': task.node.uuid})
- bootloader_href = d_info.get('bootloader')
+ bootloader_href = d_info.get('bootloader')
+
+ return _prepare_iso_image(
+ task, kernel_href, ramdisk_href, bootloader_href,
+ root_uuid=root_uuid, base_iso=base_iso)
+
+
+class RedfishVirtualMediaBoot(base.BootInterface):
+ """Virtual media boot interface over Redfish.
+
+ Virtual Media allows booting the system from the "virtual"
+ CD/DVD drive containing the user image that BMC "inserts"
+ into the drive.
+
+ The CD/DVD images must be in ISO format and (depending on
+ BMC implementation) could be pulled over HTTP, served as
+ iSCSI targets or NFS volumes.
+
+ The baseline boot workflow looks like this:
+
+ 1. Pull kernel, ramdisk and ESP (FAT partition image with EFI boot
+ loader) images (ESP is only needed for UEFI boot)
+ 2. Create a bootable ISO out of the images from #1, push it to Glance and
+ pass it to the BMC as a Swift temporary URL
+ 3. Optionally, create a floppy image with the desired system configuration
+ data, push it to Glance and pass it to the BMC as a Swift temporary URL
+ 4. Insert CD/DVD and (optionally) floppy images and set proper boot mode
+
+ For building a deploy or rescue ISO, the redfish boot interface uses the
+ `deploy_kernel`/`deploy_ramdisk` or `rescue_kernel`/`rescue_ramdisk`
+ properties from `[instance_info]` or `[driver_info]`.
+
+ For building the boot (user) ISO, the redfish boot interface looks for
+ the `kernel_id` and `ramdisk_id` properties in the Glance image metadata
+ referenced by the `[instance_info]/image_source` node property.
+ """
+
+ capabilities = ['iscsi_volume_boot', 'ramdisk_boot']
- return cls._prepare_iso_image(
- task, kernel_href, ramdisk_href, bootloader_href,
- root_uuid=root_uuid)
+ def __init__(self):
+ """Initialize the Redfish virtual media boot interface.
+
+ :raises: DriverLoadError if the driver can't be loaded due to
+ missing dependencies
+ """
+ super(RedfishVirtualMediaBoot, self).__init__()
+ if not sushy:
+ raise exception.DriverLoadError(
+ driver='redfish',
+ reason=_('Unable to import the sushy library'))
def get_properties(self):
"""Return the properties of the interface.
@@ -609,8 +749,7 @@ class RedfishVirtualMediaBoot(base.BootInterface):
"""
return REQUIRED_PROPERTIES
- @classmethod
- def _validate_driver_info(cls, task):
+ def _validate_driver_info(self, task):
"""Validate the prerequisites for virtual media based boot.
This method validates whether the 'driver_info' property of the
@@ -623,10 +762,9 @@ class RedfishVirtualMediaBoot(base.BootInterface):
"""
node = task.node
- cls._parse_driver_info(node)
+ _parse_driver_info(node)
- @classmethod
- def _validate_instance_info(cls, task):
+ def _validate_instance_info(self, task):
"""Validate instance image information for the task's node.
This method validates whether the 'instance_info' property of the
@@ -639,11 +777,12 @@ class RedfishVirtualMediaBoot(base.BootInterface):
"""
node = task.node
- d_info = cls._parse_deploy_info(node)
+ d_info = _parse_deploy_info(node)
if node.driver_internal_info.get('is_whole_disk_image'):
props = []
-
+ elif d_info.get('boot_iso'):
+ props = ['boot_iso']
elif service_utils.is_glance_image(d_info['image_source']):
props = ['kernel_id', 'ramdisk_id']
@@ -720,7 +859,7 @@ class RedfishVirtualMediaBoot(base.BootInterface):
manager_utils.node_power_action(task, states.POWER_OFF)
- d_info = self._parse_driver_info(node)
+ d_info = _parse_driver_info(node)
config_via_floppy = d_info.get('config_via_floppy')
@@ -731,16 +870,16 @@ class RedfishVirtualMediaBoot(base.BootInterface):
if config_via_floppy:
- if self._has_vmedia_device(task, sushy.VIRTUAL_MEDIA_FLOPPY):
+ if _has_vmedia_device(task, sushy.VIRTUAL_MEDIA_FLOPPY):
# NOTE (etingof): IPA will read the diskette only if
# we tell it to
ramdisk_params['boot_method'] = 'vmedia'
- floppy_ref = self._prepare_floppy_image(
+ floppy_ref = _prepare_floppy_image(
task, params=ramdisk_params)
- self._eject_vmedia(task, sushy.VIRTUAL_MEDIA_FLOPPY)
- self._insert_vmedia(
+ _eject_vmedia(task, sushy.VIRTUAL_MEDIA_FLOPPY)
+ _insert_vmedia(
task, floppy_ref, sushy.VIRTUAL_MEDIA_FLOPPY)
LOG.debug('Inserted virtual floppy with configuration for '
@@ -753,10 +892,10 @@ class RedfishVirtualMediaBoot(base.BootInterface):
mode = deploy_utils.rescue_or_deploy_mode(node)
- iso_ref = self._prepare_deploy_iso(task, ramdisk_params, mode)
+ iso_ref = _prepare_deploy_iso(task, ramdisk_params, mode)
- self._eject_vmedia(task, sushy.VIRTUAL_MEDIA_CD)
- self._insert_vmedia(task, iso_ref, sushy.VIRTUAL_MEDIA_CD)
+ _eject_vmedia(task, sushy.VIRTUAL_MEDIA_CD)
+ _insert_vmedia(task, iso_ref, sushy.VIRTUAL_MEDIA_CD)
boot_mode_utils.sync_boot_mode(task)
@@ -775,22 +914,21 @@ class RedfishVirtualMediaBoot(base.BootInterface):
:param task: A task from TaskManager.
:returns: None
"""
- node = task.node
-
- d_info = self._parse_driver_info(node)
+ d_info = _parse_driver_info(task.node)
config_via_floppy = d_info.get('config_via_floppy')
LOG.debug("Cleaning up deploy boot for "
"%(node)s", {'node': task.node.uuid})
- self._eject_vmedia(task, sushy.VIRTUAL_MEDIA_CD)
- self._cleanup_iso_image(task)
+ _eject_vmedia(task, sushy.VIRTUAL_MEDIA_CD)
+ _cleanup_iso_image(task)
if (config_via_floppy
- and self._has_vmedia_device(task, sushy.VIRTUAL_MEDIA_FLOPPY)):
- self._eject_vmedia(task, sushy.VIRTUAL_MEDIA_FLOPPY)
- self._cleanup_floppy_image(task)
+ and _has_vmedia_device(task, sushy.VIRTUAL_MEDIA_FLOPPY)):
+ _eject_vmedia(task, sushy.VIRTUAL_MEDIA_FLOPPY)
+
+ _cleanup_floppy_image(task)
def prepare_instance(self, task):
"""Prepares the boot of instance over virtual media.
@@ -815,12 +953,10 @@ class RedfishVirtualMediaBoot(base.BootInterface):
node = task.node
boot_option = deploy_utils.get_boot_option(node)
-
self.clean_up_instance(task)
iwdi = node.driver_internal_info.get('is_whole_disk_image')
if boot_option == "local" or iwdi:
- self._set_boot_device(
- task, boot_devices.DISK, persistent=True)
+ self._set_boot_device(task, boot_devices.DISK, persistent=True)
LOG.debug("Node %(node)s is set to permanently boot from local "
"%(device)s", {'node': task.node.uuid,
@@ -831,28 +967,24 @@ class RedfishVirtualMediaBoot(base.BootInterface):
if boot_option != 'ramdisk':
root_uuid = node.driver_internal_info.get('root_uuid_or_disk_id')
-
if not root_uuid and task.driver.storage.should_write_image(task):
LOG.warning(
"The UUID of the root partition could not be found for "
"node %s. Booting instance from disk anyway.", node.uuid)
- self._set_boot_device(
- task, boot_devices.DISK, persistent=True)
+ self._set_boot_device(task, boot_devices.DISK, persistent=True)
return
params.update(root_uuid=root_uuid)
- iso_ref = self._prepare_boot_iso(task, **params)
-
- self._eject_vmedia(task, sushy.VIRTUAL_MEDIA_CD)
- self._insert_vmedia(task, iso_ref, sushy.VIRTUAL_MEDIA_CD)
+ iso_ref = _prepare_boot_iso(task, **params)
+ _eject_vmedia(task, sushy.VIRTUAL_MEDIA_CD)
+ _insert_vmedia(task, iso_ref, sushy.VIRTUAL_MEDIA_CD)
boot_mode_utils.sync_boot_mode(task)
- self._set_boot_device(
- task, boot_devices.CDROM, persistent=True)
+ self._set_boot_device(task, boot_devices.CDROM, persistent=True)
LOG.debug("Node %(node)s is set to permanently boot from "
"%(device)s", {'node': task.node.uuid,
@@ -870,99 +1002,13 @@ class RedfishVirtualMediaBoot(base.BootInterface):
LOG.debug("Cleaning up instance boot for "
"%(node)s", {'node': task.node.uuid})
- self._eject_vmedia(task, sushy.VIRTUAL_MEDIA_CD)
+ _eject_vmedia(task, sushy.VIRTUAL_MEDIA_CD)
d_info = task.node.driver_info
config_via_floppy = d_info.get('config_via_floppy')
if config_via_floppy:
- self._eject_vmedia(task, sushy.VIRTUAL_MEDIA_FLOPPY)
-
- self._cleanup_iso_image(task)
-
- @staticmethod
- def _insert_vmedia(task, boot_url, boot_device):
- """Insert bootable ISO image into virtual CD or DVD
-
- :param task: A task from TaskManager.
- :param boot_url: URL to a bootable ISO image
- :param boot_device: sushy boot device e.g. `VIRTUAL_MEDIA_CD`,
- `VIRTUAL_MEDIA_DVD` or `VIRTUAL_MEDIA_FLOPPY`
- :raises: InvalidParameterValue, if no suitable virtual CD or DVD is
- found on the node.
- """
- system = redfish_utils.get_system(task.node)
-
- for manager in system.managers:
- for v_media in manager.virtual_media.get_members():
- if boot_device not in v_media.media_types:
- continue
-
- if v_media.inserted:
- if v_media.image == boot_url:
- LOG.debug("Boot media %(boot_url)s is already "
- "inserted into %(boot_device)s for node "
- "%(node)s", {'node': task.node.uuid,
- 'boot_url': boot_url,
- 'boot_device': boot_device})
- return
-
- continue
-
- v_media.insert_media(boot_url, inserted=True,
- write_protected=True)
-
- LOG.info("Inserted boot media %(boot_url)s into "
- "%(boot_device)s for node "
- "%(node)s", {'node': task.node.uuid,
- 'boot_url': boot_url,
- 'boot_device': boot_device})
- return
-
- raise exception.InvalidParameterValue(
- _('No suitable virtual media device found'))
-
- @staticmethod
- def _eject_vmedia(task, boot_device=None):
- """Eject virtual CDs and DVDs
-
- :param task: A task from TaskManager.
- :param boot_device: sushy boot device e.g. `VIRTUAL_MEDIA_CD`,
- `VIRTUAL_MEDIA_DVD` or `VIRTUAL_MEDIA_FLOPPY` or `None` to
- eject everything (default).
- :raises: InvalidParameterValue, if no suitable virtual CD or DVD is
- found on the node.
- """
- system = redfish_utils.get_system(task.node)
-
- for manager in system.managers:
- for v_media in manager.virtual_media.get_members():
- if boot_device and boot_device not in v_media.media_types:
- continue
-
- inserted = v_media.inserted
-
- if inserted:
- v_media.eject_media()
-
- LOG.info("Boot media is%(already)s ejected from "
- "%(boot_device)s for node %(node)s"
- "", {'node': task.node.uuid,
- 'already': '' if inserted else ' already',
- 'boot_device': v_media.name})
-
- @staticmethod
- def _has_vmedia_device(task, boot_device):
- """Indicate if device exists at any of the managers
-
- :param task: A task from TaskManager.
- :param boot_device: sushy boot device e.g. `VIRTUAL_MEDIA_CD`,
- `VIRTUAL_MEDIA_DVD` or `VIRTUAL_MEDIA_FLOPPY`.
- """
- system = redfish_utils.get_system(task.node)
+ _eject_vmedia(task, sushy.VIRTUAL_MEDIA_FLOPPY)
- for manager in system.managers:
- for v_media in manager.virtual_media.get_members():
- if boot_device in v_media.media_types:
- return True
+ _cleanup_iso_image(task)
@classmethod
def _set_boot_device(cls, task, device, persistent=False):
diff --git a/ironic/drivers/modules/redfish/management.py b/ironic/drivers/modules/redfish/management.py
index 4026da48c..22ef03b49 100644
--- a/ironic/drivers/modules/redfish/management.py
+++ b/ironic/drivers/modules/redfish/management.py
@@ -24,6 +24,7 @@ from ironic.common import components
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import indicator_states
+from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers.modules.redfish import utils as redfish_utils
@@ -68,6 +69,51 @@ if sushy:
v: k for k, v in INDICATOR_MAP.items()}
+def _set_boot_device(task, system, device, persistent=False):
+ """An internal routine to set the boot device.
+
+ :param task: a task from TaskManager.
+ :param system: a Redfish System object.
+ :param device: the Redfish boot device.
+ :param persistent: Boolean value. True if the boot device will
+ persist to all future boots, False if not.
+ Default: False.
+ :raises: SushyError on an error from the Sushy library
+ """
+ desired_enabled = BOOT_DEVICE_PERSISTENT_MAP_REV[persistent]
+ current_enabled = system.boot.get('enabled')
+
+ # NOTE(etingof): this can be racy, esp if BMC is not RESTful
+ enabled = (desired_enabled
+ if desired_enabled != current_enabled else None)
+
+ try:
+ system.set_system_boot_options(device, enabled=enabled)
+ except sushy.exceptions.SushyError as e:
+ if enabled == sushy.BOOT_SOURCE_ENABLED_CONTINUOUS:
+ # NOTE(dtantsur): continuous boot device settings have been
+ # removed from Redfish, and some vendors stopped supporting
+ # them before an alternative was provided. As a workaround,
+ # use one-time boot and restore the boot device on every
+ # reboot via RedfishPower.
+ LOG.debug('Error %(error)s when trying to set a '
+ 'persistent boot device on node %(node)s, '
+ 'falling back to one-time boot settings',
+ {'error': e, 'node': task.node.uuid})
+ system.set_system_boot_options(
+ device, enabled=sushy.BOOT_SOURCE_ENABLED_ONCE)
+ LOG.warning('Could not set persistent boot device to '
+ '%(dev)s for node %(node)s, using one-time '
+ 'boot device instead',
+ {'dev': device, 'node': task.node.uuid})
+ utils.set_node_nested_field(
+ task.node, 'driver_internal_info',
+ 'redfish_boot_device', device)
+ task.node.save()
+ else:
+ raise
+
+
class RedfishManagement(base.ManagementInterface):
def __init__(self):
@@ -108,6 +154,33 @@ class RedfishManagement(base.ManagementInterface):
return list(BOOT_DEVICE_MAP_REV)
@task_manager.require_exclusive_lock
+ def restore_boot_device(self, task, system):
+ """Restore boot device if needed.
+
+ Checks the redfish_boot_device internal flag and sets the one-time
+ boot device accordingly. A warning is issued if it fails.
+
+ This method is supposed to be called from the Redfish power interface
+ and should be considered private to the Redfish hardware type.
+
+ :param task: a task from TaskManager.
+ :param system: a Redfish System object.
+ """
+ device = task.node.driver_internal_info.get('redfish_boot_device')
+ if not device:
+ return
+
+ LOG.debug('Restoring boot device %(dev)s on node %(node)s',
+ {'dev': device, 'node': task.node.uuid})
+ try:
+ _set_boot_device(task, system, device)
+ except sushy.exceptions.SushyError as e:
+ LOG.warning('Unable to recover boot device %(dev)s for node '
+ '%(node)s, relying on the pre-configured boot order. '
+ 'Error: %(error)s',
+ {'dev': device, 'node': task.node.uuid, 'error': e})
+
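Taken together, _set_boot_device() and restore_boot_device() form a round trip: if the BMC rejects the persistent setting, the device is stashed in driver_internal_info and re-applied as a one-time device on every subsequent power-on. A condensed sketch of that behavior (node and system are hypothetical stand-ins, not real ironic objects):

    def set_device(node, system, device):
        try:
            system.set_boot(device, persistent=True)
        except RuntimeError:
            # Persistent setting unsupported: fall back to one-time boot
            # and remember the device for later restores.
            system.set_boot(device, persistent=False)
            node.driver_internal_info['redfish_boot_device'] = device

    def on_power_on(node, system):
        device = node.driver_internal_info.get('redfish_boot_device')
        if device:
            # Re-apply the one-time boot device on every power-on.
            system.set_boot(device, persistent=False)
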
+ @task_manager.require_exclusive_lock
def set_boot_device(self, task, device, persistent=False):
"""Set the boot device for a node.
@@ -124,19 +197,16 @@ class RedfishManagement(base.ManagementInterface):
:raises: RedfishConnectionError when it fails to connect to Redfish
:raises: RedfishError on an error from the Sushy library
"""
- system = redfish_utils.get_system(task.node)
+ utils.pop_node_nested_field(
+ task.node, 'driver_internal_info', 'redfish_boot_device')
+ task.node.save()
- desired_persistence = BOOT_DEVICE_PERSISTENT_MAP_REV[persistent]
- current_persistence = system.boot.get('enabled')
-
- # NOTE(etingof): this can be racy, esp if BMC is not RESTful
- enabled = (desired_persistence
- if desired_persistence != current_persistence else None)
+ system = redfish_utils.get_system(task.node)
try:
- system.set_system_boot_options(
- BOOT_DEVICE_MAP_REV[device], enabled=enabled)
-
+ _set_boot_device(
+ task, system, BOOT_DEVICE_MAP_REV[device],
+ persistent=persistent)
except sushy.exceptions.SushyError as e:
error_msg = (_('Redfish set boot device failed for node '
'%(node)s. Error: %(error)s') %
diff --git a/ironic/drivers/modules/redfish/power.py b/ironic/drivers/modules/redfish/power.py
index 8f568bfb9..0a6f3f338 100644
--- a/ironic/drivers/modules/redfish/power.py
+++ b/ironic/drivers/modules/redfish/power.py
@@ -22,6 +22,7 @@ from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as cond_utils
from ironic.drivers import base
+from ironic.drivers.modules.redfish import management as redfish_mgmt
from ironic.drivers.modules.redfish import utils as redfish_utils
LOG = log.getLogger(__name__)
@@ -51,6 +52,23 @@ TARGET_STATE_MAP = {
}
+def _set_power_state(task, system, power_state, timeout=None):
+ """An internal helper to set a power state on the system.
+
+ :param task: a TaskManager instance containing the node to act on.
+ :param system: a Redfish System object.
+ :param power_state: Any power state from :mod:`ironic.common.states`.
+ :param timeout: Time to wait for the node to reach the requested state.
+ :raises: MissingParameterValue if a required parameter is missing.
+ :raises: RedfishConnectionError when it fails to connect to Redfish
+ :raises: RedfishError on an error from the Sushy library
+ """
+ system.reset_system(SET_POWER_STATE_MAP.get(power_state))
+ target_state = TARGET_STATE_MAP.get(power_state, power_state)
+ cond_utils.node_wait_for_power_state(task, target_state,
+ timeout=timeout)
+
+
class RedfishPower(base.PowerInterface):
def __init__(self):
@@ -106,19 +124,22 @@ class RedfishPower(base.PowerInterface):
:raises: RedfishError on an error from the Sushy library
"""
system = redfish_utils.get_system(task.node)
+
+ if (power_state in (states.POWER_ON, states.SOFT_REBOOT, states.REBOOT)
+ and isinstance(task.driver.management,
+ redfish_mgmt.RedfishManagement)):
+ task.driver.management.restore_boot_device(task, system)
+
try:
- system.reset_system(SET_POWER_STATE_MAP.get(power_state))
+ _set_power_state(task, system, power_state, timeout=timeout)
except sushy.exceptions.SushyError as e:
- error_msg = (_('Redfish set power state failed for node '
+ error_msg = (_('Setting power state to %(state)s failed for node '
'%(node)s. Error: %(error)s') %
- {'node': task.node.uuid, 'error': e})
+ {'node': task.node.uuid, 'state': power_state,
+ 'error': e})
LOG.error(error_msg)
raise exception.RedfishError(error=error_msg)
- target_state = TARGET_STATE_MAP.get(power_state, power_state)
- cond_utils.node_wait_for_power_state(task, target_state,
- timeout=timeout)
-
@task_manager.require_exclusive_lock
def reboot(self, task, timeout=None):
"""Perform a hard reboot of the task's node.
@@ -134,19 +155,23 @@ class RedfishPower(base.PowerInterface):
try:
if current_power_state == states.POWER_ON:
- system.reset_system(SET_POWER_STATE_MAP.get(states.REBOOT))
- else:
- system.reset_system(SET_POWER_STATE_MAP.get(states.POWER_ON))
+ next_state = states.POWER_OFF
+ _set_power_state(task, system, next_state, timeout=timeout)
+
+ if isinstance(task.driver.management,
+ redfish_mgmt.RedfishManagement):
+ task.driver.management.restore_boot_device(task, system)
+
+ next_state = states.POWER_ON
+ _set_power_state(task, system, next_state, timeout=timeout)
except sushy.exceptions.SushyError as e:
- error_msg = (_('Redfish reboot failed for node %(node)s. '
- 'Error: %(error)s') % {'node': task.node.uuid,
- 'error': e})
+ error_msg = (_('Reboot failed for node %(node)s when setting '
+ 'power state to %(state)s. Error: %(error)s') %
+ {'node': task.node.uuid, 'state': next_state,
+ 'error': e})
LOG.error(error_msg)
raise exception.RedfishError(error=error_msg)
- cond_utils.node_wait_for_power_state(task, states.POWER_ON,
- timeout=timeout)
-
def get_supported_power_states(self, task):
"""Get a list of the supported power states.
diff --git a/ironic/drivers/modules/snmp.py b/ironic/drivers/modules/snmp.py
index 008886b90..ab6c3ade4 100644
--- a/ironic/drivers/modules/snmp.py
+++ b/ironic/drivers/modules/snmp.py
@@ -554,15 +554,18 @@ class SNMPDriverSimple(SNMPDriverBase):
super(SNMPDriverSimple, self).__init__(*args, **kwargs)
self.oid = self._snmp_oid()
- @abc.abstractproperty
+ @property
+ @abc.abstractmethod
def oid_device(self):
"""Device dependent portion of the power state object OID."""
- @abc.abstractproperty
+ @property
+ @abc.abstractmethod
def value_power_on(self):
"""Value representing power on state."""
- @abc.abstractproperty
+ @property
+ @abc.abstractmethod
def value_power_off(self):
"""Value representing power off state."""
diff --git a/ironic/drivers/redfish.py b/ironic/drivers/redfish.py
index fe082cfe4..51e34d6be 100644
--- a/ironic/drivers/redfish.py
+++ b/ironic/drivers/redfish.py
@@ -17,6 +17,7 @@ from ironic.drivers import generic
from ironic.drivers.modules import inspector
from ironic.drivers.modules import ipxe
from ironic.drivers.modules import noop
+from ironic.drivers.modules import noop_mgmt
from ironic.drivers.modules import pxe
from ironic.drivers.modules.redfish import bios as redfish_bios
from ironic.drivers.modules.redfish import boot as redfish_boot
@@ -36,7 +37,7 @@ class RedfishHardware(generic.GenericHardware):
@property
def supported_management_interfaces(self):
"""List of supported management interfaces."""
- return [redfish_mgmt.RedfishManagement]
+ return [redfish_mgmt.RedfishManagement, noop_mgmt.NoopManagement]
@property
def supported_power_interfaces(self):
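Order matters in these lists: unless overridden in ironic.conf, the first supported interface that is also enabled becomes the default, so appending NoopManagement keeps RedfishManagement the default while letting operators opt out of BMC-side management. An illustrative sketch of "first supported and enabled wins" (not ironic's actual resolver):

    # Illustrative only; the real resolution lives in ironic's driver factory.
    supported = ['redfish', 'noop']      # order from the property above
    enabled = {'noop', 'redfish'}        # operator-enabled interfaces

    default = next(i for i in supported if i in enabled)
    assert default == 'redfish'
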
diff --git a/ironic/drivers/utils.py b/ironic/drivers/utils.py
index 99b2a7665..e61d7ab86 100644
--- a/ironic/drivers/utils.py
+++ b/ironic/drivers/utils.py
@@ -323,7 +323,7 @@ def store_ramdisk_logs(node, logs, label=None):
f.name, object_headers=object_headers)
-def collect_ramdisk_logs(node):
+def collect_ramdisk_logs(node, label=None):
"""Collect and store the system logs from the IPA ramdisk.
Collect and store the system logs from the IPA ramdisk. This method
@@ -331,8 +331,11 @@ def collect_ramdisk_logs(node):
according to the configured storage backend.
:param node: A node object.
-
+ :param label: A string to label the log file, such as a clean step name.
"""
+ if CONF.agent.deploy_logs_collect == 'never':
+ return
+
client = agent_client.AgentClient()
try:
result = client.collect_system_logs(node)
@@ -350,7 +353,8 @@ def collect_ramdisk_logs(node):
return
try:
- store_ramdisk_logs(node, result['command_result']['system_logs'])
+ store_ramdisk_logs(node, result['command_result']['system_logs'],
+ label=label)
except exception.SwiftOperationError as e:
LOG.error('Failed to store the logs from the node %(node)s '
'deployment in Swift. Error: %(error)s',
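With the new label argument, callers can tag collected ramdisk logs by phase; a hypothetical call site (the label value is illustrative):

    from ironic.drivers import utils as driver_utils

    # Tag the stored log archive with the phase that produced it.
    driver_utils.collect_ramdisk_logs(task.node, label='cleaning')
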
diff --git a/ironic/objects/fields.py b/ironic/objects/fields.py
index 528e998b2..1b7778945 100644
--- a/ironic/objects/fields.py
+++ b/ironic/objects/fields.py
@@ -14,8 +14,6 @@
# under the License.
import ast
-import hashlib
-import inspect
from oslo_versionedobjects import fields as object_fields
@@ -57,10 +55,7 @@ class StringFieldThatAcceptsCallable(object_fields.StringField):
default = self._default
if (self._default != object_fields.UnspecifiedDefault
and callable(self._default)):
- default = "%s-%s" % (
- self._default.__name__,
- hashlib.md5(inspect.getsource(
- self._default).encode()).hexdigest())
+ default = '<function %s>' % default.__name__
return '%s(default=%s,nullable=%s)' % (self._type.__class__.__name__,
default, self._nullable)
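The simplified repr no longer hashes the callable's source; a field whose default is a function now renders along these lines (example values, not from the code base):

    def generate_uuid():
        return '...'

    default = generate_uuid
    print('<function %s>' % default.__name__)  # -> <function generate_uuid>
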
diff --git a/ironic/objects/node.py b/ironic/objects/node.py
index cdb02ebb9..0392ee283 100644
--- a/ironic/objects/node.py
+++ b/ironic/objects/node.py
@@ -75,7 +75,8 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
# Version 1.32: Add description field
# Version 1.33: Add retired and retired_reason fields
# Version 1.34: Add lessee field
- VERSION = '1.34'
+ # Version 1.35: Add network_data field
+ VERSION = '1.35'
dbapi = db_api.get_instance()
@@ -164,6 +165,7 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
'description': object_fields.StringField(nullable=True),
'retired': objects.fields.BooleanField(nullable=True),
'retired_reason': object_fields.StringField(nullable=True),
+ 'network_data': object_fields.FlexibleDictField(nullable=True),
}
def as_dict(self, secure=False):
@@ -439,10 +441,6 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
updates = self.do_version_changes_for_db()
self._validate_property_values(updates.get('properties'))
- if 'driver' in updates and 'driver_internal_info' not in updates:
- # Clean driver_internal_info when changes driver
- self.driver_internal_info = {}
- updates = self.do_version_changes_for_db()
self._validate_and_remove_traits(updates)
self._validate_and_format_conductor_group(updates)
db_node = self.dbapi.update_node(self.uuid, updates)
@@ -549,6 +547,21 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
elif self.conductor_group:
self.conductor_group = ''
+ def _convert_network_data_field(self, target_version,
+ remove_unavailable_fields=True):
+ # NOTE(etingof): The default value for `network_data` is an empty
+ # dict. Therefore we can't use the generic version adjustment
+ # routine.
+ field_is_set = self.obj_attr_is_set('network_data')
+ if target_version >= (1, 35):
+ if not field_is_set:
+ self.network_data = {}
+ elif field_is_set:
+ if remove_unavailable_fields:
+ delattr(self, 'network_data')
+ elif self.network_data:
+ self.network_data = {}
+
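In effect: converting to 1.35 or later backfills an unset network_data with an empty dict, while converting to an older version either deletes the attribute or blanks it, depending on remove_unavailable_fields. The same rules restated compactly (a sketch over a plain object, not the real Node):

    def convert(node, target, remove_unavailable_fields=True):
        is_set = hasattr(node, 'network_data')
        if target >= (1, 35):
            if not is_set:
                node.network_data = {}      # backfill the new field
        elif is_set:
            if remove_unavailable_fields:
                del node.network_data       # older consumer: drop the field
            elif node.network_data:
                node.network_data = {}      # keep the field but blank it
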
# NOTE (yolanda): new method created to avoid repeating code in
# _convert_to_version, and to avoid pep8 too complex error
def _adjust_field_to_version(self, field_name, field_default_value,
@@ -606,6 +619,8 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
should be set to False (or removed).
Version 1.34: lessee was added. For versions prior to this, it should
be set to None or removed.
+ Version 1.35: network_data was added. For versions prior to this, it
+ should be set to an empty dict (or removed).
:param target_version: the desired version of the object
:param remove_unavailable_fields: True to remove fields that are
@@ -621,6 +636,7 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
('automated_clean', 28), ('protected_reason', 29),
('owner', 30), ('allocation_id', 31), ('description', 32),
('retired_reason', 33), ('lessee', 34)]
+
for name, minor in fields:
self._adjust_field_to_version(name, None, target_version,
1, minor, remove_unavailable_fields)
@@ -637,14 +653,17 @@ class Node(base.IronicObject, object_base.VersionedObjectDictCompat):
self._adjust_field_to_version('retired', False, target_version,
1, 33, remove_unavailable_fields)
+ self._convert_network_data_field(target_version,
+ remove_unavailable_fields)
+
@base.IronicObjectRegistry.register
class NodePayload(notification.NotificationPayloadBase):
"""Base class used for all notification payloads about a Node object."""
# NOTE: This payload does not include the Node fields "chassis_id",
# "driver_info", "driver_internal_info", "instance_info", "raid_config",
- # "reservation", or "target_raid_config". These were excluded for reasons
- # including:
+ # "network_data", "reservation", or "target_raid_config". These were
+ # excluded for reasons including:
# - increased complexity needed for creating the payload
# - sensitive information in the fields that shouldn't be exposed to
# external services
diff --git a/ironic/objects/port.py b/ironic/objects/port.py
index 6c75c8c21..85690b162 100644
--- a/ironic/objects/port.py
+++ b/ironic/objects/port.py
@@ -203,18 +203,21 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
# Implications of calling new remote procedures should be thought through.
# @object_base.remotable_classmethod
@classmethod
- def get_by_address(cls, context, address, owner=None):
+ def get_by_address(cls, context, address, owner=None, project=None):
"""Find a port based on address and return a :class:`Port` object.
:param cls: the :class:`Port`
:param context: Security context
:param address: the address of a port.
- :param owner: a node owner to match against
+ :param owner: DEPRECATED: a node owner to match against
+ :param project: a node owner or lessee to match against
:returns: a :class:`Port` object.
:raises: PortNotFound
"""
- db_port = cls.dbapi.get_port_by_address(address, owner=owner)
+ if owner and not project:
+ project = owner
+ db_port = cls.dbapi.get_port_by_address(address, project=project)
port = cls._from_db_object(context, cls(), db_port)
return port
@@ -224,7 +227,7 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
# @object_base.remotable_classmethod
@classmethod
def list(cls, context, limit=None, marker=None,
- sort_key=None, sort_dir=None, owner=None):
+ sort_key=None, sort_dir=None, owner=None, project=None):
"""Return a list of Port objects.
:param context: Security context.
@@ -232,16 +235,19 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
:param marker: pagination marker for large data sets.
:param sort_key: column to sort results by.
:param sort_dir: direction to sort. "asc" or "desc".
- :param owner: a node owner to match against
+ :param owner: DEPRECATED: a node owner to match against
+ :param project: a node owner or lessee to match against
     :returns: a list of :class:`Port` objects.
:raises: InvalidParameterValue
"""
+ if owner and not project:
+ project = owner
db_ports = cls.dbapi.get_port_list(limit=limit,
marker=marker,
sort_key=sort_key,
sort_dir=sort_dir,
- owner=owner)
+ project=project)
return cls._from_db_object_list(context, db_ports)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
@@ -250,7 +256,8 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
# @object_base.remotable_classmethod
@classmethod
def list_by_node_id(cls, context, node_id, limit=None, marker=None,
- sort_key=None, sort_dir=None, owner=None):
+ sort_key=None, sort_dir=None, owner=None,
+ project=None):
"""Return a list of Port objects associated with a given node ID.
:param context: Security context.
@@ -259,15 +266,18 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
:param marker: pagination marker for large data sets.
:param sort_key: column to sort results by.
:param sort_dir: direction to sort. "asc" or "desc".
- :param owner: a node owner to match against
+ :param owner: DEPRECATED: a node owner to match against
+ :param project: a node owner or lessee to match against
     :returns: a list of :class:`Port` objects.
"""
+ if owner and not project:
+ project = owner
db_ports = cls.dbapi.get_ports_by_node_id(node_id, limit=limit,
marker=marker,
sort_key=sort_key,
sort_dir=sort_dir,
- owner=owner)
+ project=project)
return cls._from_db_object_list(context, db_ports)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
@@ -277,7 +287,7 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
@classmethod
def list_by_portgroup_id(cls, context, portgroup_id, limit=None,
marker=None, sort_key=None, sort_dir=None,
- owner=None):
+ owner=None, project=None):
"""Return a list of Port objects associated with a given portgroup ID.
:param context: Security context.
@@ -286,16 +296,19 @@ class Port(base.IronicObject, object_base.VersionedObjectDictCompat):
:param marker: pagination marker for large data sets.
:param sort_key: column to sort results by.
:param sort_dir: direction to sort. "asc" or "desc".
- :param owner: a node owner to match against
+ :param owner: DEPRECATED: a node owner to match against
+ :param project: a node owner or lessee to match against
     :returns: a list of :class:`Port` objects.
"""
+ if owner and not project:
+ project = owner
db_ports = cls.dbapi.get_ports_by_portgroup_id(portgroup_id,
limit=limit,
marker=marker,
sort_key=sort_key,
sort_dir=sort_dir,
- owner=owner)
+ project=project)
return cls._from_db_object_list(context, db_ports)
# NOTE(xek): We don't want to enable RPC on this call just yet. Remotable
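All four port lookups above share the same compatibility shim: a caller still passing the deprecated owner keyword is transparently mapped onto project, and an explicit project always wins. The shim in isolation:

    def resolve_project(owner=None, project=None):
        # Deprecated 'owner' is honored only when 'project' is absent.
        if owner and not project:
            project = owner
        return project

    assert resolve_project(owner='tenant-a') == 'tenant-a'
    assert resolve_project(owner='tenant-a', project='tenant-b') == 'tenant-b'
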
diff --git a/ironic/tests/base.py b/ironic/tests/base.py
index dabcb7ba4..eccdb9a1b 100644
--- a/ironic/tests/base.py
+++ b/ironic/tests/base.py
@@ -16,7 +16,7 @@
"""Base classes for our unit tests.
-Allows overriding of config for use of fakes, and some black magic for
+Allows overriding of config for use of fakes, and some magic for
inline callbacks.
"""
@@ -26,9 +26,10 @@ import os
import subprocess
import sys
import tempfile
+from unittest import mock
import eventlet
-eventlet.monkey_patch(os=False) # noqa E402
+eventlet.monkey_patch(os=False)
import fixtures
from ironic_lib import utils
from oslo_concurrency import processutils
@@ -53,6 +54,21 @@ logging.register_options(CONF)
logging.setup(CONF, 'ironic')
+# NOTE(rpittau): this function allows autospec for classmethods and
+# staticmethods on Python 3.6; Python 3.7 and later are not affected.
+# For more info please see: http://bugs.python.org/issue23078
+def _patch_mock_callable(obj):
+ if isinstance(obj, type):
+ return True
+ if getattr(obj, '__call__', None) is not None:
+ return True
+ if (isinstance(obj, (staticmethod, classmethod))
+ and mock._callable(obj.__func__)):
+ return True
+ return False
+
+
class ReplaceModule(fixtures.Fixture):
"""Replace a module with a fake module."""
@@ -117,10 +133,13 @@ class TestCase(oslo_test_base.BaseTestCase):
# subprocess.Popen is a class
self.patch(subprocess, 'Popen', DoNotCallPopen)
+ if sys.version_info < (3, 7):
+ _patch_mock_callable._old_func = mock._callable
+ mock._callable = _patch_mock_callable
+
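The shim matters only on Python 3.6, where mock's internal callable check does not unwrap staticmethod/classmethod objects and autospec patching of them fails (bpo-23078); Python 3.7+ needs no patch. The pattern it unblocks, in a minimal form:

    from unittest import mock

    class Foo:
        @classmethod
        def bar(cls):
            return 'real'

    # On Python 3.6 this raised TypeError without the shim above;
    # on 3.7 and later it works out of the box.
    with mock.patch.object(Foo, 'bar', autospec=True) as m:
        Foo.bar()
        m.assert_called_once()
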
def _set_config(self):
self.cfg_fixture = self.useFixture(config_fixture.Config(CONF))
self.config(use_stderr=False,
- fatal_exception_format_errors=True,
tempdir=tempfile.tempdir)
self.config(cleaning_network=uuidutils.generate_uuid(),
group='neutron')
diff --git a/ironic/tests/json_samples/network_data.json b/ironic/tests/json_samples/network_data.json
new file mode 100644
index 000000000..efce35ddd
--- /dev/null
+++ b/ironic/tests/json_samples/network_data.json
@@ -0,0 +1,113 @@
+{
+ "links": [
+ {
+ "id": "interface2",
+ "type": "vif",
+ "ethernet_mac_address": "a0:36:9f:2c:e8:70",
+ "vif_id": "e1c90e9f-eafc-4e2d-8ec9-58b91cebb53d",
+ "mtu": 1500
+ },
+ {
+ "id": "interface0",
+ "type": "phy",
+ "ethernet_mac_address": "a0:36:9f:2c:e8:80",
+ "mtu": 9000
+ },
+ {
+ "id": "interface1",
+ "type": "phy",
+ "ethernet_mac_address": "a0:36:9f:2c:e8:81",
+ "mtu": 9000
+ },
+ {
+ "id": "bond0",
+ "type": "bond",
+ "bond_links": [
+ "interface0",
+ "interface1"
+ ],
+ "ethernet_mac_address": "a0:36:9f:2c:e8:82",
+ "bond_mode": "802.1ad",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bond_miimon": 100
+ },
+ {
+ "id": "vlan0",
+ "type": "vlan",
+ "vlan_link": "bond0",
+ "vlan_id": 101,
+ "vlan_mac_address": "a0:36:9f:2c:e8:80",
+ "vif_id": "e1c90e9f-eafc-4e2d-8ec9-58b91cebb53f"
+ }
+ ],
+ "networks": [
+ {
+ "id": "private-ipv4",
+ "type": "ipv4",
+ "link": "interface0",
+ "ip_address": "10.184.0.244",
+ "netmask": "255.255.240.0",
+ "routes": [
+ {
+ "network": "10.0.0.0",
+ "netmask": "255.0.0.0",
+ "gateway": "11.0.0.1"
+ },
+ {
+ "network": "0.0.0.0",
+ "netmask": "0.0.0.0",
+ "gateway": "23.253.157.1"
+ }
+ ],
+ "network_id": "da5bb487-5193-4a65-a3df-4a0055a8c0d7"
+ },
+ {
+ "id": "private-ipv4",
+ "type": "ipv6",
+ "link": "interface0",
+ "ip_address": "2001:cdba::3257:9652/24",
+ "routes": [
+ {
+ "network": "::",
+ "netmask": "::",
+ "gateway": "fd00::1"
+ },
+ {
+ "network": "::",
+ "netmask": "ffff:ffff:ffff::",
+ "gateway": "fd00::1:1"
+ }
+ ],
+ "network_id": "da5bb487-5193-4a65-a3df-4a0055a8c0d8"
+ },
+ {
+ "id": "publicnet-ipv4",
+ "type": "ipv4",
+ "link": "vlan0",
+ "ip_address": "23.253.157.244",
+ "netmask": "255.255.255.0",
+ "dns_nameservers": [
+ "69.20.0.164",
+ "69.20.0.196"
+ ],
+ "routes": [
+ {
+ "network": "0.0.0.0",
+ "netmask": "0.0.0.0",
+ "gateway": "23.253.157.1"
+ }
+ ],
+ "network_id": "62611d6f-66cb-4270-8b1f-503ef0dd4736"
+ }
+ ],
+ "services": [
+ {
+ "type": "dns",
+ "address": "8.8.8.8"
+ },
+ {
+ "type": "dns",
+ "address": "8.8.4.4"
+ }
+ ]
+} \ No newline at end of file
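The sample follows the Nova network_data.json layout (links, networks, services); a quick sanity check of its shape (path assumed relative to the ironic source tree):

    import json

    with open('ironic/tests/json_samples/network_data.json') as f:
        data = json.load(f)

    assert {'links', 'networks', 'services'} <= set(data)
    print([link['id'] for link in data['links']])
    # -> ['interface2', 'interface0', 'interface1', 'bond0', 'vlan0']
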
diff --git a/ironic/tests/unit/api/base.py b/ironic/tests/unit/api/base.py
index 5d4ab99b8..3d80ee42f 100644
--- a/ironic/tests/unit/api/base.py
+++ b/ironic/tests/unit/api/base.py
@@ -20,9 +20,9 @@
# ceilometer/tests/api/__init__.py). This should be oslo'ified:
# https://bugs.launchpad.net/ironic/+bug/1255115.
+from unittest import mock
from urllib import parse as urlparse
-import mock
from oslo_config import cfg
import pecan
import pecan.testing
@@ -60,7 +60,8 @@ class BaseApiTest(db_base.DbTestCase):
self.addCleanup(reset_pecan)
- p = mock.patch('ironic.api.controllers.v1.Controller._check_version')
+ p = mock.patch('ironic.api.controllers.v1.Controller._check_version',
+ autospec=True)
self._check_version = p.start()
self.addCleanup(p.stop)
diff --git a/ironic/tests/unit/api/controllers/test_base.py b/ironic/tests/unit/api/controllers/test_base.py
index 423eaffa5..b94f66f65 100644
--- a/ironic/tests/unit/api/controllers/test_base.py
+++ b/ironic/tests/unit/api/controllers/test_base.py
@@ -14,8 +14,8 @@
# under the License.
from http import client as http_client
+from unittest import mock
-import mock
from webob import exc
from ironic.api.controllers import base as cbase
diff --git a/ironic/tests/unit/api/controllers/v1/test_allocation.py b/ironic/tests/unit/api/controllers/v1/test_allocation.py
index 8f28a3d67..7409ea2ae 100644
--- a/ironic/tests/unit/api/controllers/v1/test_allocation.py
+++ b/ironic/tests/unit/api/controllers/v1/test_allocation.py
@@ -15,10 +15,10 @@ Tests for the API /allocations/ methods.
import datetime
from http import client as http_client
+from unittest import mock
from urllib import parse as urlparse
import fixtures
-import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
@@ -114,7 +114,7 @@ class TestListAllocations(test_api_base.BaseApiTest):
'/allocations/%s?fields=%s' % (allocation.uuid, fields),
headers=self.headers)
# We always append "links"
- self.assertItemsEqual(['resource_class', 'extra', 'links'], data)
+ self.assertCountEqual(['resource_class', 'extra', 'links'], data)
def test_get_collection_custom_fields(self):
fields = 'uuid,extra'
@@ -132,7 +132,7 @@ class TestListAllocations(test_api_base.BaseApiTest):
self.assertEqual(3, len(data['allocations']))
for allocation in data['allocations']:
# We always append "links"
- self.assertItemsEqual(['uuid', 'extra', 'links'], allocation)
+ self.assertCountEqual(['uuid', 'extra', 'links'], allocation)
def test_get_custom_fields_invalid_fields(self):
allocation = obj_utils.create_test_allocation(self.context,
@@ -185,9 +185,9 @@ class TestListAllocations(test_api_base.BaseApiTest):
self.assertIn('links', data)
self.assertEqual(2, len(data['links']))
self.assertIn(uuid, data['links'][0]['href'])
- for l in data['links']:
- bookmark = l['rel'] == 'bookmark'
- self.assertTrue(self.validate_link(l['href'], bookmark=bookmark,
+ for link in data['links']:
+ bookmark = link['rel'] == 'bookmark'
+ self.assertTrue(self.validate_link(link['href'], bookmark=bookmark,
headers=self.headers))
def test_collection_links(self):
diff --git a/ironic/tests/unit/api/controllers/v1/test_chassis.py b/ironic/tests/unit/api/controllers/v1/test_chassis.py
index 61c542c73..69b83e4ac 100644
--- a/ironic/tests/unit/api/controllers/v1/test_chassis.py
+++ b/ironic/tests/unit/api/controllers/v1/test_chassis.py
@@ -17,9 +17,9 @@ Tests for the API /chassis/ methods.
import datetime
from http import client as http_client
+from unittest import mock
from urllib import parse as urlparse
-import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
@@ -78,7 +78,7 @@ class TestListChassis(test_api_base.BaseApiTest):
'/chassis/%s?fields=%s' % (chassis.uuid, fields),
headers={api_base.Version.string: str(api_v1.max_version())})
# We always append "links"
- self.assertItemsEqual(['description', 'extra', 'links'], data)
+ self.assertCountEqual(['description', 'extra', 'links'], data)
def test_get_collection_custom_fields(self):
fields = 'uuid,extra'
@@ -93,7 +93,7 @@ class TestListChassis(test_api_base.BaseApiTest):
self.assertEqual(3, len(data['chassis']))
for ch in data['chassis']:
# We always append "links"
- self.assertItemsEqual(['uuid', 'extra', 'links'], ch)
+ self.assertCountEqual(['uuid', 'extra', 'links'], ch)
def test_get_custom_fields_invalid_fields(self):
chassis = obj_utils.create_test_chassis(self.context)
@@ -190,9 +190,10 @@ class TestListChassis(test_api_base.BaseApiTest):
self.assertIn('links', data)
self.assertEqual(2, len(data['links']))
self.assertIn(uuid, data['links'][0]['href'])
- for l in data['links']:
- bookmark = l['rel'] == 'bookmark'
- self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))
+ for link in data['links']:
+ bookmark = link['rel'] == 'bookmark'
+ self.assertTrue(self.validate_link(link['href'],
+ bookmark=bookmark))
if public_url is not None:
expected = [{'href': '%s/v1/chassis/%s' % (public_url, uuid),
diff --git a/ironic/tests/unit/api/controllers/v1/test_conductor.py b/ironic/tests/unit/api/controllers/v1/test_conductor.py
index 8a96a2faf..caf85eb4c 100644
--- a/ironic/tests/unit/api/controllers/v1/test_conductor.py
+++ b/ironic/tests/unit/api/controllers/v1/test_conductor.py
@@ -15,8 +15,8 @@ Tests for the API /conductors/ methods.
import datetime
from http import client as http_client
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
@@ -120,7 +120,7 @@ class TestListConductors(test_api_base.BaseApiTest):
data = self.get_json(
'/conductors/rocky.rocks?fields=%s' % fields,
headers={api_base.Version.string: str(api_v1.max_version())})
- self.assertItemsEqual(['hostname', 'alive', 'links'], data)
+ self.assertCountEqual(['hostname', 'alive', 'links'], data)
def test_get_collection_custom_fields(self):
obj_utils.create_test_conductor(self.context, hostname='rocky.rocks')
@@ -133,7 +133,7 @@ class TestListConductors(test_api_base.BaseApiTest):
self.assertEqual(2, len(data['conductors']))
for c in data['conductors']:
- self.assertItemsEqual(['hostname', 'alive', 'links'], c)
+ self.assertCountEqual(['hostname', 'alive', 'links'], c)
def test_get_custom_fields_invalid_fields(self):
obj_utils.create_test_conductor(self.context, hostname='rocky.rocks')
@@ -156,9 +156,9 @@ class TestListConductors(test_api_base.BaseApiTest):
self.assertIn('links', data)
self.assertEqual(2, len(data['links']))
self.assertIn('rocky.rocks', data['links'][0]['href'])
- for l in data['links']:
- bookmark = l['rel'] == 'bookmark'
- self.assertTrue(self.validate_link(l['href'], bookmark=bookmark,
+ for link in data['links']:
+ bookmark = link['rel'] == 'bookmark'
+ self.assertTrue(self.validate_link(link['href'], bookmark=bookmark,
headers=headers))
if public_url is not None:
diff --git a/ironic/tests/unit/api/controllers/v1/test_deploy_template.py b/ironic/tests/unit/api/controllers/v1/test_deploy_template.py
index 6d7dfcb35..b194dafdd 100644
--- a/ironic/tests/unit/api/controllers/v1/test_deploy_template.py
+++ b/ironic/tests/unit/api/controllers/v1/test_deploy_template.py
@@ -15,9 +15,9 @@ Tests for the API /deploy_templates/ methods.
import datetime
from http import client as http_client
+from unittest import mock
from urllib import parse as urlparse
-import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
@@ -119,7 +119,7 @@ class TestListDeployTemplates(BaseDeployTemplatesAPITest):
'/deploy_templates/%s?fields=%s' % (template.uuid, fields),
headers=self.headers)
# We always append "links"
- self.assertItemsEqual(['name', 'steps', 'links'], data)
+ self.assertCountEqual(['name', 'steps', 'links'], data)
def test_get_collection_custom_fields(self):
fields = 'uuid,steps'
@@ -136,7 +136,7 @@ class TestListDeployTemplates(BaseDeployTemplatesAPITest):
self.assertEqual(3, len(data['deploy_templates']))
for template in data['deploy_templates']:
# We always append "links"
- self.assertItemsEqual(['uuid', 'steps', 'links'], template)
+ self.assertCountEqual(['uuid', 'steps', 'links'], template)
def test_get_custom_fields_invalid_fields(self):
template = obj_utils.create_test_deploy_template(self.context)
@@ -216,9 +216,9 @@ class TestListDeployTemplates(BaseDeployTemplatesAPITest):
self.assertIn('links', data)
self.assertEqual(2, len(data['links']))
self.assertIn(uuid, data['links'][0]['href'])
- for l in data['links']:
- bookmark = l['rel'] == 'bookmark'
- self.assertTrue(self.validate_link(l['href'], bookmark=bookmark,
+ for link in data['links']:
+ bookmark = link['rel'] == 'bookmark'
+ self.assertTrue(self.validate_link(link['href'], bookmark=bookmark,
headers=self.headers))
def test_collection_links(self):
@@ -353,14 +353,15 @@ class TestPatch(BaseDeployTemplatesAPITest):
mock_save.assert_called_once_with(mock.ANY)
return response
- def _test_update_bad_request(self, mock_save, patch, error_msg):
+ def _test_update_bad_request(self, mock_save, patch, error_msg=None):
response = self.patch_json('/deploy_templates/%s' % self.template.uuid,
patch, expect_errors=True,
headers=self.headers)
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.BAD_REQUEST, response.status_code)
self.assertTrue(response.json['error_message'])
- self.assertRegex(response.json['error_message'], error_msg)
+ if error_msg:
+ self.assertRegex(response.json['error_message'], error_msg)
self.assertFalse(mock_save.called)
return response
@@ -537,16 +538,14 @@ class TestPatch(BaseDeployTemplatesAPITest):
'priority': 42
}
patch = [{'path': '/steps/1', 'op': 'replace', 'value': step}]
- self._test_update_bad_request(
- mock_save, patch, "list assignment index out of range|"
- "can't replace outside of list")
+ self._test_update_bad_request(mock_save, patch)
def test_replace_empty_step_list_fail(self, mock_save):
patch = [{'path': '/steps', 'op': 'replace', 'value': []}]
self._test_update_bad_request(
mock_save, patch, 'No deploy steps specified')
- def _test_remove_not_allowed(self, mock_save, field, error_msg):
+ def _test_remove_not_allowed(self, mock_save, field, error_msg=None):
patch = [{'path': '/%s' % field, 'op': 'remove'}]
self._test_update_bad_request(mock_save, patch, error_msg)
@@ -566,8 +565,7 @@ class TestPatch(BaseDeployTemplatesAPITest):
"'/steps' is a mandatory attribute and can not be removed")
def test_remove_foo(self, mock_save):
- self._test_remove_not_allowed(
- mock_save, 'foo', "can't remove non-existent object 'foo'")
+ self._test_remove_not_allowed(mock_save, 'foo')
def test_replace_step_invalid_interface(self, mock_save):
patch = [{'path': '/steps/0/interface', 'op': 'replace',
@@ -632,14 +630,11 @@ class TestPatch(BaseDeployTemplatesAPITest):
def test_remove_non_existent_property_fail(self, mock_save):
patch = [{'path': '/non-existent', 'op': 'remove'}]
- self._test_update_bad_request(
- mock_save, patch,
- "can't remove non-existent object 'non-existent'")
+ self._test_update_bad_request(mock_save, patch)
def test_remove_non_existent_step_fail(self, mock_save):
patch = [{'path': '/steps/1', 'op': 'remove'}]
- self._test_update_bad_request(
- mock_save, patch, "can't remove non-existent object '1'")
+ self._test_update_bad_request(mock_save, patch)
def test_remove_only_step_fail(self, mock_save):
patch = [{'path': '/steps/0', 'op': 'remove'}]
@@ -648,9 +643,7 @@ class TestPatch(BaseDeployTemplatesAPITest):
def test_remove_non_existent_step_property_fail(self, mock_save):
patch = [{'path': '/steps/0/non-existent', 'op': 'remove'}]
- self._test_update_bad_request(
- mock_save, patch,
- "can't remove non-existent object 'non-existent'")
+ self._test_update_bad_request(mock_save, patch)
def test_add_root_non_existent(self, mock_save):
patch = [{'path': '/foo', 'value': 'bar', 'op': 'add'}]
@@ -665,8 +658,7 @@ class TestPatch(BaseDeployTemplatesAPITest):
'priority': 42
}
patch = [{'path': '/steps/2', 'op': 'add', 'value': step}]
- self._test_update_bad_request(
- mock_save, patch, "can't insert outside of list")
+ self._test_update_bad_request(mock_save, patch)
def test_add_multi(self, mock_save):
steps = [
diff --git a/ironic/tests/unit/api/controllers/v1/test_driver.py b/ironic/tests/unit/api/controllers/v1/test_driver.py
index 965c390f0..18100874d 100644
--- a/ironic/tests/unit/api/controllers/v1/test_driver.py
+++ b/ironic/tests/unit/api/controllers/v1/test_driver.py
@@ -15,8 +15,8 @@
from http import client as http_client
import json
+from unittest import mock
-import mock
from oslo_config import cfg
from testtools import matchers
@@ -270,9 +270,10 @@ class TestListDrivers(base.BaseApiTest):
self.assertIn('links', data)
self.assertEqual(2, len(data['links']))
self.assertIn(self.hw1, data['links'][0]['href'])
- for l in data['links']:
- bookmark = l['rel'] == 'bookmark'
- self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))
+ for link in data['links']:
+ bookmark = link['rel'] == 'bookmark'
+ self.assertTrue(self.validate_link(link['href'],
+ bookmark=bookmark))
if public_url is not None:
expected = [{'href': '%s/v1/drivers/%s' % (public_url, self.hw1),
diff --git a/ironic/tests/unit/api/controllers/v1/test_event.py b/ironic/tests/unit/api/controllers/v1/test_event.py
index 082c74493..b67870c23 100644
--- a/ironic/tests/unit/api/controllers/v1/test_event.py
+++ b/ironic/tests/unit/api/controllers/v1/test_event.py
@@ -14,8 +14,7 @@ Tests for the API /events methods.
"""
from http import client as http_client
-
-import mock
+from unittest import mock
from ironic.api.controllers import base as api_base
from ironic.api.controllers.v1 import types
diff --git a/ironic/tests/unit/api/controllers/v1/test_expose.py b/ironic/tests/unit/api/controllers/v1/test_expose.py
index 0c9976dcb..bc8e9fbe7 100644
--- a/ironic/tests/unit/api/controllers/v1/test_expose.py
+++ b/ironic/tests/unit/api/controllers/v1/test_expose.py
@@ -12,15 +12,26 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
+from http import client as http_client
from importlib import machinery
import inspect
+import json
import os
import sys
+from unittest import mock
-import mock
from oslo_utils import uuidutils
+import pecan.rest
+import pecan.testing
+from ironic.api.controllers import root
+from ironic.api.controllers import v1
+from ironic.api import expose
+from ironic.api import types as atypes
+from ironic.common import exception
from ironic.tests import base as test_base
+from ironic.tests.unit.api import base as test_api_base
class TestExposedAPIMethodsCheckPolicy(test_base.TestCase):
@@ -85,3 +96,220 @@ class TestExposedAPIMethodsCheckPolicy(test_base.TestCase):
def test_conductor_api_policy(self):
self._test('ironic.api.controllers.v1.conductor')
+
+
+class UnderscoreStr(atypes.UserType):
+ basetype = str
+ name = "custom string"
+
+ def tobasetype(self, value):
+ return '__' + value
+
+
+class Obj(atypes.Base):
+ id = int
+ name = str
+ unset_me = str
+
+
+class NestedObj(atypes.Base):
+ o = Obj
+
+
+class TestJsonRenderer(test_base.TestCase):
+
+ def setUp(self):
+ super(TestJsonRenderer, self).setUp()
+ self.renderer = expose.JSonRenderer('/', None)
+
+ def test_render_error(self):
+ error_dict = {
+ 'faultcode': 500,
+ 'faultstring': 'ouch'
+ }
+ self.assertEqual(
+ error_dict,
+ json.loads(self.renderer.render('/', error_dict))
+ )
+
+ def test_render_exception(self):
+ error_dict = {
+ 'faultcode': 'Server',
+ 'faultstring': 'ouch',
+ 'debuginfo': None
+ }
+ try:
+ raise Exception('ouch')
+ except Exception:
+ excinfo = sys.exc_info()
+ self.assertEqual(
+ json.dumps(error_dict),
+ self.renderer.render('/', expose.format_exception(excinfo))
+ )
+
+ def test_render_http_exception(self):
+ error_dict = {
+ 'faultcode': '403',
+ 'faultstring': 'Not authorized',
+ 'debuginfo': None
+ }
+ try:
+ e = exception.NotAuthorized()
+ e.code = 403
+ raise e
+ except exception.IronicException:
+ excinfo = sys.exc_info()
+ self.assertEqual(
+ json.dumps(error_dict),
+ self.renderer.render('/', expose.format_exception(excinfo))
+ )
+
+ def test_render_int(self):
+ self.assertEqual(
+ '42',
+ self.renderer.render('/', {
+ 'result': 42,
+ 'datatype': int
+ })
+ )
+
+ def test_render_none(self):
+ self.assertEqual(
+ 'null',
+ self.renderer.render('/', {
+ 'result': None,
+ 'datatype': str
+ })
+ )
+
+ def test_render_str(self):
+ self.assertEqual(
+ '"a string"',
+ self.renderer.render('/', {
+ 'result': 'a string',
+ 'datatype': str
+ })
+ )
+
+ def test_render_datetime(self):
+ self.assertEqual(
+ '"2020-04-14T10:35:10.586431"',
+ self.renderer.render('/', {
+ 'result': datetime.datetime(2020, 4, 14, 10, 35, 10, 586431),
+ 'datatype': datetime.datetime
+ })
+ )
+
+ def test_render_array(self):
+ self.assertEqual(
+ json.dumps(['one', 'two', 'three']),
+ self.renderer.render('/', {
+ 'result': ['one', 'two', 'three'],
+ 'datatype': atypes.ArrayType(str)
+ })
+ )
+
+ def test_render_dict(self):
+ self.assertEqual(
+ json.dumps({'one': 'a', 'two': 'b', 'three': 'c'}),
+ self.renderer.render('/', {
+ 'result': {'one': 'a', 'two': 'b', 'three': 'c'},
+ 'datatype': atypes.DictType(str, str)
+ })
+ )
+
+ def test_complex_type(self):
+ o = Obj()
+ o.id = 1
+ o.name = 'one'
+ o.unset_me = atypes.Unset
+
+ n = NestedObj()
+ n.o = o
+ self.assertEqual(
+ json.dumps({'o': {'id': 1, 'name': 'one'}}),
+ self.renderer.render('/', {
+ 'result': n,
+ 'datatype': NestedObj
+ })
+ )
+
+ def test_user_type(self):
+ self.assertEqual(
+ '"__foo"',
+ self.renderer.render('/', {
+ 'result': 'foo',
+ 'datatype': UnderscoreStr()
+ })
+ )
+
+
+class MyThingController(pecan.rest.RestController):
+
+ _custom_actions = {
+ 'no_content': ['GET'],
+ 'response_content': ['GET'],
+ 'ouch': ['GET'],
+ }
+
+ @expose.expose(int, str, int)
+ def get(self, name, number):
+ return {name: number}
+
+ @expose.expose(str)
+ def no_content(self):
+ return atypes.PassthruResponse('nothing', status_code=204)
+
+ @expose.expose(str)
+ def response_content(self):
+ return atypes.PassthruResponse('nothing', status_code=200)
+
+ @expose.expose(str)
+ def ouch(self):
+ raise Exception('ouch')
+
+
+class MyV1Controller(v1.Controller):
+
+ things = MyThingController()
+
+
+class MyRootController(root.RootController):
+
+ v1 = MyV1Controller()
+
+
+class TestExpose(test_api_base.BaseApiTest):
+
+ block_execute = False
+
+ root_controller = '%s.%s' % (MyRootController.__module__,
+ MyRootController.__name__)
+
+ def test_expose(self):
+ self.assertEqual(
+ {'foo': 1},
+ self.get_json('/things/', name='foo', number=1)
+ )
+
+ def test_response_204(self):
+ response = self.get_json('/things/no_content', expect_errors=True)
+ self.assertEqual(http_client.NO_CONTENT, response.status_int)
+ self.assertIsNone(response.content_type)
+ self.assertEqual(b'', response.normal_body)
+
+ def test_response_content(self):
+ response = self.get_json('/things/response_content',
+ expect_errors=True)
+ self.assertEqual(http_client.OK, response.status_int)
+ self.assertEqual(b'"nothing"', response.normal_body)
+ self.assertEqual('application/json', response.content_type)
+
+ def test_exception(self):
+ response = self.get_json('/things/ouch',
+ expect_errors=True)
+ error_message = json.loads(response.json['error_message'])
+ self.assertEqual(http_client.INTERNAL_SERVER_ERROR,
+ response.status_int)
+ self.assertEqual('application/json', response.content_type)
+ self.assertEqual('Server', error_message['faultcode'])
+ self.assertEqual('ouch', error_message['faultstring'])
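
Note: the new TestExpose cases pin down the PassthruResponse contract the
reworked passthru code relies on: the object carries a payload plus an HTTP
status, a 204 suppresses the body entirely, and anything else is rendered as
JSON. A hedged sketch using only names visible in these tests:

    from ironic.api import types as atypes

    resp = atypes.PassthruResponse('nothing', status_code=204)
    print(resp.status_code)  # 204
    print(resp.obj)          # 'nothing' -- dropped from the wire for a 204
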
diff --git a/ironic/tests/unit/api/controllers/v1/test_node.py b/ironic/tests/unit/api/controllers/v1/test_node.py
index 421175c10..2d979fb42 100644
--- a/ironic/tests/unit/api/controllers/v1/test_node.py
+++ b/ironic/tests/unit/api/controllers/v1/test_node.py
@@ -16,10 +16,11 @@ Tests for the API /nodes/ methods.
import datetime
from http import client as http_client
import json
+import os
+from unittest import mock
from urllib import parse as urlparse
import fixtures
-import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
@@ -42,12 +43,20 @@ from ironic.common import states
from ironic.conductor import rpcapi
from ironic import objects
from ironic.objects import fields as obj_fields
+from ironic import tests as tests_root
from ironic.tests import base
from ironic.tests.unit.api import base as test_api_base
from ironic.tests.unit.api import utils as test_api_utils
from ironic.tests.unit.objects import utils as obj_utils
+with open(
+ os.path.join(
+ os.path.dirname(tests_root.__file__),
+ 'json_samples', 'network_data.json')) as fl:
+ NETWORK_DATA = json.load(fl)
+
+
class TestNodeObject(base.TestCase):
def test_node_init(self):
@@ -138,6 +147,7 @@ class TestListNodes(test_api_base.BaseApiTest):
self.assertNotIn('retired', data['nodes'][0])
self.assertNotIn('retired_reason', data['nodes'][0])
self.assertNotIn('lessee', data['nodes'][0])
+ self.assertNotIn('network_data', data['nodes'][0])
def test_get_one(self):
node = obj_utils.create_test_node(self.context,
@@ -403,6 +413,19 @@ class TestListNodes(test_api_base.BaseApiTest):
headers={api_base.Version.string: '1.65'})
self.assertEqual(data['lessee'], "some-lucky-project")
+ def test_node_network_data_hidden_in_lower_version(self):
+ self._test_node_field_hidden_in_lower_version('network_data',
+ '1.65', '1.66')
+
+ def test_node_network_data(self):
+ node = obj_utils.create_test_node(
+ self.context, network_data=NETWORK_DATA,
+ provision_state='active',
+ uuid=uuidutils.generate_uuid())
+ data = self.get_json('/nodes/%s' % node.uuid,
+ headers={api_base.Version.string: '1.66'})
+ self.assertEqual(data['network_data'], NETWORK_DATA)
+
def test_get_one_custom_fields(self):
node = obj_utils.create_test_node(self.context,
chassis_id=self.chassis.id)
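
Note: network_data is only exposed from API microversion 1.66 onward, so a
client must opt in through the version header. Illustrative request only;
the endpoint and node UUID are placeholders:

    import requests

    resp = requests.get(
        'http://ironic.example.com/v1/nodes/<uuid>',
        headers={'X-OpenStack-Ironic-API-Version': '1.66'})
    print(resp.json().get('network_data'))
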
@@ -411,7 +434,7 @@ class TestListNodes(test_api_base.BaseApiTest):
'/nodes/%s?fields=%s' % (node.uuid, fields),
headers={api_base.Version.string: str(api_v1.max_version())})
# We always append "links"
- self.assertItemsEqual(['extra', 'instance_info', 'links'], data)
+ self.assertCountEqual(['extra', 'instance_info', 'links'], data)
def test_get_collection_custom_fields(self):
fields = 'uuid,instance_info'
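
Note: assertItemsEqual was removed from unittest in Python 3; assertCountEqual
is the direct replacement used across this series, an order-insensitive,
duplicate-aware comparison. For example:

    import unittest

    class Demo(unittest.TestCase):
        def test_counts(self):
            self.assertCountEqual([1, 2, 2, 3], [3, 2, 1, 2])  # passes
            # assertEqual would fail here: the sequence order differs
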
@@ -427,7 +450,7 @@ class TestListNodes(test_api_base.BaseApiTest):
self.assertEqual(3, len(data['nodes']))
for node in data['nodes']:
# We always append "links"
- self.assertItemsEqual(['uuid', 'instance_info', 'links'], node)
+ self.assertCountEqual(['uuid', 'instance_info', 'links'], node)
def test_get_custom_fields_invalid_fields(self):
node = obj_utils.create_test_node(self.context,
@@ -460,7 +483,7 @@ class TestListNodes(test_api_base.BaseApiTest):
'/nodes/%s?fields=%s' % (node.uuid, fields),
headers={api_base.Version.string: str(api_v1.max_version())})
# We always append "links"
- self.assertItemsEqual(['driver_info', 'links'], data)
+ self.assertCountEqual(['driver_info', 'links'], data)
self.assertEqual('******', data['driver_info']['fake_password'])
def test_get_network_interface_fields_invalid_api_version(self):
@@ -684,6 +707,7 @@ class TestListNodes(test_api_base.BaseApiTest):
self.assertIn('allocation_uuid', data['nodes'][0])
self.assertIn('retired', data['nodes'][0])
self.assertIn('retired_reason', data['nodes'][0])
+ self.assertIn('network_data', data['nodes'][0])
def test_detail_using_query(self):
node = obj_utils.create_test_node(self.context,
@@ -722,6 +746,7 @@ class TestListNodes(test_api_base.BaseApiTest):
self.assertNotIn('chassis_id', data['nodes'][0])
self.assertIn('retired', data['nodes'][0])
self.assertIn('retired_reason', data['nodes'][0])
+ self.assertIn('network_data', data['nodes'][0])
def test_detail_query_false(self):
obj_utils.create_test_node(self.context)
@@ -1103,9 +1128,10 @@ class TestListNodes(test_api_base.BaseApiTest):
self.assertIn('links', data)
self.assertEqual(2, len(data['links']))
self.assertIn(uuid, data['links'][0]['href'])
- for l in data['links']:
- bookmark = l['rel'] == 'bookmark'
- self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))
+ for link in data['links']:
+ bookmark = link['rel'] == 'bookmark'
+ self.assertTrue(self.validate_link(link['href'],
+ bookmark=bookmark))
if public_url is not None:
expected = [{'href': '%s/v1/nodes/%s' % (public_url, uuid),
@@ -3654,6 +3680,36 @@ class TestPatch(test_api_base.BaseApiTest):
self.assertEqual('application/json', response.content_type)
self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code)
+ def test_update_network_data(self):
+ node = obj_utils.create_test_node(self.context,
+ uuid=uuidutils.generate_uuid(),
+ provision_state='active')
+ self.mock_update_node.return_value = node
+ headers = {api_base.Version.string: '1.66'}
+
+ response = self.patch_json('/nodes/%s' % node.uuid,
+ [{'path': '/network_data',
+ 'value': NETWORK_DATA,
+ 'op': 'replace'}],
+ headers=headers)
+ self.assertEqual('application/json', response.content_type)
+ self.assertEqual(http_client.OK, response.status_code)
+
+ def test_update_network_data_old_api(self):
+ node = obj_utils.create_test_node(self.context,
+ uuid=uuidutils.generate_uuid())
+ self.mock_update_node.return_value = node
+ headers = {api_base.Version.string: '1.62'}
+
+ response = self.patch_json('/nodes/%s' % node.uuid,
+ [{'path': '/network_data',
+ 'value': NETWORK_DATA,
+ 'op': 'replace'}],
+ headers=headers,
+ expect_errors=True)
+ self.assertEqual('application/json', response.content_type)
+ self.assertEqual(http_client.NOT_ACCEPTABLE, response.status_code)
+
@mock.patch.object(api_utils, 'check_multiple_node_policies_and_retrieve',
autospec=True)
def test_patch_policy_update(self, mock_cmnpar):
@@ -6121,6 +6177,51 @@ class TestAttachDetachVif(test_api_base.BaseApiTest):
self.assertTrue(ret.json['error_message'])
@mock.patch.object(objects.Node, 'get_by_uuid')
+ @mock.patch.object(rpcapi.ConductorAPI, 'vif_attach')
+ def test_vif_attach_port_uuid_and_portgroup_uuid(self, mock_attach,
+ mock_get):
+ vif_id = uuidutils.generate_uuid()
+ request_body = {
+ 'id': vif_id,
+ 'port_uuid': 'port-uuid',
+ 'portgroup_uuid': 'portgroup-uuid'
+ }
+
+ mock_get.return_value = self.node
+
+ ret = self.post_json('/nodes/%s/vifs' % self.node.uuid,
+ request_body, expect_errors=True,
+ headers={api_base.Version.string:
+ "1.67"})
+
+ self.assertEqual(http_client.BAD_REQUEST, ret.status_int)
+ self.assertTrue(ret.json['error_message'])
+
+ @mock.patch.object(objects.Node, 'get_by_uuid')
+ @mock.patch.object(rpcapi.ConductorAPI, 'vif_attach')
+ def test_vif_attach_port_uuid_and_portgroup_uuid_old(self, mock_attach,
+ mock_get):
+ vif_id = uuidutils.generate_uuid()
+ request_body = {
+ 'id': vif_id,
+ 'port_uuid': 'port-uuid',
+ 'portgroup_uuid': 'portgroup-uuid'
+ }
+
+ mock_get.return_value = self.node
+
+ ret = self.post_json('/nodes/%s/vifs' % self.node.uuid,
+ request_body,
+ headers={api_base.Version.string:
+ self.vif_version})
+
+ self.assertEqual(http_client.NO_CONTENT, ret.status_code)
+ mock_get.assert_called_once_with(mock.ANY, self.node.uuid)
+ mock_attach.assert_called_once_with(mock.ANY, self.node.uuid,
+ vif_info=request_body,
+ topic='test-topic')
+
+ @mock.patch.object(objects.Node, 'get_by_uuid')
@mock.patch.object(rpcapi.ConductorAPI, 'vif_detach')
def test_vif_detach(self, mock_detach, mock_get):
vif_id = uuidutils.generate_uuid()
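
Note: these two cases capture the rule introduced with microversion 1.67: a
VIF attach request may name a port or a portgroup, but not both. Sketch of
the rejected payload (UUIDs are placeholders):

    request_body = {
        'id': 'some-vif-uuid',
        'port_uuid': 'port-uuid',
        'portgroup_uuid': 'portgroup-uuid',
    }
    # POST /v1/nodes/<uuid>/vifs with X-OpenStack-Ironic-API-Version: 1.67
    # -> 400 Bad Request; older versions pass the extra keys through to the
    #    conductor unchanged, as the second test asserts.
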
diff --git a/ironic/tests/unit/api/controllers/v1/test_notification_utils.py b/ironic/tests/unit/api/controllers/v1/test_notification_utils.py
index 30c8df134..d7e3e7e82 100644
--- a/ironic/tests/unit/api/controllers/v1/test_notification_utils.py
+++ b/ironic/tests/unit/api/controllers/v1/test_notification_utils.py
@@ -12,7 +12,8 @@
"""Test class for ironic-api notification utilities."""
-import mock
+from unittest import mock
+
from oslo_utils import uuidutils
from ironic.api.controllers.v1 import notification_utils as notif_utils
diff --git a/ironic/tests/unit/api/controllers/v1/test_port.py b/ironic/tests/unit/api/controllers/v1/test_port.py
index 4b67bce73..b5e145b9f 100644
--- a/ironic/tests/unit/api/controllers/v1/test_port.py
+++ b/ironic/tests/unit/api/controllers/v1/test_port.py
@@ -16,14 +16,15 @@ Tests for the API /ports/ methods.
import datetime
from http import client as http_client
import types
+from unittest import mock
from urllib import parse as urlparse
-import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
from testtools import matchers
+from ironic import api
from ironic.api.controllers import base as api_base
from ironic.api.controllers import v1 as api_v1
from ironic.api.controllers.v1 import notification_utils
@@ -195,6 +196,40 @@ class TestPortsController__CheckAllowedPortFields(base.TestCase):
mock_allow_port.assert_called_once_with()
+@mock.patch.object(objects.Port, 'list', autospec=True)
+@mock.patch.object(api, 'request', spec_set=['context'])
+class TestPortsController__GetPortsCollection(base.TestCase):
+
+ def setUp(self):
+ super(TestPortsController__GetPortsCollection, self).setUp()
+ self.controller = api_port.PortsController()
+
+ def test__get_ports_collection(self, mock_request, mock_list):
+ mock_request.context = 'fake-context'
+ mock_list.return_value = []
+ self.controller._get_ports_collection(None, None, None, None, None,
+ None, 'asc')
+ mock_list.assert_called_once_with('fake-context', 1000, None,
+ project=None, sort_dir='asc',
+ sort_key=None)
+
+
+@mock.patch.object(objects.Port, 'get_by_address', autospec=True)
+@mock.patch.object(api, 'request', spec_set=['context'])
+class TestPortsController__GetPortByAddress(base.TestCase):
+
+ def setUp(self):
+ super(TestPortsController__GetPortByAddress, self).setUp()
+ self.controller = api_port.PortsController()
+
+ def test__get_ports_by_address(self, mock_request, mock_gba):
+ mock_request.context = 'fake-context'
+ mock_gba.return_value = None
+ self.controller._get_ports_by_address('fake-address')
+ mock_gba.assert_called_once_with('fake-context', 'fake-address',
+ project=None)
+
+
class TestListPorts(test_api_base.BaseApiTest):
def setUp(self):
@@ -328,7 +363,7 @@ class TestListPorts(test_api_base.BaseApiTest):
'/ports/%s?fields=%s' % (port.uuid, fields),
headers={api_base.Version.string: str(api_v1.max_version())})
# We always append "links"
- self.assertItemsEqual(['address', 'extra', 'links'], data)
+ self.assertCountEqual(['address', 'extra', 'links'], data)
def test_hide_fields_in_newer_versions_internal_info(self):
port = obj_utils.create_test_port(self.context, node_id=self.node.id,
@@ -423,7 +458,7 @@ class TestListPorts(test_api_base.BaseApiTest):
self.assertEqual(3, len(data['ports']))
for port in data['ports']:
# We always append "links"
- self.assertItemsEqual(['uuid', 'extra', 'links'], port)
+ self.assertCountEqual(['uuid', 'extra', 'links'], port)
def test_get_collection_next_marker_no_uuid(self):
fields = 'address'
@@ -479,7 +514,7 @@ class TestListPorts(test_api_base.BaseApiTest):
'/ports/%s?fields=%s' % (port.uuid, fields),
headers={api_base.Version.string: "1.34"})
# We always append "links".
- self.assertItemsEqual(['uuid', 'physical_network', 'links'], response)
+ self.assertCountEqual(['uuid', 'physical_network', 'links'], response)
@mock.patch.object(objects.Port, 'supports_physical_network')
def test_get_custom_fields_physical_network_upgrade(self, mock_spn):
@@ -509,7 +544,7 @@ class TestListPorts(test_api_base.BaseApiTest):
# 'links' field is always retrieved in the response
# regardless of which fields are specified.
- self.assertItemsEqual(['uuid', 'is_smartnic', 'links'], response)
+ self.assertCountEqual(['uuid', 'is_smartnic', 'links'], response)
def test_detail(self):
llc = {'switch_info': 'switch', 'switch_id': 'aa:bb:cc:dd:ee:ff',
@@ -664,9 +699,10 @@ class TestListPorts(test_api_base.BaseApiTest):
self.assertIn('links', data)
self.assertEqual(2, len(data['links']))
self.assertIn(uuid, data['links'][0]['href'])
- for l in data['links']:
- bookmark = l['rel'] == 'bookmark'
- self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))
+ for link in data['links']:
+ bookmark = link['rel'] == 'bookmark'
+ self.assertTrue(self.validate_link(link['href'],
+ bookmark=bookmark))
if public_url is not None:
expected = [{'href': '%s/v1/ports/%s' % (public_url, uuid),
diff --git a/ironic/tests/unit/api/controllers/v1/test_portgroup.py b/ironic/tests/unit/api/controllers/v1/test_portgroup.py
index ec761edae..8d66c414d 100644
--- a/ironic/tests/unit/api/controllers/v1/test_portgroup.py
+++ b/ironic/tests/unit/api/controllers/v1/test_portgroup.py
@@ -15,9 +15,9 @@ Tests for the API /portgroups/ methods.
import datetime
from http import client as http_client
+from unittest import mock
from urllib import parse as urlparse
-import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
@@ -127,7 +127,7 @@ class TestListPortgroups(test_api_base.BaseApiTest):
'/portgroups/%s?fields=%s' % (portgroup.uuid, fields),
headers=self.headers)
# We always append "links"
- self.assertItemsEqual(['address', 'extra', 'links'], data)
+ self.assertCountEqual(['address', 'extra', 'links'], data)
def test_get_one_mode_field_lower_api_version(self):
portgroup = obj_utils.create_test_portgroup(self.context,
@@ -157,7 +157,7 @@ class TestListPortgroups(test_api_base.BaseApiTest):
self.assertEqual(3, len(data['portgroups']))
for portgroup in data['portgroups']:
# We always append "links"
- self.assertItemsEqual(['uuid', 'extra', 'links'], portgroup)
+ self.assertCountEqual(['uuid', 'extra', 'links'], portgroup)
def test_get_collection_properties_field_lower_api_version(self):
obj_utils.create_test_portgroup(self.context, node_id=self.node.id)
@@ -288,9 +288,9 @@ class TestListPortgroups(test_api_base.BaseApiTest):
self.assertIn('ports', data)
self.assertEqual(2, len(data['links']))
self.assertIn(uuid, data['links'][0]['href'])
- for l in data['links']:
- bookmark = l['rel'] == 'bookmark'
- self.assertTrue(self.validate_link(l['href'], bookmark=bookmark,
+ for link in data['links']:
+ bookmark = link['rel'] == 'bookmark'
+ self.assertTrue(self.validate_link(link['href'], bookmark=bookmark,
headers=self.headers))
def test_collection_links(self):
diff --git a/ironic/tests/unit/api/controllers/v1/test_ramdisk.py b/ironic/tests/unit/api/controllers/v1/test_ramdisk.py
index 232146697..1b233fc70 100644
--- a/ironic/tests/unit/api/controllers/v1/test_ramdisk.py
+++ b/ironic/tests/unit/api/controllers/v1/test_ramdisk.py
@@ -16,9 +16,9 @@ Tests for the API /lookup/ methods.
"""
from http import client as http_client
+from unittest import mock
import fixtures
-import mock
from oslo_config import cfg
from oslo_utils import uuidutils
diff --git a/ironic/tests/unit/api/controllers/v1/test_root.py b/ironic/tests/unit/api/controllers/v1/test_root.py
index 449c65def..78d3053e4 100644
--- a/ironic/tests/unit/api/controllers/v1/test_root.py
+++ b/ironic/tests/unit/api/controllers/v1/test_root.py
@@ -12,10 +12,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from webob import exc as webob_exc
from ironic.api.controllers import v1 as v1_api
+from ironic.api.controllers.v1 import versions
from ironic.tests import base as test_base
from ironic.tests.unit.api import base as api_base
@@ -24,8 +26,133 @@ class TestV1Routing(api_base.BaseApiTest):
def test_route_checks_version(self):
self.get_json('/')
self._check_version.assert_called_once_with(mock.ANY,
+ mock.ANY,
mock.ANY)
+ def test_min_version(self):
+ response = self.get_json(
+ '/',
+ headers={
+ 'Accept': 'application/json',
+ 'X-OpenStack-Ironic-API-Version':
+ versions.min_version_string()
+ })
+ self.assertEqual({
+ 'id': 'v1',
+ 'links': [
+ {'href': 'http://localhost/v1/', 'rel': 'self'},
+ {'href': 'https://docs.openstack.org//ironic/latest'
+ '/contributor//webapi.html',
+ 'rel': 'describedby', 'type': 'text/html'}
+ ],
+ 'media_types': {
+ 'base': 'application/json',
+ 'type': 'application/vnd.openstack.ironic.v1+json'
+ },
+ 'version': {
+ 'id': 'v1',
+ 'links': [{'href': 'http://localhost/v1/', 'rel': 'self'}],
+ 'status': 'CURRENT',
+ 'min_version': versions.min_version_string(),
+ 'version': versions.max_version_string()
+ },
+ 'chassis': [
+ {'href': 'http://localhost/v1/chassis/', 'rel': 'self'},
+ {'href': 'http://localhost/chassis/', 'rel': 'bookmark'}
+ ],
+ 'nodes': [
+ {'href': 'http://localhost/v1/nodes/', 'rel': 'self'},
+ {'href': 'http://localhost/nodes/', 'rel': 'bookmark'}
+ ],
+ 'ports': [
+ {'href': 'http://localhost/v1/ports/', 'rel': 'self'},
+ {'href': 'http://localhost/ports/', 'rel': 'bookmark'}
+ ],
+ 'drivers': [
+ {'href': 'http://localhost/v1/drivers/', 'rel': 'self'},
+ {'href': 'http://localhost/drivers/', 'rel': 'bookmark'}
+ ],
+ }, response)
+
+ def test_max_version(self):
+ response = self.get_json(
+ '/',
+ headers={
+ 'Accept': 'application/json',
+ 'X-OpenStack-Ironic-API-Version':
+ versions.max_version_string()
+ })
+ self.assertEqual({
+ 'id': 'v1',
+ 'links': [
+ {'href': 'http://localhost/v1/', 'rel': 'self'},
+ {'href': 'https://docs.openstack.org//ironic/latest'
+ '/contributor//webapi.html',
+ 'rel': 'describedby', 'type': 'text/html'}
+ ],
+ 'media_types': {
+ 'base': 'application/json',
+ 'type': 'application/vnd.openstack.ironic.v1+json'
+ },
+ 'version': {
+ 'id': 'v1',
+ 'links': [{'href': 'http://localhost/v1/', 'rel': 'self'}],
+ 'status': 'CURRENT',
+ 'min_version': versions.min_version_string(),
+ 'version': versions.max_version_string()
+ },
+ 'allocations': [
+ {'href': 'http://localhost/v1/allocations/', 'rel': 'self'},
+ {'href': 'http://localhost/allocations/', 'rel': 'bookmark'}
+ ],
+ 'chassis': [
+ {'href': 'http://localhost/v1/chassis/', 'rel': 'self'},
+ {'href': 'http://localhost/chassis/', 'rel': 'bookmark'}
+ ],
+ 'conductors': [
+ {'href': 'http://localhost/v1/conductors/', 'rel': 'self'},
+ {'href': 'http://localhost/conductors/', 'rel': 'bookmark'}
+ ],
+ 'deploy_templates': [
+ {'href': 'http://localhost/v1/deploy_templates/',
+ 'rel': 'self'},
+ {'href': 'http://localhost/deploy_templates/',
+ 'rel': 'bookmark'}
+ ],
+ 'drivers': [
+ {'href': 'http://localhost/v1/drivers/', 'rel': 'self'},
+ {'href': 'http://localhost/drivers/', 'rel': 'bookmark'}
+ ],
+ 'events': [
+ {'href': 'http://localhost/v1/events/', 'rel': 'self'},
+ {'href': 'http://localhost/events/', 'rel': 'bookmark'}
+ ],
+ 'heartbeat': [
+ {'href': 'http://localhost/v1/heartbeat/', 'rel': 'self'},
+ {'href': 'http://localhost/heartbeat/', 'rel': 'bookmark'}
+ ],
+ 'lookup': [
+ {'href': 'http://localhost/v1/lookup/', 'rel': 'self'},
+ {'href': 'http://localhost/lookup/', 'rel': 'bookmark'}
+ ],
+ 'nodes': [
+ {'href': 'http://localhost/v1/nodes/', 'rel': 'self'},
+ {'href': 'http://localhost/nodes/', 'rel': 'bookmark'}
+ ],
+ 'portgroups': [
+ {'href': 'http://localhost/v1/portgroups/', 'rel': 'self'},
+ {'href': 'http://localhost/portgroups/', 'rel': 'bookmark'}
+ ],
+ 'ports': [
+ {'href': 'http://localhost/v1/ports/', 'rel': 'self'},
+ {'href': 'http://localhost/ports/', 'rel': 'bookmark'}
+ ],
+ 'volume': [
+ {'href': 'http://localhost/v1/volume/', 'rel': 'self'},
+ {'href': 'http://localhost/volume/', 'rel': 'bookmark'}
+ ]
+ }, response)
+
class TestCheckVersions(test_base.TestCase):
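
Note: both new tests fetch the versioned root document, which is also how
clients discover the negotiable microversion range at runtime. Hedged example
against an assumed local deployment:

    import requests

    doc = requests.get('http://ironic.example.com/v1/',
                       headers={'Accept': 'application/json'}).json()
    print(doc['version']['min_version'], doc['version']['version'])
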
diff --git a/ironic/tests/unit/api/controllers/v1/test_types.py b/ironic/tests/unit/api/controllers/v1/test_types.py
index dbf548516..40f8ea0c4 100644
--- a/ironic/tests/unit/api/controllers/v1/test_types.py
+++ b/ironic/tests/unit/api/controllers/v1/test_types.py
@@ -17,10 +17,9 @@
from http import client as http_client
import platform
+from unittest import mock
-import mock
from pecan import rest
-import wsme
from ironic.api.controllers.v1 import types
from ironic.api import expose
@@ -107,7 +106,7 @@ class MyPatchType(types.JsonPatchType):
class MyTest(rest.RestController):
"""Helper class for TestJsonPatchType tests."""
- @wsme.validate([MyPatchType])
+ @expose.validate([MyPatchType])
@expose.expose([str], body=[MyPatchType])
def patch(self, patch):
return patch
@@ -146,7 +145,7 @@ class TestJsonPatchType(api_base.BaseApiTest):
'value': {'cat': 'meow'}}]
ret = self._patch_json(valid_patches, False)
self.assertEqual(http_client.OK, ret.status_int)
- self.assertItemsEqual(valid_patches, ret.json)
+ self.assertCountEqual(valid_patches, ret.json)
def test_cannot_update_internal_attr(self):
patch = [{'path': '/internal', 'op': 'replace', 'value': 'foo'}]
@@ -302,14 +301,14 @@ class TestLocalLinkConnectionType(base.TestCase):
value = {'switch_id': '0a:1b:2c:3d:4e:5f',
'port_id': 'value2',
'switch_info': 'value3'}
- self.assertItemsEqual(value, v.validate(value))
+ self.assertCountEqual(value, v.validate(value))
def test_local_link_connection_type_datapath_id(self):
v = types.locallinkconnectiontype
value = {'switch_id': '0000000000000000',
'port_id': 'value2',
'switch_info': 'value3'}
- self.assertItemsEqual(value,
+ self.assertCountEqual(value,
v.validate(value))
def test_local_link_connection_type_not_mac_or_datapath_id(self):
@@ -339,12 +338,12 @@ class TestLocalLinkConnectionType(base.TestCase):
v = types.locallinkconnectiontype
value = {'switch_id': '0a:1b:2c:3d:4e:5f',
'port_id': 'value2'}
- self.assertItemsEqual(value, v.validate(value))
+ self.assertCountEqual(value, v.validate(value))
def test_local_link_connection_type_empty_value(self):
v = types.locallinkconnectiontype
value = {}
- self.assertItemsEqual(value, v.validate(value))
+ self.assertCountEqual(value, v.validate(value))
def test_local_link_connection_type_smart_nic_keys_mandatory(self):
v = types.locallinkconnectiontype
@@ -377,14 +376,14 @@ class TestLocalLinkConnectionType(base.TestCase):
def test_local_link_connection_net_type_unmanaged(self):
v = types.locallinkconnectiontype
value = {'network_type': 'unmanaged'}
- self.assertItemsEqual(value, v.validate(value))
+ self.assertCountEqual(value, v.validate(value))
def test_local_link_connection_net_type_unmanaged_combine_ok(self):
v = types.locallinkconnectiontype
value = {'network_type': 'unmanaged',
'switch_id': '0a:1b:2c:3d:4e:5f',
'port_id': 'rep0-0'}
- self.assertItemsEqual(value, v.validate(value))
+ self.assertCountEqual(value, v.validate(value))
def test_local_link_connection_net_type_invalid(self):
v = types.locallinkconnectiontype
@@ -398,7 +397,7 @@ class TestVifType(base.TestCase):
def test_vif_type(self):
v = types.viftype
value = {'id': 'foo'}
- self.assertItemsEqual(value, v.validate(value))
+ self.assertCountEqual(value, v.validate(value))
def test_vif_type_missing_mandatory_key(self):
v = types.viftype
@@ -409,7 +408,7 @@ class TestVifType(base.TestCase):
def test_vif_type_optional_key(self):
v = types.viftype
value = {'id': 'foo', 'misc': 'something'}
- self.assertItemsEqual(value, v.frombasetype(value))
+ self.assertCountEqual(value, v.frombasetype(value))
def test_vif_type_bad_id(self):
v = types.viftype
@@ -428,7 +427,7 @@ class TestEventType(base.TestCase):
@mock.patch.object(types.EventType, 'valid_events', set(['valid.event']))
def test_simple_event_type(self):
value = {'event': 'valid.event'}
- self.assertItemsEqual(value, self.v.validate(value))
+ self.assertCountEqual(value, self.v.validate(value))
@mock.patch.object(types.EventType, 'valid_events', set(['valid.event']))
def test_invalid_event_type(self):
@@ -451,7 +450,7 @@ class TestEventType(base.TestCase):
'binding:host_id': '22222222-aaaa-bbbb-cccc-555555555555',
'binding:vnic_type': 'baremetal'
}
- self.assertItemsEqual(value, self.v.validate(value))
+ self.assertCountEqual(value, self.v.validate(value))
def test_invalid_mac_network_port_event(self):
value = {'event': 'network.bind_port',
diff --git a/ironic/tests/unit/api/controllers/v1/test_utils.py b/ironic/tests/unit/api/controllers/v1/test_utils.py
index 3defca326..381422993 100644
--- a/ironic/tests/unit/api/controllers/v1/test_utils.py
+++ b/ironic/tests/unit/api/controllers/v1/test_utils.py
@@ -15,13 +15,12 @@
# under the License.
from http import client as http_client
+import io
+from unittest import mock
-import mock
import os_traits
from oslo_config import cfg
from oslo_utils import uuidutils
-from webob import static
-import wsme
from ironic import api
from ironic.api.controllers.v1 import node as api_node
@@ -106,18 +105,15 @@ class TestApiUtils(base.TestCase):
# Raises a KeyError.
doc = {}
patch = [{"op": "remove", "path": "/foo"}]
- self.assertRaisesRegex(exception.PatchError,
- "can't remove non-existent object 'foo'",
- utils.apply_jsonpatch, doc, patch)
+ self.assertRaises(exception.PatchError,
+ utils.apply_jsonpatch, doc, patch)
def test_apply_jsonpatch_replace_non_existent_list_item(self):
# Raises an IndexError.
doc = []
patch = [{"op": "replace", "path": "/0", "value": 42}]
- self.assertRaisesRegex(exception.PatchError,
- "can't replace outside of list|"
- "list assignment index out of range",
- utils.apply_jsonpatch, doc, patch)
+ self.assertRaises(exception.PatchError,
+ utils.apply_jsonpatch, doc, patch)
def test_get_patch_values_no_path(self):
patch = [{'path': '/name', 'op': 'update', 'value': 'node-0'}]
@@ -692,9 +688,8 @@ class TestVendorPassthru(base.TestCase):
passthru_mock.assert_called_once_with(
'fake-context', 'fake-ident', 'squarepants', 'POST',
'fake-data', 'fake-topic')
- self.assertIsInstance(response, wsme.api.Response)
+ self.assertIsInstance(response, atypes.PassthruResponse)
self.assertEqual('SpongeBob', response.obj)
- self.assertEqual(response.return_type, atypes.Unset)
sc = http_client.ACCEPTED if async_call else http_client.OK
self.assertEqual(sc, response.status_code)
@@ -710,11 +705,10 @@ class TestVendorPassthru(base.TestCase):
def test_driver_vendor_passthru_sync(self):
self._vendor_passthru(async_call=False, driver_passthru=True)
- @mock.patch.object(api, 'response', spec_set=['app_iter'])
@mock.patch.object(api, 'request',
spec_set=['method', 'context', 'rpcapi'])
def _test_vendor_passthru_attach(self, return_value, expct_return_value,
- mock_request, mock_response):
+ mock_request):
return_ = {'return': return_value, 'async': False, 'attach': True}
mock_request.method = 'get'
mock_request.context = 'fake-context'
@@ -727,13 +721,10 @@ class TestVendorPassthru(base.TestCase):
'fake-data', 'fake-topic')
# Assert file was attached to the response object
- self.assertIsInstance(mock_response.app_iter, static.FileIter)
- self.assertEqual(expct_return_value,
- mock_response.app_iter.file.read())
+ self.assertIsInstance(response.obj, io.BytesIO)
+ self.assertEqual(expct_return_value, response.obj.read())
# Assert response message is none
- self.assertIsInstance(response, wsme.api.Response)
- self.assertIsNone(response.obj)
- self.assertIsNone(response.return_type)
+ self.assertIsInstance(response, atypes.PassthruResponse)
self.assertEqual(http_client.OK, response.status_code)
def test_vendor_passthru_attach(self):
diff --git a/ironic/tests/unit/api/controllers/v1/test_versions.py b/ironic/tests/unit/api/controllers/v1/test_versions.py
index 302542950..f6fc528fc 100644
--- a/ironic/tests/unit/api/controllers/v1/test_versions.py
+++ b/ironic/tests/unit/api/controllers/v1/test_versions.py
@@ -16,8 +16,7 @@ Tests for the versions constants and methods.
"""
import re
-
-import mock
+from unittest import mock
from ironic.api.controllers.v1 import versions
from ironic.common import release_mappings
diff --git a/ironic/tests/unit/api/controllers/v1/test_volume.py b/ironic/tests/unit/api/controllers/v1/test_volume.py
index 9bc50d52a..bca326375 100644
--- a/ironic/tests/unit/api/controllers/v1/test_volume.py
+++ b/ironic/tests/unit/api/controllers/v1/test_volume.py
@@ -25,9 +25,9 @@ class TestGetVolume(test_api_base.BaseApiTest):
def _test_links(self, data, key, headers):
self.assertIn(key, data)
self.assertEqual(2, len(data[key]))
- for l in data[key]:
- bookmark = (l['rel'] == 'bookmark')
- self.assertTrue(self.validate_link(l['href'],
+ for link in data[key]:
+ bookmark = (link['rel'] == 'bookmark')
+ self.assertTrue(self.validate_link(link['href'],
bookmark=bookmark,
headers=headers))
diff --git a/ironic/tests/unit/api/controllers/v1/test_volume_connector.py b/ironic/tests/unit/api/controllers/v1/test_volume_connector.py
index a80cf93cc..2c5f23944 100644
--- a/ironic/tests/unit/api/controllers/v1/test_volume_connector.py
+++ b/ironic/tests/unit/api/controllers/v1/test_volume_connector.py
@@ -17,9 +17,9 @@ Tests for the API /volume connectors/ methods.
import datetime
from http import client as http_client
+from unittest import mock
from urllib import parse as urlparse
-import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
@@ -114,7 +114,7 @@ class TestListVolumeConnectors(test_api_base.BaseApiTest):
'/volume/connectors/%s?fields=%s' % (connector.uuid, fields),
headers=self.headers)
# We always append "links"
- self.assertItemsEqual(['connector_id', 'extra', 'links'], data)
+ self.assertCountEqual(['connector_id', 'extra', 'links'], data)
def test_get_collection_custom_fields(self):
fields = 'uuid,extra'
@@ -131,7 +131,7 @@ class TestListVolumeConnectors(test_api_base.BaseApiTest):
self.assertEqual(3, len(data['connectors']))
for connector in data['connectors']:
# We always append "links"
- self.assertItemsEqual(['uuid', 'extra', 'links'], connector)
+ self.assertCountEqual(['uuid', 'extra', 'links'], connector)
def test_get_custom_fields_invalid_fields(self):
connector = obj_utils.create_test_volume_connector(
@@ -233,9 +233,9 @@ class TestListVolumeConnectors(test_api_base.BaseApiTest):
self.assertIn('links', data)
self.assertEqual(2, len(data['links']))
self.assertIn(uuid, data['links'][0]['href'])
- for l in data['links']:
- bookmark = l['rel'] == 'bookmark'
- self.assertTrue(self.validate_link(l['href'], bookmark=bookmark,
+ for link in data['links']:
+ bookmark = link['rel'] == 'bookmark'
+ self.assertTrue(self.validate_link(link['href'], bookmark=bookmark,
headers=self.headers))
def test_collection_links(self):
diff --git a/ironic/tests/unit/api/controllers/v1/test_volume_target.py b/ironic/tests/unit/api/controllers/v1/test_volume_target.py
index d724ddffe..613f551a4 100644
--- a/ironic/tests/unit/api/controllers/v1/test_volume_target.py
+++ b/ironic/tests/unit/api/controllers/v1/test_volume_target.py
@@ -17,9 +17,9 @@ Tests for the API /volume targets/ methods.
import datetime
from http import client as http_client
+from unittest import mock
from urllib import parse as urlparse
-import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
@@ -114,7 +114,7 @@ class TestListVolumeTargets(test_api_base.BaseApiTest):
'/volume/targets/%s?fields=%s' % (target.uuid, fields),
headers=self.headers)
# We always append "links"
- self.assertItemsEqual(['boot_index', 'extra', 'links'], data)
+ self.assertCountEqual(['boot_index', 'extra', 'links'], data)
def test_get_collection_custom_fields(self):
fields = 'uuid,extra'
@@ -130,7 +130,7 @@ class TestListVolumeTargets(test_api_base.BaseApiTest):
self.assertEqual(3, len(data['targets']))
for target in data['targets']:
# We always append "links"
- self.assertItemsEqual(['uuid', 'extra', 'links'], target)
+ self.assertCountEqual(['uuid', 'extra', 'links'], target)
def test_get_custom_fields_invalid_fields(self):
target = obj_utils.create_test_volume_target(
@@ -223,9 +223,9 @@ class TestListVolumeTargets(test_api_base.BaseApiTest):
self.assertIn('links', data)
self.assertEqual(2, len(data['links']))
self.assertIn(uuid, data['links'][0]['href'])
- for l in data['links']:
- bookmark = l['rel'] == 'bookmark'
- self.assertTrue(self.validate_link(l['href'], bookmark=bookmark,
+ for link in data['links']:
+ bookmark = link['rel'] == 'bookmark'
+ self.assertTrue(self.validate_link(link['href'], bookmark=bookmark,
headers=self.headers))
def test_collection_links(self):
diff --git a/ironic/tests/unit/api/test_acl.py b/ironic/tests/unit/api/test_acl.py
index c358f5a15..9ebabfac0 100644
--- a/ironic/tests/unit/api/test_acl.py
+++ b/ironic/tests/unit/api/test_acl.py
@@ -17,8 +17,8 @@ are blocked or allowed to be processed.
"""
from http import client as http_client
+from unittest import mock
-import mock
from oslo_config import cfg
from ironic.tests.unit.api import base
diff --git a/ironic/tests/unit/api/test_args.py b/ironic/tests/unit/api/test_args.py
new file mode 100644
index 000000000..549c2efe1
--- /dev/null
+++ b/ironic/tests/unit/api/test_args.py
@@ -0,0 +1,506 @@
+# Copyright 2020 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import decimal
+import io
+
+from webob import multidict
+
+from ironic.api import args
+from ironic.api.controllers.v1 import types
+from ironic.api import functions
+from ironic.api import types as atypes
+from ironic.common import exception
+from ironic.tests import base as test_base
+
+
+class Obj(atypes.Base):
+
+ id = atypes.wsattr(int, mandatory=True)
+ name = str
+ readonly_field = atypes.wsattr(str, readonly=True)
+ default_field = atypes.wsattr(str, default='foo')
+ unset_me = str
+
+
+class NestedObj(atypes.Base):
+ o = Obj
+
+
+class TestArgs(test_base.TestCase):
+
+ def test_fromjson_array(self):
+ atype = atypes.ArrayType(int)
+ self.assertEqual(
+ [0, 1, 1234, None],
+ args.fromjson_array(atype, [0, '1', '1_234', None])
+ )
+ self.assertRaises(ValueError, args.fromjson_array,
+ atype, ['one', 'two', 'three'])
+ self.assertRaises(ValueError, args.fromjson_array,
+ atype, 'one')
+
+ def test_fromjson_dict(self):
+ dtype = atypes.DictType(str, int)
+ self.assertEqual({
+ 'zero': 0,
+ 'one': 1,
+ 'etc': 1234,
+ 'none': None
+ }, args.fromjson_dict(dtype, {
+ 'zero': 0,
+ 'one': '1',
+ 'etc': '1_234',
+ 'none': None
+ }))
+
+ self.assertRaises(ValueError, args.fromjson_dict,
+ dtype, [])
+ self.assertRaises(ValueError, args.fromjson_dict,
+ dtype, {'one': 'one'})
+
+ def test_fromjson_bool(self):
+ for b in (1, 2, True, 'true', 't', 'yes', 'y', 'on', '1'):
+ self.assertTrue(args.fromjson_bool(b))
+ for b in (0, False, 'false', 'f', 'no', 'n', 'off', '0'):
+ self.assertFalse(args.fromjson_bool(b))
+ for b in ('yup', 'yeet', 'NOPE', 3.14):
+ self.assertRaises(ValueError, args.fromjson_bool, b)
+
+ def test_fromjson(self):
+ # parse None
+ self.assertIsNone(args.fromjson(None, None))
+
+ # parse array
+ atype = atypes.ArrayType(int)
+ self.assertEqual(
+ [0, 1, 1234, None],
+ args.fromjson(atype, [0, '1', '1_234', None])
+ )
+
+ # parse dict
+ dtype = atypes.DictType(str, int)
+ self.assertEqual({
+ 'zero': 0,
+ 'one': 1,
+ 'etc': 1234,
+ 'none': None
+ }, args.fromjson(dtype, {
+ 'zero': 0,
+ 'one': '1',
+ 'etc': '1_234',
+ 'none': None
+ }))
+
+ # parse bytes
+ self.assertEqual(
+ b'asdf',
+ args.fromjson(bytes, b'asdf')
+ )
+ self.assertEqual(
+ b'asdf',
+ args.fromjson(bytes, 'asdf')
+ )
+ self.assertEqual(
+ b'33',
+ args.fromjson(bytes, 33)
+ )
+ self.assertEqual(
+ b'3.14',
+ args.fromjson(bytes, 3.14)
+ )
+
+ # parse str
+ self.assertEqual(
+ 'asdf',
+ args.fromjson(str, b'asdf')
+ )
+ self.assertEqual(
+ 'asdf',
+ args.fromjson(str, 'asdf')
+ )
+
+ # parse int/float
+ self.assertEqual(
+ 3,
+ args.fromjson(int, '3')
+ )
+ self.assertEqual(
+ 3,
+ args.fromjson(int, 3)
+ )
+ self.assertEqual(
+ 3.14,
+ args.fromjson(float, 3.14)
+ )
+
+ # parse bool
+ self.assertFalse(args.fromjson(bool, 'no'))
+ self.assertTrue(args.fromjson(bool, 'yes'))
+
+ # parse decimal
+ self.assertEqual(
+ decimal.Decimal(3.14),
+ args.fromjson(decimal.Decimal, 3.14)
+ )
+
+ # parse datetime
+ expected = datetime.datetime(2015, 8, 13, 11, 38, 9, 496475)
+ self.assertEqual(
+ expected,
+ args.fromjson(datetime.datetime, '2015-08-13T11:38:09.496475')
+ )
+
+ # parse complex
+ n = args.fromjson(NestedObj, {'o': {
+ 'id': 1234,
+ 'name': 'an object'
+ }})
+ self.assertIsInstance(n.o, Obj)
+ self.assertEqual(1234, n.o.id)
+ self.assertEqual('an object', n.o.name)
+ self.assertEqual('foo', n.o.default_field)
+
+ # parse usertype
+ self.assertEqual(
+ ['0', '1', '2', 'three'],
+ args.fromjson(types.listtype, '0,1, 2, three')
+ )
+
+ def test_fromjson_complex(self):
+ n = args.fromjson_complex(NestedObj, {'o': {
+ 'id': 1234,
+ 'name': 'an object'
+ }})
+ self.assertIsInstance(n.o, Obj)
+ self.assertEqual(1234, n.o.id)
+ self.assertEqual('an object', n.o.name)
+ self.assertEqual('foo', n.o.default_field)
+
+ e = self.assertRaises(exception.UnknownAttribute,
+ args.fromjson_complex,
+ Obj, {'ooo': {}})
+ self.assertEqual({'ooo'}, e.attributes)
+
+ e = self.assertRaises(exception.InvalidInput, args.fromjson_complex,
+ Obj,
+ {'name': 'an object'})
+ self.assertEqual('id', e.fieldname)
+ self.assertEqual('Mandatory field missing.', e.msg)
+
+ e = self.assertRaises(exception.InvalidInput, args.fromjson_complex,
+ Obj,
+ {'id': 1234, 'readonly_field': 'foo'})
+ self.assertEqual('readonly_field', e.fieldname)
+ self.assertEqual('Cannot set read only field.', e.msg)
+
+ def test_parse(self):
+ # source as bytes
+ s = b'{"o": {"id": 1234, "name": "an object"}}'
+
+ # test bodyarg=True
+ n = args.parse(s, {"o": NestedObj}, True)['o']
+ self.assertEqual(1234, n.o.id)
+ self.assertEqual('an object', n.o.name)
+
+ # source as file
+ s = io.StringIO('{"o": {"id": 1234, "name": "an object"}}')
+
+ # test bodyarg=False
+ n = args.parse(s, {"o": Obj}, False)['o']
+ self.assertEqual(1234, n.id)
+ self.assertEqual('an object', n.name)
+
+ # fromjson ValueError
+ s = '{"o": ["id", "name"]}'
+ self.assertRaises(exception.InvalidInput, args.parse,
+ s, {"o": atypes.DictType(str, str)}, False)
+ s = '["id", "name"]'
+ self.assertRaises(exception.InvalidInput, args.parse,
+ s, {"o": atypes.DictType(str, str)}, True)
+
+ # fromjson UnknownAttribute
+ s = '{"o": {"foo": "bar", "id": 1234, "name": "an object"}}'
+ self.assertRaises(exception.UnknownAttribute, args.parse,
+ s, {"o": NestedObj}, True)
+ self.assertRaises(exception.UnknownAttribute, args.parse,
+ s, {"o": Obj}, False)
+
+ # invalid json
+ s = '{Sunn O)))}'
+ self.assertRaises(exception.ClientSideError, args.parse,
+ s, {"o": Obj}, False)
+
+ # extra args
+ s = '{"foo": "bar", "o": {"id": 1234, "name": "an object"}}'
+ self.assertRaises(exception.UnknownArgument, args.parse,
+ s, {"o": Obj}, False)
+
+ def test_from_param(self):
+ # datetime param
+ expected = datetime.datetime(2015, 8, 13, 11, 38, 9, 496475)
+ self.assertEqual(
+ expected,
+ args.from_param(datetime.datetime, '2015-08-13T11:38:09.496475')
+ )
+ self.assertIsNone(args.from_param(datetime.datetime, None))
+
+ # usertype param
+ self.assertEqual(
+ ['0', '1', '2', 'three'],
+ args.from_param(types.listtype, '0,1, 2, three')
+ )
+
+ # array param
+ atype = atypes.ArrayType(int)
+ self.assertEqual(
+ [0, 1, 1234, None],
+ args.from_param(atype, [0, '1', '1_234', None])
+ )
+ self.assertIsNone(args.from_param(atype, None))
+
+ # string param
+ self.assertEqual('foo', args.from_param(str, 'foo'))
+ self.assertIsNone(args.from_param(str, None))
+
+ # string param with from_params
+ hit_paths = set()
+ params = multidict.MultiDict(
+ foo='bar',
+ )
+ self.assertEqual(
+ 'bar',
+ args.from_params(str, params, 'foo', hit_paths)
+ )
+ self.assertEqual({'foo'}, hit_paths)
+
+ def test_array_from_params(self):
+ hit_paths = set()
+ datatype = atypes.ArrayType(str)
+ params = multidict.MultiDict(
+ foo='bar',
+ one='two'
+ )
+ self.assertEqual(
+ ['bar'],
+ args.from_params(datatype, params, 'foo', hit_paths)
+ )
+ self.assertEqual({'foo'}, hit_paths)
+ self.assertEqual(
+ ['two'],
+ args.array_from_params(datatype, params, 'one', hit_paths)
+ )
+ self.assertEqual({'foo', 'one'}, hit_paths)
+
+ def test_usertype_from_params(self):
+ hit_paths = set()
+ datatype = types.listtype
+ params = multidict.MultiDict(
+ foo='0,1, 2, three',
+ )
+ self.assertEqual(
+ ['0', '1', '2', 'three'],
+ args.usertype_from_params(datatype, params, 'foo', hit_paths)
+ )
+ self.assertEqual(
+ ['0', '1', '2', 'three'],
+ args.from_params(datatype, params, 'foo', hit_paths)
+ )
+ self.assertEqual(
+ atypes.Unset,
+ args.usertype_from_params(datatype, params, 'bar', hit_paths)
+ )
+
+ def test_args_from_args(self):
+
+ fromargs = ['one', 2, [0, '1', '2_34']]
+ fromkwargs = {'foo': '1, 2, 3'}
+
+ @functions.signature(str, str, int, atypes.ArrayType(int),
+ types.listtype)
+ def myfunc(self, first, second, third, foo):
+ pass
+ funcdef = functions.FunctionDefinition.get(myfunc)
+
+ newargs, newkwargs = args.args_from_args(funcdef, fromargs, fromkwargs)
+ self.assertEqual(['one', 2, [0, 1, 234]], newargs)
+ self.assertEqual({'foo': ['1', '2', '3']}, newkwargs)
+
+ def test_args_from_params(self):
+
+ @functions.signature(str, str, int, atypes.ArrayType(int),
+ types.listtype)
+ def myfunc(self, first, second, third, foo):
+ pass
+ funcdef = functions.FunctionDefinition.get(myfunc)
+ params = multidict.MultiDict(
+ foo='0,1, 2, three',
+ third='1',
+ second='2'
+ )
+ self.assertEqual(
+ ([], {'foo': ['0', '1', '2', 'three'], 'second': 2, 'third': [1]}),
+ args.args_from_params(funcdef, params)
+ )
+
+ # unexpected param
+ params = multidict.MultiDict(bar='baz')
+ self.assertRaises(exception.UnknownArgument, args.args_from_params,
+ funcdef, params)
+
+ # no params plus a body
+ params = multidict.MultiDict(__body__='')
+ self.assertEqual(
+ ([], {}),
+ args.args_from_params(funcdef, params)
+ )
+
+ def test_args_from_body(self):
+ @functions.signature(str, body=NestedObj)
+ def myfunc(self, nested):
+ pass
+ funcdef = functions.FunctionDefinition.get(myfunc)
+ mimetype = 'application/json'
+ body = b'{"o": {"id": 1234, "name": "an object"}}'
+ newargs, newkwargs = args.args_from_body(funcdef, body, mimetype)
+
+ self.assertEqual(1234, newkwargs['nested'].o.id)
+ self.assertEqual('an object', newkwargs['nested'].o.name)
+
+ self.assertEqual(
+ ((), {}),
+ args.args_from_body(funcdef, None, mimetype)
+ )
+
+ self.assertRaises(exception.ClientSideError, args.args_from_body,
+ funcdef, body, 'application/x-corba')
+
+ self.assertEqual(
+ ((), {}),
+ args.args_from_body(funcdef, body,
+ 'application/x-www-form-urlencoded')
+ )
+
+ def test_combine_args(self):
+
+ @functions.signature(str, str, int)
+ def myfunc(self, first, second,):
+ pass
+ funcdef = functions.FunctionDefinition.get(myfunc)
+
+ # empty
+ self.assertEqual(
+ ([], {}),
+ args.combine_args(
+ funcdef, (
+ ([], {}),
+ ([], {}),
+ )
+ )
+ )
+
+ # combine kwargs
+ self.assertEqual(
+ ([], {'first': 'one', 'second': 'two'}),
+ args.combine_args(
+ funcdef, (
+ ([], {}),
+ ([], {'first': 'one', 'second': 'two'}),
+ )
+ )
+ )
+
+ # combine mixed args
+ self.assertEqual(
+ ([], {'first': 'one', 'second': 'two'}),
+ args.combine_args(
+ funcdef, (
+ (['one'], {}),
+ ([], {'second': 'two'}),
+ )
+ )
+ )
+
+ # override kwargs
+ self.assertEqual(
+ ([], {'first': 'two'}),
+ args.combine_args(
+ funcdef, (
+ ([], {'first': 'one'}),
+ ([], {'first': 'two'}),
+ ),
+ allow_override=True
+ )
+ )
+
+ # override args
+ self.assertEqual(
+ ([], {'first': 'two', 'second': 'three'}),
+ args.combine_args(
+ funcdef, (
+ (['one', 'three'], {}),
+ (['two'], {}),
+ ),
+ allow_override=True
+ )
+ )
+
+ # can't override args
+ self.assertRaises(exception.ClientSideError, args.combine_args,
+ funcdef,
+ ((['one'], {}), (['two'], {})))
+
+ # can't override kwargs
+ self.assertRaises(exception.ClientSideError, args.combine_args,
+ funcdef,
+ (([], {'first': 'one'}), ([], {'first': 'two'})))
+
+ def test_get_args(self):
+ @functions.signature(str, str, int, atypes.ArrayType(int),
+ types.listtype, body=NestedObj)
+ def myfunc(self, first, second, third, foo, nested):
+ pass
+ funcdef = functions.FunctionDefinition.get(myfunc)
+ params = multidict.MultiDict(
+ foo='0,1, 2, three',
+ second='2'
+ )
+ mimetype = 'application/json'
+ body = b'{"o": {"id": 1234, "name": "an object"}}'
+ fromargs = ['one']
+ fromkwargs = {'third': '1'}
+
+ newargs, newkwargs = args.get_args(funcdef, fromargs, fromkwargs,
+ params, body, mimetype)
+ self.assertEqual([], newargs)
+ n = newkwargs.pop('nested')
+ self.assertEqual({
+ 'first': 'one',
+ 'foo': ['0', '1', '2', 'three'],
+ 'second': 2,
+ 'third': [1]},
+ newkwargs
+ )
+ self.assertEqual(1234, n.o.id)
+ self.assertEqual('an object', n.o.name)
+
+ # check_arguments missing mandatory argument 'second'
+ params = multidict.MultiDict(
+ foo='0,1, 2, three',
+ )
+ self.assertRaises(exception.MissingArgument, args.get_args,
+ funcdef, fromargs, fromkwargs,
+ params, body, mimetype)
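
Note: a hedged recap of the coercions this new file pins down for
ironic.api.args.fromjson:

    from ironic.api import args

    args.fromjson(int, '3')      # -> 3      (numeric strings are coerced)
    args.fromjson(bytes, 3.14)   # -> b'3.14'
    args.fromjson(bool, 'yes')   # -> True   (y/yes/t/true/on/1 accepted)
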
diff --git a/ironic/tests/unit/api/test_audit.py b/ironic/tests/unit/api/test_audit.py
index c621013d3..d85ed3e85 100644
--- a/ironic/tests/unit/api/test_audit.py
+++ b/ironic/tests/unit/api/test_audit.py
@@ -13,8 +13,9 @@
Tests to assert that audit middleware works as expected.
"""
+from unittest import mock
+
from keystonemiddleware import audit
-import mock
from oslo_config import cfg
from ironic.common import exception
@@ -32,7 +33,7 @@ class TestAuditMiddleware(base.BaseApiTest):
the test suite in keystone audit_middleware.
"""
- @mock.patch.object(audit, 'AuditMiddleware')
+ @mock.patch.object(audit, 'AuditMiddleware', autospec=True)
def test_enable_audit_request(self, mock_audit):
CONF.audit.enabled = True
self._make_app()
@@ -41,7 +42,7 @@ class TestAuditMiddleware(base.BaseApiTest):
audit_map_file=CONF.audit.audit_map_file,
ignore_req_list=CONF.audit.ignore_req_list)
- @mock.patch.object(audit, 'AuditMiddleware')
+ @mock.patch.object(audit, 'AuditMiddleware', autospec=True)
def test_enable_audit_request_error(self, mock_audit):
CONF.audit.enabled = True
mock_audit.side_effect = IOError("file access error")
@@ -49,7 +50,7 @@ class TestAuditMiddleware(base.BaseApiTest):
self.assertRaises(exception.InputFileError,
self._make_app)
- @mock.patch.object(audit, 'AuditMiddleware')
+ @mock.patch.object(audit, 'AuditMiddleware', autospec=True)
def test_disable_audit_request(self, mock_audit):
CONF.audit.enabled = False
self._make_app()
diff --git a/ironic/tests/unit/api/test_functions.py b/ironic/tests/unit/api/test_functions.py
new file mode 100644
index 000000000..2ccd4134d
--- /dev/null
+++ b/ironic/tests/unit/api/test_functions.py
@@ -0,0 +1,88 @@
+# Copyright 2020 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from ironic.api import functions
+from ironic.tests import base as test_base
+
+
+class TestFunctionDefinition(test_base.TestCase):
+
+ def test_get_arg(self):
+ def myfunc(self, a):
+ pass
+
+ fd = functions.FunctionDefinition(myfunc)
+ fd.arguments.append(functions.FunctionArgument('a', int, True, 0))
+ arg = fd.get_arg('a')
+ self.assertEqual(int, arg.datatype)
+ self.assertEqual('a', arg.name)
+ self.assertEqual(True, arg.mandatory)
+ self.assertEqual(0, arg.default)
+ self.assertIsNone(fd.get_arg('b'))
+
+ def test_set_arg_types(self):
+ def myfunc(self, string, integer, boolean=True):
+ pass
+
+ fd = functions.FunctionDefinition(myfunc)
+ argspec = functions.getargspec(myfunc)
+ fd.set_arg_types(argspec, [str, int, bool])
+
+ arg = fd.get_arg('string')
+ self.assertEqual(str, arg.datatype)
+ self.assertEqual('string', arg.name)
+ self.assertEqual(True, arg.mandatory)
+ self.assertIsNone(arg.default)
+
+ arg = fd.get_arg('integer')
+ self.assertEqual(int, arg.datatype)
+ self.assertEqual('integer', arg.name)
+ self.assertEqual(True, arg.mandatory)
+ self.assertIsNone(arg.default)
+
+ arg = fd.get_arg('boolean')
+ self.assertEqual(bool, arg.datatype)
+ self.assertEqual('boolean', arg.name)
+ self.assertEqual(False, arg.mandatory)
+ self.assertTrue(arg.default)
+
+ def test_signature(self):
+ @functions.signature(str, str, int, bool)
+ def myfunc(self, string, integer, boolean=True):
+ '''Do the thing with the thing '''
+ return 'result'
+
+ fd = myfunc._wsme_definition
+ self.assertEqual('myfunc', fd.name)
+ self.assertEqual('Do the thing with the thing ', fd.doc)
+ self.assertEqual(str, fd.return_type)
+
+ arg = fd.get_arg('string')
+ self.assertEqual(str, arg.datatype)
+ self.assertEqual('string', arg.name)
+ self.assertEqual(True, arg.mandatory)
+ self.assertIsNone(arg.default)
+
+ arg = fd.get_arg('integer')
+ self.assertEqual(int, arg.datatype)
+ self.assertEqual('integer', arg.name)
+ self.assertEqual(True, arg.mandatory)
+ self.assertIsNone(arg.default)
+
+ arg = fd.get_arg('boolean')
+ self.assertEqual(bool, arg.datatype)
+ self.assertEqual('boolean', arg.name)
+ self.assertEqual(False, arg.mandatory)
+ self.assertTrue(arg.default)
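
Note: as test_signature shows, the decorator records the declared types on the
wrapped function itself, which expose() and the args module later consult. A
minimal sketch reusing only names from this file:

    from ironic.api import functions

    @functions.signature(str, str, int, bool)
    def myfunc(self, string, integer, boolean=True):
        return 'result'

    fd = functions.FunctionDefinition.get(myfunc)
    print(fd.return_type, fd.get_arg('integer').datatype)  # str, int
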
diff --git a/ironic/tests/unit/api/test_healthcheck.py b/ironic/tests/unit/api/test_healthcheck.py
index 364ba8cf8..2b7e4bad8 100644
--- a/ironic/tests/unit/api/test_healthcheck.py
+++ b/ironic/tests/unit/api/test_healthcheck.py
@@ -13,7 +13,8 @@
Tests to assert that healthcheck middleware works as expected.
"""
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_middleware import healthcheck
@@ -26,13 +27,13 @@ CONF = cfg.CONF
class TestHealthcheckMiddleware(base.BaseApiTest):
"""Provide a basic smoke test to ensure healthcheck middleware works."""
- @mock.patch.object(healthcheck, 'Healthcheck')
+ @mock.patch.object(healthcheck, 'Healthcheck', autospec=True)
def test_enable(self, mock_healthcheck):
CONF.set_override('enabled', True, group='healthcheck')
self._make_app()
mock_healthcheck.assert_called_once_with(mock.ANY, CONF)
- @mock.patch.object(healthcheck, 'Healthcheck')
+ @mock.patch.object(healthcheck, 'Healthcheck', autospec=True)
def test_disable(self, mock_healthcheck):
CONF.set_override('enabled', False, group='healthcheck')
self._make_app()
diff --git a/ironic/tests/unit/api/test_hooks.py b/ironic/tests/unit/api/test_hooks.py
index 095d08a36..45058bbe4 100644
--- a/ironic/tests/unit/api/test_hooks.py
+++ b/ironic/tests/unit/api/test_hooks.py
@@ -16,8 +16,8 @@
from http import client as http_client
import json
+from unittest import mock
-import mock
from oslo_config import cfg
import oslo_messaging as messaging
@@ -103,7 +103,7 @@ class TestNoExceptionTracebackHook(base.BaseApiTest):
def setUp(self):
super(TestNoExceptionTracebackHook, self).setUp()
- p = mock.patch.object(root.Root, 'convert')
+ p = mock.patch.object(root, 'root', autospec=True)
self.root_convert_mock = p.start()
self.addCleanup(p.stop)
@@ -204,8 +204,8 @@ class TestNoExceptionTracebackHook(base.BaseApiTest):
class TestContextHook(base.BaseApiTest):
- @mock.patch.object(context, 'RequestContext')
- @mock.patch.object(policy, 'check')
+ @mock.patch.object(context, 'RequestContext', autospec=True)
+ @mock.patch.object(policy, 'check', autospec=True)
def _test_context_hook(self, mock_policy, mock_ctx, is_admin=False,
is_public_api=False, auth_strategy='keystone',
request_id=None):
@@ -260,8 +260,8 @@ class TestContextHook(base.BaseApiTest):
class TestPolicyDeprecation(tests_base.TestCase):
@mock.patch.object(hooks, 'CHECKED_DEPRECATED_POLICY_ARGS', False)
- @mock.patch.object(hooks.LOG, 'warning')
- @mock.patch.object(policy, 'get_enforcer')
+ @mock.patch.object(hooks.LOG, 'warning', autospec=True)
+ @mock.patch.object(policy, 'get_enforcer', autospec=True)
def test_policy_deprecation_check(self, enforcer_mock, warning_mock):
rules = {'is_member': 'project_name:demo or tenant:baremetal',
'is_default_project_domain': 'project_domain_id:default'}
diff --git a/ironic/tests/unit/api/test_middleware.py b/ironic/tests/unit/api/test_middleware.py
index dfc7ed991..80f768fd1 100644
--- a/ironic/tests/unit/api/test_middleware.py
+++ b/ironic/tests/unit/api/test_middleware.py
@@ -16,11 +16,15 @@ Tests to assert that various incorporated middleware works as expected.
"""
from http import client as http_client
+import os
+import tempfile
from oslo_config import cfg
import oslo_middleware.cors as cors_middleware
from ironic.tests.unit.api import base
+from ironic.tests.unit.api import utils
+from ironic.tests.unit.db import utils as db_utils
class TestCORSMiddleware(base.BaseApiTest):
@@ -112,3 +116,38 @@ class TestCORSMiddleware(base.BaseApiTest):
self.assertEqual(
self._response_string(http_client.OK), response.status)
self.assertNotIn('Access-Control-Allow-Origin', response.headers)
+
+
+class TestBasicAuthMiddleware(base.BaseApiTest):
+
+ def _make_app(self):
+ with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
+ f.write('myName:$2y$05$lE3eGtyj41jZwrzS87KTqe6.'
+ 'JETVCWBkc32C63UP2aYrGoYOEpbJm\n\n\n')
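+ # An htpasswd-style bcrypt entry for user 'myName'; it is assumed
+ # to hash the 'myPassword' credential used by the tests below.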
+ cfg.CONF.set_override('http_basic_auth_user_file', f.name)
+ self.addCleanup(os.remove, cfg.CONF.http_basic_auth_user_file)
+
+ cfg.CONF.set_override('auth_strategy', 'http_basic')
+ return super(TestBasicAuthMiddleware, self)._make_app()
+
+ def setUp(self):
+ super(TestBasicAuthMiddleware, self).setUp()
+ self.environ = {'fake.cache': utils.FakeMemcache()}
+ self.fake_db_node = db_utils.get_test_node(chassis_id=None)
+
+ def test_not_authenticated(self):
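+ # An unauthenticated request must yield a 401 carrying a Basic
+ # challenge (RFC 7617) that names the realm.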
+ response = self.get_json('/chassis', expect_errors=True)
+ self.assertEqual(http_client.UNAUTHORIZED, response.status_int)
+ self.assertEqual(
+ 'Basic realm="Baremetal API"',
+ response.headers['WWW-Authenticate']
+ )
+
+ def test_authenticated(self):
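+ # 'bXlOYW1lOm15UGFzc3dvcmQ=' is base64.b64encode(b'myName:myPassword'),
+ # matching the password file written in _make_app().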
+ auth_header = {'Authorization': 'Basic bXlOYW1lOm15UGFzc3dvcmQ='}
+ response = self.get_json('/chassis', headers=auth_header)
+ self.assertEqual({'chassis': []}, response)
+
+ def test_public_unauthenticated(self):
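+ # The API root is a public endpoint, so the version document is
+ # served without credentials.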
+ response = self.get_json('/')
+ self.assertEqual('v1', response['id'])
diff --git a/ironic/tests/unit/api/test_ospmiddleware.py b/ironic/tests/unit/api/test_ospmiddleware.py
index 48638aff7..555251dd7 100644
--- a/ironic/tests/unit/api/test_ospmiddleware.py
+++ b/ironic/tests/unit/api/test_ospmiddleware.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
from osprofiler import web
@@ -29,13 +30,13 @@ class TestOsprofilerWsgiMiddleware(base.BaseApiTest):
def setUp(self):
super(TestOsprofilerWsgiMiddleware, self).setUp()
- @mock.patch.object(web, 'WsgiMiddleware')
+ @mock.patch.object(web, 'WsgiMiddleware', autospec=True)
def test_enable_osp_wsgi_request(self, mock_ospmiddleware):
CONF.profiler.enabled = True
self._make_app()
mock_ospmiddleware.assert_called_once_with(mock.ANY)
- @mock.patch.object(web, 'WsgiMiddleware')
+ @mock.patch.object(web, 'WsgiMiddleware', autospec=True)
def test_disable_osp_wsgi_request(self, mock_ospmiddleware):
CONF.profiler.enabled = False
self._make_app()
diff --git a/ironic/tests/unit/api/test_root.py b/ironic/tests/unit/api/test_root.py
index 9a512d7ad..b784762f3 100644
--- a/ironic/tests/unit/api/test_root.py
+++ b/ironic/tests/unit/api/test_root.py
@@ -44,9 +44,10 @@ class TestRoot(base.BaseApiTest):
self.assertNotIn('<html', response.json['error_message'])
def test_no_html_errors2(self):
- response = self.delete('/v1', expect_errors=True)
+ response = self.delete('/', expect_errors=True)
self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int)
- self.assertIn('Not Allowed', response.json['error_message'])
+ self.assertIn('malformed or otherwise incorrect',
+ response.json['error_message'])
self.assertNotIn('<html', response.json['error_message'])
@@ -68,8 +69,8 @@ class TestV1Root(base.BaseApiTest):
expected_resources = (['chassis', 'drivers', 'nodes', 'ports']
+ additional_expected_resources)
self.assertEqual(sorted(expected_resources), sorted(actual_resources))
- self.assertIn({'type': 'application/vnd.openstack.ironic.v1+json',
- 'base': 'application/json'}, data['media_types'])
+ self.assertEqual({'type': 'application/vnd.openstack.ironic.v1+json',
+ 'base': 'application/json'}, data['media_types'])
version1 = data['version']
self.assertEqual('v1', version1['id'])
diff --git a/ironic/tests/unit/api/test_types.py b/ironic/tests/unit/api/test_types.py
new file mode 100644
index 000000000..fb39ff2b7
--- /dev/null
+++ b/ironic/tests/unit/api/test_types.py
@@ -0,0 +1,566 @@
+# coding: utf-8
+#
+# Copyright 2011-2019 the WSME authors and contributors
+# (See https://opendev.org/x/wsme/)
+#
+# This module is part of WSME and is also released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+
+from ironic.api import types
+from ironic.common import exception as exc
+from ironic.tests import base as test_base
+
+
+def gen_class():
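+ # Build a class via exec() so every call returns a distinct type
+ # object, free of any _wsme_attributes left over from earlier use.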
+ d = {}
+ exec('''class tmp(object): pass''', d)
+ return d['tmp']
+
+
+class TestTypes(test_base.TestCase):
+ def setUp(self):
+ super(TestTypes, self).setUp()
+ types.registry = types.Registry()
+
+ def test_default_usertype(self):
+ class MyType(types.UserType):
+ basetype = str
+
+ My = MyType()
+
+ assert My.validate('a') == 'a'
+ assert My.tobasetype('a') == 'a'
+ assert My.frombasetype('a') == 'a'
+
+ def test_unset(self):
+ u = types.Unset
+
+ assert not u
+
+ def test_flat_type(self):
+ class Flat(object):
+ aint = int
+ abytes = bytes
+ atext = str
+ afloat = float
+
+ types.register_type(Flat)
+
+ assert len(Flat._wsme_attributes) == 4
+ attrs = Flat._wsme_attributes
+ print(attrs)
+
+ assert attrs[0].key == 'aint'
+ assert attrs[0].name == 'aint'
+ assert isinstance(attrs[0], types.wsattr)
+ assert attrs[0].datatype == int
+ assert attrs[0].mandatory is False
+ assert attrs[1].key == 'abytes'
+ assert attrs[1].name == 'abytes'
+ assert attrs[2].key == 'atext'
+ assert attrs[2].name == 'atext'
+ assert attrs[3].key == 'afloat'
+ assert attrs[3].name == 'afloat'
+
+ def test_private_attr(self):
+ class WithPrivateAttrs(object):
+ _private = 12
+
+ types.register_type(WithPrivateAttrs)
+
+ assert len(WithPrivateAttrs._wsme_attributes) == 0
+
+ def test_attribute_order(self):
+ class ForcedOrder(object):
+ _wsme_attr_order = ('a2', 'a1', 'a3')
+ a1 = int
+ a2 = int
+ a3 = int
+
+ types.register_type(ForcedOrder)
+
+ print(ForcedOrder._wsme_attributes)
+ assert ForcedOrder._wsme_attributes[0].key == 'a2'
+ assert ForcedOrder._wsme_attributes[1].key == 'a1'
+ assert ForcedOrder._wsme_attributes[2].key == 'a3'
+
+ c = gen_class()
+ print(c)
+ types.register_type(c)
+ del c._wsme_attributes
+
+ c.a2 = int
+ c.a1 = int
+ c.a3 = int
+
+ types.register_type(c)
+
+ assert c._wsme_attributes[0].key == 'a1', c._wsme_attributes[0].key
+ assert c._wsme_attributes[1].key == 'a2'
+ assert c._wsme_attributes[2].key == 'a3'
+
+ def test_wsproperty(self):
+ class WithWSProp(object):
+ def __init__(self):
+ self._aint = 0
+
+ def get_aint(self):
+ return self._aint
+
+ def set_aint(self, value):
+ self._aint = value
+
+ aint = types.wsproperty(int, get_aint, set_aint, mandatory=True)
+
+ types.register_type(WithWSProp)
+
+ print(WithWSProp._wsme_attributes)
+ assert len(WithWSProp._wsme_attributes) == 1
+ a = WithWSProp._wsme_attributes[0]
+ assert a.key == 'aint'
+ assert a.datatype == int
+ assert a.mandatory
+
+ o = WithWSProp()
+ o.aint = 12
+
+ assert o.aint == 12
+
+ def test_nested(self):
+ class Inner(object):
+ aint = int
+
+ class Outer(object):
+ inner = Inner
+
+ types.register_type(Outer)
+
+ assert hasattr(Inner, '_wsme_attributes')
+ assert len(Inner._wsme_attributes) == 1
+
+ def test_inspect_with_inheritance(self):
+ class Parent(object):
+ parent_attribute = int
+
+ class Child(Parent):
+ child_attribute = int
+
+ types.register_type(Parent)
+ types.register_type(Child)
+
+ assert len(Child._wsme_attributes) == 2
+
+ def test_selfreftype(self):
+ class SelfRefType(object):
+ pass
+
+ SelfRefType.parent = SelfRefType
+
+ types.register_type(SelfRefType)
+
+ def test_inspect_with_property(self):
+ class AType(object):
+ @property
+ def test(self):
+ return 'test'
+
+ types.register_type(AType)
+
+ assert len(AType._wsme_attributes) == 0
+ assert AType().test == 'test'
+
+ def test_enum(self):
+ aenum = types.Enum(str, 'v1', 'v2')
+ assert aenum.basetype is str
+
+ class AType(object):
+ a = aenum
+
+ types.register_type(AType)
+
+ assert AType.a.datatype is aenum
+
+ obj = AType()
+ obj.a = 'v1'
+ assert obj.a == 'v1', repr(obj.a)
+
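+ # 'v.' wildcards are used because the order in which the allowed
+ # values are listed in the message is not guaranteed.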
+ self.assertRaisesRegex(exc.InvalidInput,
+ "Invalid input for field/attribute a. \
+Value: 'v3'. Value should be one of: v., v.",
+ setattr,
+ obj,
+ 'a',
+ 'v3')
+
+ def test_attribute_validation(self):
+ class AType(object):
+ alist = [int]
+ aint = int
+
+ types.register_type(AType)
+
+ obj = AType()
+
+ obj.alist = [1, 2, 3]
+ assert obj.alist == [1, 2, 3]
+ obj.aint = 5
+ assert obj.aint == 5
+
+ self.assertRaises(exc.InvalidInput, setattr, obj, 'alist', 12)
+ self.assertRaises(exc.InvalidInput, setattr, obj, 'alist', [2, 'a'])
+
+ def test_attribute_validation_minimum(self):
+ class ATypeInt(object):
+ attr = types.IntegerType(minimum=1, maximum=5)
+
+ types.register_type(ATypeInt)
+
+ obj = ATypeInt()
+ obj.attr = 2
+
+ # comparing the 'zero' value with the integer minimum (1) raises a
+ # TypeError, which must be wrapped in an InvalidInput exception
+ self.assertRaises(exc.InvalidInput, setattr, obj, 'attr', 'zero')
+
+ def test_text_attribute_conversion(self):
+ class SType(object):
+ atext = str
+ abytes = bytes
+
+ types.register_type(SType)
+
+ obj = SType()
+
+ obj.atext = b'somebytes'
+ assert obj.atext == 'somebytes'
+ assert isinstance(obj.atext, str)
+
+ obj.abytes = 'sometext'
+ assert obj.abytes == b'sometext'
+ assert isinstance(obj.abytes, bytes)
+
+ def test_named_attribute(self):
+ class ABCDType(object):
+ a_list = types.wsattr([int], name='a.list')
+ astr = str
+
+ types.register_type(ABCDType)
+
+ assert len(ABCDType._wsme_attributes) == 2
+ attrs = ABCDType._wsme_attributes
+
+ assert attrs[0].key == 'a_list', attrs[0].key
+ assert attrs[0].name == 'a.list', attrs[0].name
+ assert attrs[1].key == 'astr', attrs[1].key
+ assert attrs[1].name == 'astr', attrs[1].name
+
+ def test_wsattr_del(self):
+ class MyType(object):
+ a = types.wsattr(int)
+
+ types.register_type(MyType)
+
+ value = MyType()
+
+ value.a = 5
+ assert value.a == 5
+ del value.a
+ assert value.a is types.Unset
+
+ def test_validate_dict(self):
+ assert types.validate_value({int: str}, {1: '1', 5: '5'})
+
+ self.assertRaises(ValueError, types.validate_value,
+ {int: str}, [])
+
+ assert types.validate_value({int: str}, {'1': '1', 5: '5'})
+
+ self.assertRaises(ValueError, types.validate_value,
+ {int: str}, {1: 1, 5: '5'})
+
+ def test_validate_list_valid(self):
+ assert types.validate_value([int], [1, 2])
+ assert types.validate_value([int], ['5'])
+
+ def test_validate_list_empty(self):
+ assert types.validate_value([int], []) == []
+
+ def test_validate_list_none(self):
+ v = types.ArrayType(int)
+ assert v.validate(None) is None
+
+ def test_validate_list_invalid_member(self):
+ self.assertRaises(ValueError, types.validate_value, [int],
+ ['not-a-number'])
+
+ def test_validate_list_invalid_type(self):
+ self.assertRaises(ValueError, types.validate_value, [int], 1)
+
+ def test_validate_float(self):
+ self.assertEqual(types.validate_value(float, 1), 1.0)
+ self.assertEqual(types.validate_value(float, '1'), 1.0)
+ self.assertEqual(types.validate_value(float, 1.1), 1.1)
+ self.assertRaises(ValueError, types.validate_value, float, [])
+ self.assertRaises(ValueError, types.validate_value, float,
+ 'not-a-float')
+
+ def test_validate_int(self):
+ self.assertEqual(types.validate_value(int, 1), 1)
+ self.assertEqual(types.validate_value(int, '1'), 1)
+ self.assertRaises(ValueError, types.validate_value, int, 1.1)
+
+ def test_validate_integer_type(self):
+ v = types.IntegerType(minimum=1, maximum=10)
+ v.validate(1)
+ v.validate(5)
+ v.validate(10)
+ self.assertRaises(ValueError, v.validate, 0)
+ self.assertRaises(ValueError, v.validate, 11)
+
+ def test_validate_string_type(self):
+ v = types.StringType(min_length=1, max_length=10,
+ pattern='^[a-zA-Z0-9]*$')
+ v.validate('1')
+ v.validate('12345')
+ v.validate('1234567890')
+ self.assertRaises(ValueError, v.validate, '')
+ self.assertRaises(ValueError, v.validate, '12345678901')
+
+ # Test a pattern validation
+ v.validate('a')
+ v.validate('A')
+ self.assertRaises(ValueError, v.validate, '_')
+
+ def test_validate_string_type_precompile(self):
+ precompile = re.compile('^[a-zA-Z0-9]*$')
+ v = types.StringType(min_length=1, max_length=10,
+ pattern=precompile)
+
+ # Test a pattern validation
+ v.validate('a')
+ v.validate('A')
+ self.assertRaises(ValueError, v.validate, '_')
+
+ def test_validate_string_type_pattern_exception_message(self):
+ regex = '^[a-zA-Z0-9]*$'
+ v = types.StringType(pattern=regex)
+ try:
+ v.validate('_')
+ self.fail()
+ except ValueError as e:
+ self.assertIn(regex, str(e))
+
+ def test_register_invalid_array(self):
+ self.assertRaises(ValueError, types.register_type, [])
+ self.assertRaises(ValueError, types.register_type, [int, str])
+ self.assertRaises(AttributeError, types.register_type, [1])
+
+ def test_register_invalid_dict(self):
+ self.assertRaises(ValueError, types.register_type, {})
+ self.assertRaises(ValueError, types.register_type,
+ {int: str, str: int})
+ self.assertRaises(ValueError, types.register_type,
+ {types.Unset: str})
+
+ def test_list_attribute_no_auto_register(self):
+ class MyType(object):
+ aint = int
+
+ assert not hasattr(MyType, '_wsme_attributes')
+
+ self.assertRaises(TypeError, types.list_attributes, MyType)
+
+ assert not hasattr(MyType, '_wsme_attributes')
+
+ def test_list_of_complextypes(self):
+ class A(object):
+ bs = types.wsattr(['B'])
+
+ class B(object):
+ i = int
+
+ types.register_type(A)
+ types.register_type(B)
+
+ assert A.bs.datatype.item_type is B
+
+ def test_cross_referenced_types(self):
+ class A(object):
+ b = types.wsattr('B')
+
+ class B(object):
+ a = A
+
+ types.register_type(A)
+ types.register_type(B)
+
+ assert A.b.datatype is B
+
+ def test_base(self):
+ class B1(types.Base):
+ b2 = types.wsattr('B2')
+
+ class B2(types.Base):
+ b2 = types.wsattr('B2')
+
+ assert B1.b2.datatype is B2, repr(B1.b2.datatype)
+ assert B2.b2.datatype is B2
+
+ def test_base_init(self):
+ class C1(types.Base):
+ s = str
+
+ c = C1(s='test')
+ assert c.s == 'test'
+
+ def test_array_eq(self):
+ ell = [types.ArrayType(str)]
+ assert types.ArrayType(str) in ell
+
+ def test_array_sample(self):
+ s = types.ArrayType(str).sample()
+ assert isinstance(s, list)
+ assert s
+ assert s[0] == ''
+
+ def test_dict_sample(self):
+ s = types.DictType(str, str).sample()
+ assert isinstance(s, dict)
+ assert s
+ assert s == {'': ''}
+
+ def test_binary_to_base(self):
+ import base64
+ assert types.binary.tobasetype(None) is None
+ expected = base64.encodebytes(b'abcdef')
+ assert types.binary.tobasetype(b'abcdef') == expected
+
+ def test_binary_from_base(self):
+ import base64
+ assert types.binary.frombasetype(None) is None
+ encoded = base64.encodebytes(b'abcdef')
+ assert types.binary.frombasetype(encoded) == b'abcdef'
+
+ def test_wsattr_weakref_datatype(self):
+ # If the datatype inside the wsattr ends up a weakref, it
+ # should be converted to the real type when accessed again by
+ # the property getter.
+ import weakref
+ a = types.wsattr(int)
+ a.datatype = weakref.ref(int)
+ assert a.datatype is int
+
+ def test_wsattr_list_datatype(self):
+ # If the datatype inside the wsattr ends up a list of weakrefs
+ # to types, it should be converted to the real types when
+ # accessed again by the property getter.
+ import weakref
+ a = types.wsattr(int)
+ a.datatype = [weakref.ref(int)]
+ assert isinstance(a.datatype, list)
+ assert a.datatype[0] is int
+
+ def test_unregister(self):
+ class TempType(object):
+ pass
+ types.registry.register(TempType)
+ v = types.registry.lookup('TempType')
+ self.assertIs(v, TempType)
+ types.registry._unregister(TempType)
+ after = types.registry.lookup('TempType')
+ self.assertIsNone(after)
+
+ def test_unregister_twice(self):
+ class TempType(object):
+ pass
+ types.registry.register(TempType)
+ v = types.registry.lookup('TempType')
+ self.assertIs(v, TempType)
+ types.registry._unregister(TempType)
+ # Second call should not raise an exception
+ types.registry._unregister(TempType)
+ after = types.registry.lookup('TempType')
+ self.assertIsNone(after)
+
+ def test_unregister_array_type(self):
+ class TempType(object):
+ pass
+ t = [TempType]
+ types.registry.register(t)
+ self.assertNotEqual(types.registry.array_types, set())
+ types.registry._unregister(t)
+ self.assertEqual(types.registry.array_types, set())
+
+ def test_unregister_array_type_twice(self):
+ class TempType(object):
+ pass
+ t = [TempType]
+ types.registry.register(t)
+ self.assertNotEqual(types.registry.array_types, set())
+ types.registry._unregister(t)
+ # Second call should not raise an exception
+ types.registry._unregister(t)
+ self.assertEqual(types.registry.array_types, set())
+
+ def test_unregister_dict_type(self):
+ class TempType(object):
+ pass
+ t = {str: TempType}
+ types.registry.register(t)
+ self.assertNotEqual(types.registry.dict_types, set())
+ types.registry._unregister(t)
+ self.assertEqual(types.registry.dict_types, set())
+
+ def test_unregister_dict_type_twice(self):
+ class TempType(object):
+ pass
+ t = {str: TempType}
+ types.registry.register(t)
+ self.assertNotEqual(types.registry.dict_types, set())
+ types.registry._unregister(t)
+ # Second call should not raise an exception
+ types.registry._unregister(t)
+ self.assertEqual(types.registry.dict_types, set())
+
+ def test_reregister(self):
+ class TempType(object):
+ pass
+ types.registry.register(TempType)
+ v = types.registry.lookup('TempType')
+ self.assertIs(v, TempType)
+ types.registry.reregister(TempType)
+ after = types.registry.lookup('TempType')
+ self.assertIs(after, TempType)
+
+ def test_reregister_and_add_attr(self):
+ class TempType(object):
+ pass
+ types.registry.register(TempType)
+ attrs = types.list_attributes(TempType)
+ self.assertEqual(attrs, [])
+ TempType.one = str
+ types.registry.reregister(TempType)
+ after = types.list_attributes(TempType)
+ self.assertNotEqual(after, [])
+
+ def test_non_registered_complex_type(self):
+ class TempType(types.Base):
+ __registry__ = None
+
+ self.assertFalse(types.iscomplex(TempType))
+ types.registry.register(TempType)
+ self.assertTrue(types.iscomplex(TempType))
diff --git a/ironic/tests/unit/cmd/test_conductor.py b/ironic/tests/unit/cmd/test_conductor.py
index 8de4ebb40..ef0f8b085 100644
--- a/ironic/tests/unit/cmd/test_conductor.py
+++ b/ironic/tests/unit/cmd/test_conductor.py
@@ -11,7 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
from ironic.cmd import conductor
diff --git a/ironic/tests/unit/cmd/test_dbsync.py b/ironic/tests/unit/cmd/test_dbsync.py
index f1f9e05b9..530b576e8 100644
--- a/ironic/tests/unit/cmd/test_dbsync.py
+++ b/ironic/tests/unit/cmd/test_dbsync.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from ironic.cmd import dbsync
from ironic.common import context
diff --git a/ironic/tests/unit/cmd/test_status.py b/ironic/tests/unit/cmd/test_status.py
index aa4a5258b..f776e2d51 100644
--- a/ironic/tests/unit/cmd/test_status.py
+++ b/ironic/tests/unit/cmd/test_status.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_upgradecheck.upgradecheck import Code
from ironic.cmd import dbsync
diff --git a/ironic/tests/unit/common/json_samples/neutron_network_show.json b/ironic/tests/unit/common/json_samples/neutron_network_show.json
new file mode 100644
index 000000000..7c54850ca
--- /dev/null
+++ b/ironic/tests/unit/common/json_samples/neutron_network_show.json
@@ -0,0 +1,33 @@
+{
+ "network": {
+ "admin_state_up": true,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2016-03-08T20:19:41",
+ "dns_domain": "my-domain.org.",
+ "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
+ "ipv4_address_scope": null,
+ "ipv6_address_scope": null,
+ "l2_adjacency": false,
+ "mtu": 1500,
+ "name": "private-network",
+ "port_security_enabled": true,
+ "project_id": "4fd44f30292945e481c7b8a0c8908869",
+ "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e",
+ "revision_number": 1,
+ "router:external": false,
+ "shared": true,
+ "status": "ACTIVE",
+ "subnets": [
+ "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
+ ],
+ "tags": ["tag1,tag2"],
+ "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
+ "updated_at": "2016-03-08T20:19:41",
+ "vlan_transparent": false,
+ "description": "",
+ "is_default": true
+ }
+}
\ No newline at end of file
diff --git a/ironic/tests/unit/common/json_samples/neutron_network_show_ipv6.json b/ironic/tests/unit/common/json_samples/neutron_network_show_ipv6.json
new file mode 100644
index 000000000..eb955e3b5
--- /dev/null
+++ b/ironic/tests/unit/common/json_samples/neutron_network_show_ipv6.json
@@ -0,0 +1,33 @@
+{
+ "network": {
+ "admin_state_up": true,
+ "availability_zone_hints": [],
+ "availability_zones": [
+ "nova"
+ ],
+ "created_at": "2016-03-08T20:19:41",
+ "dns_domain": "my-domain.org.",
+ "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
+ "ipv4_address_scope": null,
+ "ipv6_address_scope": null,
+ "l2_adjacency": false,
+ "mtu": 1500,
+ "name": "private-network",
+ "port_security_enabled": true,
+ "project_id": "5199666e520f4aed823710aec37cfd38",
+ "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e",
+ "revision_number": 1,
+ "router:external": false,
+ "shared": true,
+ "status": "ACTIVE",
+ "subnets": [
+ "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
+ ],
+ "tags": ["tag1,tag2"],
+ "tenant_id": "5199666e520f4aed823710aec37cfd38",
+ "updated_at": "2016-03-08T20:19:41",
+ "vlan_transparent": false,
+ "description": "",
+ "is_default": true
+ }
+}
diff --git a/ironic/tests/unit/common/json_samples/neutron_port_show.json b/ironic/tests/unit/common/json_samples/neutron_port_show.json
new file mode 100644
index 000000000..925f00fd0
--- /dev/null
+++ b/ironic/tests/unit/common/json_samples/neutron_port_show.json
@@ -0,0 +1,59 @@
+{
+ "port": {
+ "admin_state_up": true,
+ "allowed_address_pairs": [],
+ "binding:host_id": "devstack",
+ "binding:profile": {},
+ "binding:vif_details": {
+ "ovs_hybrid_plug": true,
+ "port_filter": true
+ },
+ "binding:vif_type": "ovs",
+ "binding:vnic_type": "normal",
+ "created_at": "2016-03-08T20:19:41",
+ "data_plane_status": "ACTIVE",
+ "description": "",
+ "device_id": "5e3898d7-11be-483e-9732-b2f5eccd2b2e",
+ "device_owner": "network:router_interface",
+ "dns_assignment": {
+ "hostname": "myport",
+ "ip_address": "10.0.0.2",
+ "fqdn": "myport.my-domain.org"
+ },
+ "dns_domain": "my-domain.org.",
+ "dns_name": "myport",
+ "extra_dhcp_opts": [
+ {
+ "opt_value": "pxelinux.0",
+ "ip_version": 4,
+ "opt_name": "bootfile-name"
+ }
+ ],
+ "fixed_ips": [
+ {
+ "ip_address": "10.0.0.2",
+ "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2"
+ }
+ ],
+ "id": "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2",
+ "ip_allocation": "immediate",
+ "mac_address": "fa:16:3e:23:fd:d7",
+ "mac_learning_enabled": false,
+ "name": "",
+ "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7",
+ "port_security_enabled": false,
+ "project_id": "7e02058126cc4950b75f9970368ba177",
+ "revision_number": 1,
+ "security_groups": [],
+ "status": "ACTIVE",
+ "tags": ["tag1,tag2"],
+ "tenant_id": "7e02058126cc4950b75f9970368ba177",
+ "updated_at": "2016-03-08T20:19:41",
+ "qos_policy_id": "29d5e02e-d5ab-4929-bee4-4a9fc12e22ae",
+ "resource_request": {
+ "required": ["CUSTOM_PHYSNET_PUBLIC", "CUSTOM_VNIC_TYPE_NORMAL"],
+ "resources": {"NET_BW_EGR_KILOBIT_PER_SEC": 1000}
+ },
+ "uplink_status_propagation": false
+ }
+}
\ No newline at end of file
diff --git a/ironic/tests/unit/common/json_samples/neutron_port_show_ipv6.json b/ironic/tests/unit/common/json_samples/neutron_port_show_ipv6.json
new file mode 100644
index 000000000..1dd3ead68
--- /dev/null
+++ b/ironic/tests/unit/common/json_samples/neutron_port_show_ipv6.json
@@ -0,0 +1,59 @@
+{
+ "port": {
+ "admin_state_up": true,
+ "allowed_address_pairs": [],
+ "binding:host_id": "devstack",
+ "binding:profile": {},
+ "binding:vif_details": {
+ "ovs_hybrid_plug": true,
+ "port_filter": true
+ },
+ "binding:vif_type": "ovs",
+ "binding:vnic_type": "normal",
+ "created_at": "2016-03-08T20:19:41",
+ "data_plane_status": "ACTIVE",
+ "description": "",
+ "device_id": "5e3898d7-11be-483e-9732-b2f5eccd2b2e",
+ "device_owner": "network:router_interface",
+ "dns_assignment": {
+ "hostname": "myport",
+ "ip_address": "fd00:203:0:113::2",
+ "fqdn": "myport.my-domain.org"
+ },
+ "dns_domain": "my-domain.org.",
+ "dns_name": "myport",
+ "extra_dhcp_opts": [
+ {
+ "opt_value": "pxelinux.0",
+ "ip_version": 6,
+ "opt_name": "bootfile-name"
+ }
+ ],
+ "fixed_ips": [
+ {
+ "ip_address": "fd00:203:0:113::2",
+ "subnet_id": "906e685a-b964-4d58-9939-9cf3af197c67"
+ }
+ ],
+ "id": "96d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb8",
+ "ip_allocation": "immediate",
+ "mac_address": "52:54:00:4f:ef:b7",
+ "mac_learning_enabled": false,
+ "name": "",
+ "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7",
+ "port_security_enabled": false,
+ "project_id": "7e02058126cc4950b75f9970368ba177",
+ "revision_number": 1,
+ "security_groups": [],
+ "status": "ACTIVE",
+ "tags": ["tag1,tag2"],
+ "tenant_id": "7e02058126cc4950b75f9970368ba177",
+ "updated_at": "2016-03-08T20:19:41",
+ "qos_policy_id": "29d5e02e-d5ab-4929-bee4-4a9fc12e22ae",
+ "resource_request": {
+ "required": ["CUSTOM_PHYSNET_PUBLIC", "CUSTOM_VNIC_TYPE_NORMAL"],
+ "resources": {"NET_BW_EGR_KILOBIT_PER_SEC": 1000}
+ },
+ "uplink_status_propagation": false
+ }
+}
diff --git a/ironic/tests/unit/common/json_samples/neutron_subnet_show.json b/ironic/tests/unit/common/json_samples/neutron_subnet_show.json
new file mode 100644
index 000000000..f1b7ae5a5
--- /dev/null
+++ b/ironic/tests/unit/common/json_samples/neutron_subnet_show.json
@@ -0,0 +1,32 @@
+{
+ "subnet": {
+ "name": "private-subnet",
+ "enable_dhcp": true,
+ "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
+ "segment_id": null,
+ "project_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "dns_nameservers": [],
+ "dns_publish_fixed_ip": false,
+ "allocation_pools": [
+ {
+ "start": "10.0.0.2",
+ "end": "10.0.0.254"
+ }
+ ],
+ "host_routes": [],
+ "ip_version": 4,
+ "gateway_ip": "10.0.0.1",
+ "cidr": "10.0.0.0/24",
+ "id": "08eae331-0402-425a-923c-34f7cfe39c1b",
+ "created_at": "2016-10-10T14:35:34Z",
+ "description": "",
+ "ipv6_address_mode": null,
+ "ipv6_ra_mode": null,
+ "revision_number": 2,
+ "service_types": [],
+ "subnetpool_id": null,
+ "tags": ["tag1,tag2"],
+ "updated_at": "2016-10-10T14:35:34Z"
+ }
+}
diff --git a/ironic/tests/unit/common/json_samples/neutron_subnet_show_ipv6.json b/ironic/tests/unit/common/json_samples/neutron_subnet_show_ipv6.json
new file mode 100644
index 000000000..e5bd1e496
--- /dev/null
+++ b/ironic/tests/unit/common/json_samples/neutron_subnet_show_ipv6.json
@@ -0,0 +1,32 @@
+{
+ "subnet": {
+ "name": "private-subnet",
+ "enable_dhcp": true,
+ "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
+ "segment_id": null,
+ "project_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
+ "dns_nameservers": [],
+ "dns_publish_fixed_ip": false,
+ "allocation_pools": [
+ {
+ "start": "fd00:203:0:113::2",
+ "end": "fd00:203:0:113:ffff:ffff:ffff:ffff"
+ }
+ ],
+ "host_routes": [],
+ "ip_version": 6,
+ "gateway_ip": "fd00:203:0:113::1",
+ "cidr": "fd00:203:0:113::/64",
+ "id": "08eae331-0402-425a-923c-34f7cfe39c1b",
+ "created_at": "2016-10-10T14:35:34Z",
+ "description": "",
+ "ipv6_address_mode": "slaac",
+ "ipv6_ra_mode": null,
+ "revision_number": 2,
+ "service_types": [],
+ "subnetpool_id": null,
+ "tags": ["tag1,tag2"],
+ "updated_at": "2016-10-10T14:35:34Z"
+ }
+}
diff --git a/ironic/tests/unit/common/test_cinder.py b/ironic/tests/unit/common/test_cinder.py
index db5a96fbb..f3bd7ae77 100644
--- a/ironic/tests/unit/common/test_cinder.py
+++ b/ironic/tests/unit/common/test_cinder.py
@@ -14,10 +14,10 @@
import datetime
from http import client as http_client
import json
+from unittest import mock
from cinderclient import exceptions as cinder_exceptions
import cinderclient.v3 as cinderclient
-import mock
from oslo_utils import uuidutils
from ironic.common import cinder
@@ -97,21 +97,6 @@ class TestCinderClient(base.TestCase):
auth=mock.sentinel.auth)
self.assertFalse(mock_sauth.called)
- def test_get_client_deprecated_opts(self, mock_client_init, mock_session,
- mock_auth, mock_sauth, mock_adapter):
-
- self.config(url='http://test-url', group='cinder')
- mock_adapter.return_value = mock_adapter_obj = mock.Mock()
- mock_adapter_obj.get_endpoint.return_value = 'http://test-url'
-
- self._assert_client_call(mock_client_init, 'http://test-url')
- mock_auth.assert_called_once_with('cinder')
- mock_session.assert_called_once_with('cinder')
- mock_adapter.assert_called_once_with(
- 'cinder', session=mock.sentinel.session, auth=mock.sentinel.auth,
- endpoint_override='http://test-url')
- self.assertFalse(mock_sauth.called)
-
class TestCinderUtils(db_base.DbTestCase):
diff --git a/ironic/tests/unit/common/test_context.py b/ironic/tests/unit/common/test_context.py
index eb035deb6..b04613076 100644
--- a/ironic/tests/unit/common/test_context.py
+++ b/ironic/tests/unit/common/test_context.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_context import context as oslo_context
from ironic.common import context
@@ -49,7 +50,7 @@ class RequestContextTestCase(tests_base.TestCase):
{'project_name': 'demo', 'is_public_api': True,
'domain_id': 'meow'})
self.assertEqual('demo', test_context.project_name)
- self.assertEqual('meow', test_context.user_domain)
+ self.assertEqual('meow', test_context.user_domain_id)
self.assertTrue(test_context.is_public_api)
def test_to_policy_values(self):
diff --git a/ironic/tests/unit/common/test_driver_factory.py b/ironic/tests/unit/common/test_driver_factory.py
index fdf14e9fd..682622eec 100644
--- a/ironic/tests/unit/common/test_driver_factory.py
+++ b/ironic/tests/unit/common/test_driver_factory.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import uuidutils
from stevedore import named
diff --git a/ironic/tests/unit/common/test_glance_service.py b/ironic/tests/unit/common/test_glance_service.py
index 02e4914bc..c8532d1b8 100644
--- a/ironic/tests/unit/common/test_glance_service.py
+++ b/ironic/tests/unit/common/test_glance_service.py
@@ -17,11 +17,11 @@
import datetime
import importlib
import time
+from unittest import mock
from glanceclient import client as glance_client
from glanceclient import exc as glance_exc
from keystoneauth1 import loading as kaloading
-import mock
from oslo_config import cfg
from oslo_utils import uuidutils
import retrying
diff --git a/ironic/tests/unit/common/test_image_service.py b/ironic/tests/unit/common/test_image_service.py
index bc0d391b6..d59c1a4bc 100644
--- a/ironic/tests/unit/common/test_image_service.py
+++ b/ironic/tests/unit/common/test_image_service.py
@@ -16,8 +16,8 @@ from http import client as http_client
import io
import os
import shutil
+from unittest import mock
-import mock
from oslo_utils import uuidutils
import requests
import sendfile
diff --git a/ironic/tests/unit/common/test_images.py b/ironic/tests/unit/common/test_images.py
index 9aa711f87..8a90ab55b 100644
--- a/ironic/tests/unit/common/test_images.py
+++ b/ironic/tests/unit/common/test_images.py
@@ -19,10 +19,10 @@ import builtins
import io
import os
import shutil
+from unittest import mock
from ironic_lib import disk_utils
from ironic_lib import utils as ironic_utils
-import mock
from oslo_concurrency import processutils
from oslo_config import cfg
@@ -911,6 +911,27 @@ class FsImageTestCase(base.TestCase):
'output_file', 'tmpdir/kernel-uuid', 'tmpdir/ramdisk-uuid',
configdrive='tmpdir/configdrive', kernel_params=params)
+ @mock.patch.object(images, 'create_isolinux_image_for_bios', autospec=True)
+ @mock.patch.object(images, 'fetch', autospec=True)
+ @mock.patch.object(utils, 'tempdir', autospec=True)
+ def test_create_boot_iso_for_existing_iso(self, tempdir_mock,
+ fetch_images_mock,
+ create_isolinux_mock):
+ mock_file_handle = mock.MagicMock(spec=io.BytesIO)
+ mock_file_handle.__enter__.return_value = 'tmpdir'
+ tempdir_mock.return_value = mock_file_handle
+ base_iso = 'http://fake.local:1234/fake.iso'
+ images.create_boot_iso('ctx', 'output_file', 'kernel-uuid',
+ 'ramdisk-uuid', 'deploy_iso-uuid',
+ 'efiboot-uuid', None,
+ None, None, 'http://configdrive',
+ base_iso=base_iso)
+
+ fetch_images_mock.assert_any_call(
+ 'ctx', base_iso, 'output_file')
+
+ create_isolinux_mock.assert_not_called()
+
@mock.patch.object(image_service, 'get_image_service', autospec=True)
def test_get_glance_image_properties_no_such_prop(self,
image_service_mock):
diff --git a/ironic/tests/unit/common/test_json_rpc.py b/ironic/tests/unit/common/test_json_rpc.py
index 7924ffb86..bc0ccc5d6 100644
--- a/ironic/tests/unit/common/test_json_rpc.py
+++ b/ironic/tests/unit/common/test_json_rpc.py
@@ -10,8 +10,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
+import tempfile
+from unittest import mock
+
import fixtures
-import mock
import oslo_messaging
import webob
@@ -108,7 +111,7 @@ class TestService(test_base.TestCase):
else:
return response.json_body
else:
- self.assertFalse(response.text)
+ return response.text
def _check(self, body, result=None, error=None, request_id='abcd'):
self.assertEqual('2.0', body.pop('jsonrpc'))
@@ -118,6 +121,33 @@ class TestService(test_base.TestCase):
else:
self.assertEqual({'result': result}, body)
+ def _setup_http_basic(self):
+ with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
+ f.write('myName:$2y$05$lE3eGtyj41jZwrzS87KTqe6.'
+ 'JETVCWBkc32C63UP2aYrGoYOEpbJm\n\n\n')
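+ # Same htpasswd-style bcrypt entry as in the API middleware tests;
+ # assumed to match the 'myName:myPassword' credentials sent below.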
+ self.addCleanup(os.remove, f.name)
+ self.config(http_basic_auth_user_file=f.name, group='json_rpc')
+ self.config(auth_strategy='http_basic', group='json_rpc')
+ self.service = server.WSGIService(FakeManager(), self.serializer)
+ self.app = self.server_mock.call_args[0][2]
+
+ def test_http_basic_not_authenticated(self):
+ self._setup_http_basic()
+ self._request('success', {'context': self.ctx, 'x': 42},
+ request_id=None, expected_error=401)
+
+ def test_http_basic(self):
+ self._setup_http_basic()
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Authorization': 'Basic bXlOYW1lOm15UGFzc3dvcmQ='
+ }
+ body = self._request('success', {'context': self.ctx, 'x': 42},
+ headers=headers)
+ self._check(body, result=42)
+
def test_success(self):
body = self._request('success', {'context': self.ctx, 'x': 42})
self._check(body, result=42)
@@ -129,7 +159,7 @@ class TestService(test_base.TestCase):
def test_notification(self):
body = self._request('no_result', {'context': self.ctx},
request_id=None)
- self.assertIsNone(body)
+ self.assertEqual('', body)
def test_no_context(self):
body = self._request('no_context')
@@ -236,7 +266,7 @@ class TestService(test_base.TestCase):
'code': -32601,
})
- def test_no_blacklisted_methods(self):
+ def test_no_deny_methods(self):
for name in ('__init__', '_private', 'init_host', 'value'):
body = self._request(name, {'context': self.ctx})
self._check(body,
@@ -355,6 +385,24 @@ class TestClient(test_base.TestCase):
'rpc.version': '1.42'},
'id': self.context.request_id})
+ def test_call_with_ssl(self, mock_session):
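+ # With use_ssl enabled the client is expected to build an https://
+ # URL on the same default JSON RPC port (8089).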
+ self.config(use_ssl=True, group='json_rpc')
+ response = mock_session.return_value.post.return_value
+ response.json.return_value = {
+ 'jsonrpc': '2.0',
+ 'result': 42
+ }
+ cctx = self.client.prepare('foo.example.com')
+ self.assertEqual('example.com', cctx.host)
+ result = cctx.call(self.context, 'do_something', answer=42)
+ self.assertEqual(42, result)
+ mock_session.return_value.post.assert_called_once_with(
+ 'https://example.com:8089',
+ json={'jsonrpc': '2.0',
+ 'method': 'do_something',
+ 'params': {'answer': 42, 'context': self.ctx_json},
+ 'id': self.context.request_id})
+
def test_cast_success(self, mock_session):
cctx = self.client.prepare('foo.example.com')
self.assertEqual('example.com', cctx.host)
@@ -541,3 +589,92 @@ class TestClient(test_base.TestCase):
'redfish_password': '***'})
resp_text = mock_log.call_args_list[1][0][2]
self.assertEqual(body.replace('passw0rd', '***'), resp_text)
+
+
+@mock.patch('ironic.common.json_rpc.client.keystone', autospec=True)
+class TestSession(test_base.TestCase):
+
+ def setUp(self):
+ super(TestSession, self).setUp()
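+ # Reset the module-level session cache so that each test exercises
+ # _get_session() from a clean state.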
+ client._SESSION = None
+
+ def test_noauth(self, mock_keystone):
+ self.config(auth_strategy='noauth', group='json_rpc')
+ session = client._get_session()
+
+ mock_keystone.get_auth.assert_called_once_with('json_rpc')
+ auth = mock_keystone.get_auth.return_value
+
+ mock_keystone.get_session.assert_called_once_with(
+ 'json_rpc', auth=auth)
+
+ internal_session = mock_keystone.get_session.return_value
+
+ mock_keystone.get_adapter.assert_called_once_with(
+ 'json_rpc',
+ session=internal_session,
+ additional_headers={
+ 'Content-Type': 'application/json'
+ })
+ self.assertEqual(mock_keystone.get_adapter.return_value, session)
+
+ def test_keystone(self, mock_keystone):
+ self.config(auth_strategy='keystone', group='json_rpc')
+ session = client._get_session()
+
+ mock_keystone.get_auth.assert_called_once_with('json_rpc')
+ auth = mock_keystone.get_auth.return_value
+
+ mock_keystone.get_session.assert_called_once_with(
+ 'json_rpc', auth=auth)
+
+ internal_session = mock_keystone.get_session.return_value
+
+ mock_keystone.get_adapter.assert_called_once_with(
+ 'json_rpc',
+ session=internal_session,
+ additional_headers={
+ 'Content-Type': 'application/json'
+ })
+ self.assertEqual(mock_keystone.get_adapter.return_value, session)
+
+ def test_http_basic(self, mock_keystone):
+ self.config(auth_strategy='http_basic', group='json_rpc')
+ session = client._get_session()
+
+ mock_keystone.get_auth.assert_called_once_with('json_rpc')
+ auth = mock_keystone.get_auth.return_value
+ mock_keystone.get_session.assert_called_once_with(
+ 'json_rpc', auth=auth)
+
+ internal_session = mock_keystone.get_session.return_value
+
+ mock_keystone.get_adapter.assert_called_once_with(
+ 'json_rpc',
+ session=internal_session,
+ additional_headers={
+ 'Content-Type': 'application/json'
+ })
+ self.assertEqual(mock_keystone.get_adapter.return_value, session)
+
+ def test_http_basic_deprecated(self, mock_keystone):
+ self.config(auth_strategy='http_basic', group='json_rpc')
+ self.config(http_basic_username='myName', group='json_rpc')
+ self.config(http_basic_password='myPassword', group='json_rpc')
+ session = client._get_session()
+
+ mock_keystone.get_auth.assert_called_once_with(
+ 'json_rpc', username='myName', password='myPassword')
+ auth = mock_keystone.get_auth.return_value
+ mock_keystone.get_session.assert_called_once_with(
+ 'json_rpc', auth=auth)
+
+ internal_session = mock_keystone.get_session.return_value
+
+ mock_keystone.get_adapter.assert_called_once_with(
+ 'json_rpc',
+ session=internal_session,
+ additional_headers={
+ 'Content-Type': 'application/json'
+ })
+ self.assertEqual(mock_keystone.get_adapter.return_value, session)
diff --git a/ironic/tests/unit/common/test_keystone.py b/ironic/tests/unit/common/test_keystone.py
index 596b6a7fe..ffc08a7f7 100644
--- a/ironic/tests/unit/common/test_keystone.py
+++ b/ironic/tests/unit/common/test_keystone.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from keystoneauth1 import loading as kaloading
-import mock
from oslo_config import cfg
from oslo_config import fixture
diff --git a/ironic/tests/unit/common/test_network.py b/ironic/tests/unit/common/test_network.py
index e6ffcd08c..69b17b01b 100644
--- a/ironic/tests/unit/common/test_network.py
+++ b/ironic/tests/unit/common/test_network.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import uuidutils
from ironic.common import exception
diff --git a/ironic/tests/unit/common/test_neutron.py b/ironic/tests/unit/common/test_neutron.py
index 81d8c99be..d290aaa61 100644
--- a/ironic/tests/unit/common/test_neutron.py
+++ b/ironic/tests/unit/common/test_neutron.py
@@ -11,10 +11,12 @@
# under the License.
import copy
+import json
+import os
import time
+from unittest import mock
from keystoneauth1 import loading as kaloading
-import mock
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client
from oslo_utils import uuidutils
@@ -270,6 +272,30 @@ class TestNeutronNetworkActions(db_base.DbTestCase):
patcher.start()
self.addCleanup(patcher.stop)
+ port_show_file = os.path.join(
+ os.path.dirname(__file__), 'json_samples',
+ 'neutron_port_show.json')
+ with open(port_show_file, 'rb') as fl:
+ self.port_data = json.load(fl)
+
+ self.client_mock.show_port.return_value = self.port_data
+
+ network_show_file = os.path.join(
+ os.path.dirname(__file__), 'json_samples',
+ 'neutron_network_show.json')
+ with open(network_show_file, 'rb') as fl:
+ self.network_data = json.load(fl)
+
+ self.client_mock.show_network.return_value = self.network_data
+
+ subnet_show_file = os.path.join(
+ os.path.dirname(__file__), 'json_samples',
+ 'neutron_subnet_show.json')
+ with open(subnet_show_file, 'rb') as fl:
+ self.subnet_data = json.load(fl)
+
+ self.client_mock.show_subnet.return_value = self.subnet_data
+
@mock.patch.object(neutron, 'update_neutron_port', autospec=True)
def _test_add_ports_to_network(self, update_mock, is_client_id,
security_groups=None,
@@ -667,6 +693,103 @@ class TestNeutronNetworkActions(db_base.DbTestCase):
self.client_mock.delete_port.assert_called_once_with(
self.neutron_port['id'])
+ def test__uncidr_ipv4(self):
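+ # _uncidr splits a CIDR into (network, netmask). A rough stdlib
+ # sketch of the equivalent conversion (illustrative only):
+ #   import ipaddress
+ #   net = ipaddress.ip_network('10.0.0.0/24', strict=False)
+ #   str(net.network_address), str(net.netmask)
+ #   # -> ('10.0.0.0', '255.255.255.0')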
+ network, netmask = neutron._uncidr('10.0.0.0/24')
+ self.assertEqual('10.0.0.0', network)
+ self.assertEqual('255.255.255.0', netmask)
+
+ def test__uncidr_ipv6(self):
+ network, netmask = neutron._uncidr('::1/64', ipv6=True)
+ self.assertEqual('::', network)
+ self.assertEqual('ffff:ffff:ffff:ffff::', netmask)
+
+ def test_get_neutron_port_data(self):
+
+ network_data = neutron.get_neutron_port_data('port0', 'vif0')
+
+ expected_port = {
+ 'id': 'port0',
+ 'type': 'vif',
+ 'ethernet_mac_address': 'fa:16:3e:23:fd:d7',
+ 'vif_id': '46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2',
+ 'mtu': 1500
+ }
+
+ self.assertEqual(expected_port, network_data['links'][0])
+
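+ # The subnet gateway is rendered as an all-zeroes default route in
+ # the standard network_data.json layout checked below.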
+ expected_network = {
+ 'id': 'a0304c3a-4f08-4c43-88af-d796509c97d2',
+ 'network_id': 'a87cc70a-3e15-4acf-8205-9b711a3531b7',
+ 'type': 'ipv4',
+ 'link': 'port0',
+ 'ip_address': '10.0.0.2',
+ 'netmask': '255.255.255.0',
+ 'routes': [
+ {'gateway': '10.0.0.1',
+ 'netmask': '0.0.0.0',
+ 'network': '0.0.0.0'}
+ ]
+ }
+
+ self.assertEqual(expected_network, network_data['networks'][0])
+
+ def load_ipv6_files(self):
+ port_show_file = os.path.join(
+ os.path.dirname(__file__), 'json_samples',
+ 'neutron_port_show_ipv6.json')
+ with open(port_show_file, 'rb') as fl:
+ self.port_data = json.load(fl)
+
+ self.client_mock.show_port.return_value = self.port_data
+
+ network_show_file = os.path.join(
+ os.path.dirname(__file__), 'json_samples',
+ 'neutron_network_show_ipv6.json')
+ with open(network_show_file, 'rb') as fl:
+ self.network_data = json.load(fl)
+
+ self.client_mock.show_network.return_value = self.network_data
+
+ subnet_show_file = os.path.join(
+ os.path.dirname(__file__), 'json_samples',
+ 'neutron_subnet_show_ipv6.json')
+ with open(subnet_show_file, 'rb') as fl:
+ self.subnet_data = json.load(fl)
+
+ self.client_mock.show_subnet.return_value = self.subnet_data
+
+ def test_get_neutron_port_data_ipv6(self):
+ self.load_ipv6_files()
+
+ network_data = neutron.get_neutron_port_data('port1', 'vif1')
+
+ expected_port = {
+ 'id': 'port1',
+ 'type': 'vif',
+ 'ethernet_mac_address': '52:54:00:4f:ef:b7',
+ 'vif_id': '96d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb8',
+ 'mtu': 1500
+ }
+
+ self.assertEqual(expected_port, network_data['links'][0])
+
+ expected_network = {
+ 'id': '906e685a-b964-4d58-9939-9cf3af197c67',
+ 'network_id': 'a87cc70a-3e15-4acf-8205-9b711a3531b7',
+ 'type': 'ipv6',
+ 'link': 'port1',
+ 'ip_address': 'fd00:203:0:113::2',
+ 'netmask': 'ffff:ffff:ffff:ffff::',
+ 'routes': [
+ {'gateway': 'fd00:203:0:113::1',
+ 'netmask': '::0',
+ 'network': '::0'}
+ ]
+ }
+
+ self.assertEqual(expected_network, network_data['networks'][0])
+
def test_get_node_portmap(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
portmap = neutron.get_node_portmap(task)
diff --git a/ironic/tests/unit/common/test_nova.py b/ironic/tests/unit/common/test_nova.py
index 1d63b4fb5..7a3c300c8 100644
--- a/ironic/tests/unit/common/test_nova.py
+++ b/ironic/tests/unit/common/test_nova.py
@@ -10,13 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
import ddt
from keystoneauth1 import exceptions as kaexception
-import mock
import requests
-
from ironic.common import context
from ironic.common import keystone
from ironic.common import nova
diff --git a/ironic/tests/unit/common/test_policy.py b/ironic/tests/unit/common/test_policy.py
index 046706554..67e3ea4d2 100644
--- a/ironic/tests/unit/common/test_policy.py
+++ b/ironic/tests/unit/common/test_policy.py
@@ -16,8 +16,8 @@
# under the License.
import sys
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_policy import policy as oslo_policy
diff --git a/ironic/tests/unit/common/test_pxe_utils.py b/ironic/tests/unit/common/test_pxe_utils.py
index 6950bfe8a..d37c89d65 100644
--- a/ironic/tests/unit/common/test_pxe_utils.py
+++ b/ironic/tests/unit/common/test_pxe_utils.py
@@ -16,9 +16,9 @@
import os
import tempfile
+from unittest import mock
from ironic_lib import utils as ironic_utils
-import mock
from oslo_config import cfg
from oslo_utils import fileutils
from oslo_utils import uuidutils
@@ -108,6 +108,12 @@ class TestPXEUtils(db_base.DbTestCase):
self.ipxe_options_boot_from_volume_extra_volume.pop(
'initrd_filename', None)
+ self.ipxe_options_boot_from_iso = self.ipxe_options.copy()
+ self.ipxe_options_boot_from_iso.update({
+ 'boot_from_iso': True,
+ 'boot_iso_url': 'http://1.2.3.4:1234/uuid/iso'
+ })
+
self.node = object_utils.create_test_node(self.context)
def test_default_pxe_config(self):
@@ -218,6 +224,27 @@ class TestPXEUtils(db_base.DbTestCase):
expected_template = f.read().rstrip()
self.assertEqual(str(expected_template), rendered_template)
+ def test_default_ipxe_boot_from_iso(self):
+ self.config(
+ pxe_config_template='ironic/drivers/modules/ipxe_config.template',
+ group='pxe'
+ )
+ self.config(http_url='http://1.2.3.4:1234', group='deploy')
+
+ pxe_options = self.ipxe_options_boot_from_iso
+
+ rendered_template = utils.render_template(
+ CONF.pxe.pxe_config_template,
+ {'pxe_options': pxe_options,
+ 'ROOT': '{{ ROOT }}'},
+ )
+
+ templ_file = 'ironic/tests/unit/drivers/' \
+ 'ipxe_config_boot_from_iso.template'
+ with open(templ_file) as f:
+ expected_template = f.read().rstrip()
+ self.assertEqual(str(expected_template), rendered_template)
+
def test_default_grub_config(self):
pxe_opts = self.pxe_options
pxe_opts['boot_mode'] = 'uefi'
@@ -645,7 +672,7 @@ class TestPXEUtils(db_base.DbTestCase):
'config'),
pxe_utils.get_pxe_config_file_path(self.node.uuid))
- def _dhcp_options_for_instance(self, ip_version=4):
+ def _dhcp_options_for_instance(self, ip_version=4, ipxe=False):
self.config(ip_version=ip_version, group='pxe')
if ip_version == 4:
self.config(tftp_server='192.0.2.1', group='pxe')
@@ -653,6 +680,10 @@ class TestPXEUtils(db_base.DbTestCase):
self.config(tftp_server='ff80::1', group='pxe')
self.config(pxe_bootfile_name='fake-bootfile', group='pxe')
self.config(tftp_root='/tftp-path/', group='pxe')
+ if ipxe:
+ bootfile = 'fake-bootfile-ipxe'
+ else:
+ bootfile = 'fake-bootfile'
if ip_version == 6:
# NOTE(TheJulia): DHCPv6 RFCs seem to indicate that the prior
@@ -660,11 +691,11 @@ class TestPXEUtils(db_base.DbTestCase):
# by vendors. The apparent proper option is to return a
# URL in the field https://tools.ietf.org/html/rfc5970#section-3
expected_info = [{'opt_name': '59',
- 'opt_value': 'tftp://[ff80::1]/fake-bootfile',
+ 'opt_value': 'tftp://[ff80::1]/%s' % bootfile,
'ip_version': ip_version}]
elif ip_version == 4:
expected_info = [{'opt_name': '67',
- 'opt_value': 'fake-bootfile',
+ 'opt_value': bootfile,
'ip_version': ip_version},
{'opt_name': '210',
'opt_value': '/tftp-path/',
@@ -1036,6 +1067,20 @@ class PXEInterfacesTestCase(db_base.DbTestCase):
image_info = pxe_utils.get_instance_image_info(task)
self.assertEqual({}, image_info)
+ @mock.patch('ironic.drivers.modules.deploy_utils.get_boot_option',
+ return_value='ramdisk')
+ def test_get_instance_image_info_boot_iso(self, boot_opt_mock):
+ self.node.instance_info = {'boot_iso': 'http://localhost/boot.iso'}
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ image_info = pxe_utils.get_instance_image_info(
+ task, ipxe_enabled=True)
+ self.assertEqual('http://localhost/boot.iso',
+ image_info['boot_iso'][0])
+
+ boot_opt_mock.assert_called_once_with(task.node)
+
@mock.patch.object(deploy_utils, 'fetch_images', autospec=True)
def test__cache_tftp_images_master_path(self, mock_fetch_image):
temp_dir = tempfile.mkdtemp()
@@ -1155,8 +1200,6 @@ class PXEBuildConfigOptionsTestCase(db_base.DbTestCase):
ramdisk_params=None):
self.config(debug=debug)
self.config(pxe_append_params='test_param', group='pxe')
- # NOTE: right '/' should be removed from url string
- self.config(api_url='http://192.168.122.184:6385', group='conductor')
driver_internal_info = self.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = whle_dsk_img
@@ -1322,7 +1365,7 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
# URL in the field https://tools.ietf.org/html/rfc5970#section-3
expected_boot_script_url = 'http://[ff80::1]:1234/boot.ipxe'
expected_info = [{'opt_name': '!175,59',
- 'opt_value': 'tftp://[ff80::1]/fake-bootfile',
+ 'opt_value': 'tftp://[ff80::1]/%s' % boot_file,
'ip_version': ip_version},
{'opt_name': '59',
'opt_value': expected_boot_script_url,
@@ -1346,7 +1389,7 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
'opt_value': '192.0.2.1',
'ip_version': ip_version}]
- self.assertItemsEqual(expected_info,
+ self.assertCountEqual(expected_info,
pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True))
@@ -1354,7 +1397,7 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
if ip_version == 6:
# Boot URL variable set from prior test of isc parameters.
expected_info = [{'opt_name': 'tag:!ipxe6,59',
- 'opt_value': 'tftp://[ff80::1]/fake-bootfile',
+ 'opt_value': 'tftp://[ff80::1]/%s' % boot_file,
'ip_version': ip_version},
{'opt_name': 'tag:ipxe6,59',
'opt_value': expected_boot_script_url,
@@ -1377,29 +1420,29 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
'opt_value': '192.0.2.1',
'ip_version': ip_version}]
- self.assertItemsEqual(expected_info,
+ self.assertCountEqual(expected_info,
pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True))
def test_dhcp_options_for_instance_ipxe_bios(self):
self.config(ip_version=4, group='pxe')
- boot_file = 'fake-bootfile-bios'
- self.config(pxe_bootfile_name=boot_file, group='pxe')
+ boot_file = 'fake-bootfile-bios-ipxe'
+ self.config(ipxe_bootfile_name=boot_file, group='pxe')
with task_manager.acquire(self.context, self.node.uuid) as task:
self._dhcp_options_for_instance_ipxe(task, boot_file)
def test_dhcp_options_for_instance_ipxe_uefi(self):
self.config(ip_version=4, group='pxe')
- boot_file = 'fake-bootfile-uefi'
- self.config(uefi_pxe_bootfile_name=boot_file, group='pxe')
+ boot_file = 'fake-bootfile-uefi-ipxe'
+ self.config(uefi_ipxe_bootfile_name=boot_file, group='pxe')
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.properties['capabilities'] = 'boot_mode:uefi'
self._dhcp_options_for_instance_ipxe(task, boot_file)
def test_dhcp_options_for_ipxe_ipv6(self):
self.config(ip_version=6, group='pxe')
- boot_file = 'fake-bootfile'
- self.config(pxe_bootfile_name=boot_file, group='pxe')
+ boot_file = 'fake-bootfile-ipxe'
+ self.config(ipxe_bootfile_name=boot_file, group='pxe')
with task_manager.acquire(self.context, self.node.uuid) as task:
self._dhcp_options_for_instance_ipxe(task, boot_file, ip_version=6)
@@ -1412,11 +1455,10 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
ipxe_use_swift=False,
debug=False,
boot_from_volume=False,
- mode='deploy'):
+ mode='deploy',
+ iso_boot=False):
self.config(debug=debug)
self.config(pxe_append_params='test_param', group='pxe')
- # NOTE: right '/' should be removed from url string
- self.config(api_url='http://192.168.122.184:6385', group='conductor')
self.config(ipxe_timeout=ipxe_timeout, group='pxe')
root_dir = CONF.deploy.http_root
@@ -1520,6 +1562,19 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
expected_options.pop('deployment_ari_path')
expected_options.pop('initrd_filename')
+ if iso_boot:
+ self.node.instance_info = {'boot_iso': 'http://test.url/file.iso'}
+ self.node.save()
+ iso_url = os.path.join(http_url, self.node.uuid, 'boot_iso')
+ expected_options.update({'boot_iso_url': iso_url})
+
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
options = pxe_utils.build_pxe_config_options(task,
@@ -1708,6 +1763,9 @@ class iPXEBuildConfigOptionsTestCase(db_base.DbTestCase):
self._test_build_pxe_config_options_ipxe(mode='rescue',
ipxe_timeout=120)
+ def test_build_pxe_config_options_ipxe_boot_iso(self):
+ self._test_build_pxe_config_options_ipxe(iso_boot=True)
+
@mock.patch('ironic.common.utils.rmtree_without_raise', autospec=True)
@mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
def test_clean_up_ipxe_config_uefi(self, unlink_mock, rmtree_mock):
diff --git a/ironic/tests/unit/common/test_raid.py b/ironic/tests/unit/common/test_raid.py
index fd1677d8a..26ef6a9c0 100644
--- a/ironic/tests/unit/common/test_raid.py
+++ b/ironic/tests/unit/common/test_raid.py
@@ -212,7 +212,8 @@ class RaidPublicMethodsTestCase(db_base.DbTestCase):
self.assertIn('foo', logical_disk_properties)
def _test_update_raid_info(self, current_config,
- capabilities=None):
+ capabilities=None,
+ skip_local_gb=False):
node = self.node
if capabilities:
properties = node.properties
@@ -231,7 +232,10 @@ class RaidPublicMethodsTestCase(db_base.DbTestCase):
if current_config['logical_disks'][0].get('is_root_volume'):
self.assertEqual({'wwn': '600508B100'},
properties['root_device'])
- self.assertEqual(100, properties['local_gb'])
+ if skip_local_gb:
+ self.assertNotIn('local_gb', properties)
+ else:
+ self.assertEqual(100, properties['local_gb'])
self.assertIn('raid_level:1', properties['capabilities'])
if capabilities:
self.assertIn(capabilities, properties['capabilities'])
@@ -267,6 +271,13 @@ class RaidPublicMethodsTestCase(db_base.DbTestCase):
self._test_update_raid_info,
current_config)
+ def test_update_raid_info_skip_MAX(self):
+ current_config = json.loads(raid_constants.CURRENT_RAID_CONFIG)
+ current_config['logical_disks'][0]['size_gb'] = 'MAX'
+ self._test_update_raid_info(current_config,
+ capabilities='boot_mode:bios',
+ skip_local_gb=True)
+
def test_filter_target_raid_config(self):
result = raid.filter_target_raid_config(self.node)
self.assertEqual(self.node.target_raid_config, result)
diff --git a/ironic/tests/unit/common/test_release_mappings.py b/ironic/tests/unit/common/test_release_mappings.py
index defd04be2..b5adfa060 100644
--- a/ironic/tests/unit/common/test_release_mappings.py
+++ b/ironic/tests/unit/common/test_release_mappings.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import versionutils
from ironic.api.controllers.v1 import versions as api_versions
diff --git a/ironic/tests/unit/common/test_rpc.py b/ironic/tests/unit/common/test_rpc.py
index 6cb25098d..1cf8fa787 100644
--- a/ironic/tests/unit/common/test_rpc.py
+++ b/ironic/tests/unit/common/test_rpc.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
import oslo_messaging as messaging
diff --git a/ironic/tests/unit/common/test_rpc_service.py b/ironic/tests/unit/common/test_rpc_service.py
index 755df4f81..f187ff7e0 100644
--- a/ironic/tests/unit/common/test_rpc_service.py
+++ b/ironic/tests/unit/common/test_rpc_service.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
import oslo_messaging
from oslo_service import service as base_service
@@ -35,13 +36,14 @@ class TestRPCService(base.TestCase):
mgr_class = "ConductorManager"
self.rpc_svc = rpc_service.RPCService(host, mgr_module, mgr_class)
+ @mock.patch.object(manager.ConductorManager, 'prepare_host', autospec=True)
@mock.patch.object(oslo_messaging, 'Target', autospec=True)
@mock.patch.object(objects_base, 'IronicObjectSerializer', autospec=True)
@mock.patch.object(rpc, 'get_server', autospec=True)
@mock.patch.object(manager.ConductorManager, 'init_host', autospec=True)
@mock.patch.object(context, 'get_admin_context', autospec=True)
def test_start(self, mock_ctx, mock_init_method,
- mock_rpc, mock_ios, mock_target):
+ mock_rpc, mock_ios, mock_target, mock_prepare_method):
mock_rpc.return_value.start = mock.MagicMock()
self.rpc_svc.handle_signal = mock.MagicMock()
self.rpc_svc.start()
@@ -49,5 +51,6 @@ class TestRPCService(base.TestCase):
mock_target.assert_called_once_with(topic=self.rpc_svc.topic,
server="fake_host")
mock_ios.assert_called_once_with(is_server=True)
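+        # start() must also run prepare_host exactly once with the manager.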
+ mock_prepare_method.assert_called_once_with(self.rpc_svc.manager)
mock_init_method.assert_called_once_with(self.rpc_svc.manager,
mock_ctx.return_value)
diff --git a/ironic/tests/unit/common/test_swift.py b/ironic/tests/unit/common/test_swift.py
index cb53d16ba..44103ea69 100644
--- a/ironic/tests/unit/common/test_swift.py
+++ b/ironic/tests/unit/common/test_swift.py
@@ -15,8 +15,8 @@
import builtins
from http import client as http_client
import io
+from unittest import mock
-import mock
from oslo_config import cfg
from swiftclient import client as swift_client
from swiftclient import exceptions as swift_exception
diff --git a/ironic/tests/unit/common/test_utils.py b/ironic/tests/unit/common/test_utils.py
index 0a652435d..df60b88ca 100644
--- a/ironic/tests/unit/common/test_utils.py
+++ b/ironic/tests/unit/common/test_utils.py
@@ -19,9 +19,9 @@ import os
import os.path
import shutil
import tempfile
+from unittest import mock
import jinja2
-import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import netutils
diff --git a/ironic/tests/unit/common/test_wsgi_service.py b/ironic/tests/unit/common/test_wsgi_service.py
index a489c869b..5af26bf37 100644
--- a/ironic/tests/unit/common/test_wsgi_service.py
+++ b/ironic/tests/unit/common/test_wsgi_service.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_concurrency import processutils
from oslo_config import cfg
diff --git a/ironic/tests/unit/conductor/mgr_utils.py b/ironic/tests/unit/conductor/mgr_utils.py
index 44e492462..9baadaf42 100644
--- a/ironic/tests/unit/conductor/mgr_utils.py
+++ b/ironic/tests/unit/conductor/mgr_utils.py
@@ -17,8 +17,9 @@
"""Test utils for Ironic Managers."""
+from unittest import mock
+
from futurist import periodics
-import mock
from oslo_utils import strutils
from oslo_utils import uuidutils
@@ -142,6 +143,7 @@ class ServiceSetUpMixin(object):
self.service.init_host()
else:
with mock.patch.object(periodics, 'PeriodicWorker', autospec=True):
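+            # Mirror the real start-up order: prepare_host before init_host.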
+ self.service.prepare_host()
self.service.init_host()
self.addCleanup(self._stop_service)
diff --git a/ironic/tests/unit/conductor/test_allocations.py b/ironic/tests/unit/conductor/test_allocations.py
index 18b6025bf..91046b72f 100644
--- a/ironic/tests/unit/conductor/test_allocations.py
+++ b/ironic/tests/unit/conductor/test_allocations.py
@@ -12,7 +12,8 @@
"""Unit tests for functionality related to allocations."""
-import mock
+from unittest import mock
+
import oslo_messaging as messaging
from oslo_utils import uuidutils
diff --git a/ironic/tests/unit/conductor/test_base_manager.py b/ironic/tests/unit/conductor/test_base_manager.py
index 49cacbff5..7680af153 100644
--- a/ironic/tests/unit/conductor/test_base_manager.py
+++ b/ironic/tests/unit/conductor/test_base_manager.py
@@ -13,13 +13,13 @@
"""Test class for Ironic BaseConductorManager."""
import collections
+from unittest import mock
import uuid
import eventlet
import futurist
from futurist import periodics
from ironic_lib import mdns
-import mock
from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_utils import uuidutils
@@ -31,6 +31,7 @@ from ironic.conductor import base_manager
from ironic.conductor import manager
from ironic.conductor import notification_utils
from ironic.conductor import task_manager
+from ironic.db import api as dbapi
from ironic.drivers import fake_hardware
from ironic.drivers import generic
from ironic.drivers.modules import deploy_utils
@@ -92,7 +93,7 @@ class StartStopTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
res = objects.Conductor.get_by_hostname(self.context, self.hostname)
self.assertEqual(self.hostname, res['hostname'])
- @mock.patch.object(manager.ConductorManager, 'init_host')
+ @mock.patch.object(manager.ConductorManager, 'init_host', autospec=True)
def test_stop_uninitialized_conductor(self, mock_init):
self._start_service()
self.service.del_host()
@@ -107,7 +108,8 @@ class StartStopTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_def_iface.return_value = 'fake'
df = driver_factory.HardwareTypesFactory()
- with mock.patch.object(df._extension_manager, 'names') as mock_names:
+ with mock.patch.object(df._extension_manager, 'names',
+ autospec=True) as mock_names:
# verify driver names are registered
self.config(enabled_hardware_types=init_names)
mock_names.return_value = init_names
@@ -180,10 +182,12 @@ class StartStopTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertTrue(periodics.is_periodic(hw_type.task))
self.assertNotIn(hw_type.task, tasks)
- @mock.patch.object(driver_factory.HardwareTypesFactory, '__init__')
+ @mock.patch.object(driver_factory.HardwareTypesFactory, '__init__',
+ autospec=True)
def test_start_fails_on_missing_driver(self, mock_df):
mock_df.side_effect = exception.DriverNotFound('test')
- with mock.patch.object(self.dbapi, 'register_conductor') as mock_reg:
+ with mock.patch.object(self.dbapi, 'register_conductor',
+ autospec=True) as mock_reg:
self.assertRaises(exception.DriverNotFound,
self.service.init_host)
self.assertTrue(mock_df.called)
@@ -195,8 +199,8 @@ class StartStopTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
'options enabled_boot_interfaces',
self.service.init_host)
- @mock.patch.object(base_manager, 'LOG')
- @mock.patch.object(driver_factory, 'HardwareTypesFactory')
+ @mock.patch.object(base_manager, 'LOG', autospec=True)
+ @mock.patch.object(driver_factory, 'HardwareTypesFactory', autospec=True)
def test_start_fails_on_hw_types(self, ht_mock, log_mock):
driver_factory_mock = mock.MagicMock(names=[])
ht_mock.return_value = driver_factory_mock
@@ -205,16 +209,18 @@ class StartStopTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertTrue(log_mock.error.called)
ht_mock.assert_called_once_with()
- @mock.patch.object(base_manager, 'LOG')
+ @mock.patch.object(base_manager, 'LOG', autospec=True)
@mock.patch.object(base_manager.BaseConductorManager,
- '_register_and_validate_hardware_interfaces')
- @mock.patch.object(base_manager.BaseConductorManager, 'del_host')
+ '_register_and_validate_hardware_interfaces',
+ autospec=True)
+ @mock.patch.object(base_manager.BaseConductorManager, 'del_host',
+ autospec=True)
def test_start_fails_hw_type_register(self, del_mock, reg_mock, log_mock):
reg_mock.side_effect = exception.DriverNotFound('hw-type')
self.assertRaises(exception.DriverNotFound,
self.service.init_host)
self.assertTrue(log_mock.error.called)
- del_mock.assert_called_once_with()
+ del_mock.assert_called_once()
def test_prevent_double_start(self):
self._start_service()
@@ -230,6 +236,7 @@ class StartStopTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
(states.ADOPTING, states.ADOPTFAIL),
(states.RESCUING, states.RESCUEFAIL),
(states.UNRESCUING, states.UNRESCUEFAIL),
+ (states.DELETING, states.ERROR),
]
nodes = [obj_utils.create_test_node(self.context, uuid=uuid.uuid4(),
driver='fake-hardware',
@@ -242,13 +249,13 @@ class StartStopTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(state[1], node.provision_state,
'Test failed when recovering from %s' % state[0])
- @mock.patch.object(base_manager, 'LOG')
+ @mock.patch.object(base_manager, 'LOG', autospec=True)
def test_warning_on_low_workers_pool(self, log_mock):
CONF.set_override('workers_pool_size', 3, 'conductor')
self._start_service()
self.assertTrue(log_mock.warning.called)
- @mock.patch.object(eventlet.greenpool.GreenPool, 'waitall')
+ @mock.patch.object(eventlet.greenpool.GreenPool, 'waitall', autospec=True)
def test_del_host_waits_on_workerpool(self, wait_mock):
self._start_service()
self.service.del_host()
@@ -294,6 +301,15 @@ class StartStopTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_zc.close.assert_called_once_with()
self.assertIsNone(self.service._zeroconf)
+ @mock.patch.object(dbapi, 'get_instance', autospec=True)
+ def test_start_dbapi_single_call(self, mock_dbapi):
+ self._start_service()
+        # NOTE(TheJulia): This seems like it should only be 1, but the
+        # hash ring initialization pulls its own database connection
+        # instance, which is likely a good thing, so this is 2 instead
+        # of 3 without reuse of the database connection.
+ self.assertEqual(2, mock_dbapi.call_count)
+
class CheckInterfacesTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test__check_enabled_interfaces_success(self):
@@ -311,9 +327,10 @@ class KeepAliveTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self._start_service()
# avoid wasting time at the event.wait()
CONF.set_override('heartbeat_interval', 0, 'conductor')
- with mock.patch.object(self.dbapi, 'touch_conductor') as mock_touch:
+ with mock.patch.object(self.dbapi, 'touch_conductor',
+ autospec=True) as mock_touch:
with mock.patch.object(self.service._keepalive_evt,
- 'is_set') as mock_is_set:
+ 'is_set', autospec=True) as mock_is_set:
mock_is_set.side_effect = [False, True]
self.service._conductor_service_record_keepalive()
mock_touch.assert_called_once_with(self.hostname)
@@ -322,11 +339,12 @@ class KeepAliveTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self._start_service()
# avoid wasting time at the event.wait()
CONF.set_override('heartbeat_interval', 0, 'conductor')
- with mock.patch.object(self.dbapi, 'touch_conductor') as mock_touch:
+ with mock.patch.object(self.dbapi, 'touch_conductor',
+ autospec=True) as mock_touch:
mock_touch.side_effect = [None, db_exception.DBConnectionError(),
None]
with mock.patch.object(self.service._keepalive_evt,
- 'is_set') as mock_is_set:
+ 'is_set', autospec=True) as mock_is_set:
mock_is_set.side_effect = [False, False, False, True]
self.service._conductor_service_record_keepalive()
self.assertEqual(3, mock_touch.call_count)
@@ -335,11 +353,12 @@ class KeepAliveTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self._start_service()
# avoid wasting time at the event.wait()
CONF.set_override('heartbeat_interval', 0, 'conductor')
- with mock.patch.object(self.dbapi, 'touch_conductor') as mock_touch:
+ with mock.patch.object(self.dbapi, 'touch_conductor',
+ autospec=True) as mock_touch:
mock_touch.side_effect = [None, Exception(),
None]
with mock.patch.object(self.service._keepalive_evt,
- 'is_set') as mock_is_set:
+ 'is_set', autospec=True) as mock_is_set:
mock_is_set.side_effect = [False, False, False, True]
self.service._conductor_service_record_keepalive()
self.assertEqual(3, mock_touch.call_count)
@@ -447,7 +466,8 @@ class RegisterInterfacesTestCase(mgr_utils.ServiceSetUpMixin,
@mock.patch.object(fake.FakeConsole, 'start_console', autospec=True)
-@mock.patch.object(notification_utils, 'emit_console_notification')
+@mock.patch.object(notification_utils, 'emit_console_notification',
+ autospec=True)
class StartConsolesTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test__start_consoles(self, mock_notify, mock_start_console):
obj_utils.create_test_node(self.context,
@@ -500,7 +520,7 @@ class StartConsolesTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock.call(mock.ANY, 'console_restore',
fields.NotificationStatus.ERROR)])
- @mock.patch.object(base_manager, 'LOG')
+ @mock.patch.object(base_manager, 'LOG', autospec=True)
def test__start_consoles_node_locked(self, log_mock, mock_notify,
mock_start_console):
test_node = obj_utils.create_test_node(self.context,
@@ -516,14 +536,15 @@ class StartConsolesTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertTrue(log_mock.warning.called)
self.assertFalse(mock_notify.called)
- @mock.patch.object(base_manager, 'LOG')
+ @mock.patch.object(base_manager, 'LOG', autospec=True)
def test__start_consoles_node_not_found(self, log_mock, mock_notify,
mock_start_console):
test_node = obj_utils.create_test_node(self.context,
driver='fake-hardware',
console_enabled=True)
self._start_service()
- with mock.patch.object(task_manager, 'acquire') as mock_acquire:
+ with mock.patch.object(task_manager, 'acquire',
+ autospec=True) as mock_acquire:
mock_acquire.side_effect = exception.NodeNotFound(node='not found')
self.service._start_consoles(self.context)
self.assertFalse(mock_start_console.called)
diff --git a/ironic/tests/unit/conductor/test_cleaning.py b/ironic/tests/unit/conductor/test_cleaning.py
index f48fee047..6ed4bd270 100644
--- a/ironic/tests/unit/conductor/test_cleaning.py
+++ b/ironic/tests/unit/conductor/test_cleaning.py
@@ -12,7 +12,8 @@
"""Tests for cleaning bits."""
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_utils import uuidutils
@@ -530,11 +531,15 @@ class DoNodeCleanTestCase(db_base.DbTestCase):
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
autospec=True)
def _do_next_clean_step_last_step_noop(self, mock_execute, manual=False,
- retired=False):
+ retired=False, fast_track=False):
+ if fast_track:
+ self.config(fast_track=True, group='deploy')
# Resume where last_step is the last cleaning step, should be noop
tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
info = {'clean_steps': self.clean_steps,
- 'clean_step_index': len(self.clean_steps) - 1}
+ 'clean_step_index': len(self.clean_steps) - 1,
+ 'agent_url': 'test-url',
+ 'agent_secret_token': 'token'}
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
@@ -562,6 +567,15 @@ class DoNodeCleanTestCase(db_base.DbTestCase):
self.assertNotIn('clean_step_index', node.driver_internal_info)
self.assertIsNone(node.driver_internal_info['clean_steps'])
self.assertFalse(mock_execute.called)
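+        # Only fast track keeps the agent URL and token after cleaning.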
+ if fast_track:
+ self.assertEqual('test-url',
+ node.driver_internal_info.get('agent_url'))
+ self.assertIsNotNone(
+ node.driver_internal_info.get('agent_secret_token'))
+ else:
+ self.assertNotIn('agent_url', node.driver_internal_info)
+ self.assertNotIn('agent_secret_token',
+ node.driver_internal_info)
def test__do_next_clean_step_automated_last_step_noop(self):
self._do_next_clean_step_last_step_noop()
@@ -572,12 +586,17 @@ class DoNodeCleanTestCase(db_base.DbTestCase):
def test__do_next_clean_step_retired_last_step_change_tgt_state(self):
self._do_next_clean_step_last_step_noop(retired=True)
+ def test__do_next_clean_step_last_step_noop_fast_track(self):
+ self._do_next_clean_step_last_step_noop(fast_track=True)
+
+ @mock.patch('ironic.drivers.utils.collect_ramdisk_logs', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakePower.execute_clean_step',
autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
autospec=True)
def _do_next_clean_step_all(self, mock_deploy_execute,
- mock_power_execute, manual=False):
+ mock_power_execute, mock_collect_logs,
+ manual=False):
# Run all steps from start to finish (all synchronous)
tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
@@ -617,6 +636,7 @@ class DoNodeCleanTestCase(db_base.DbTestCase):
mock_deploy_execute.assert_has_calls(
[mock.call(mock.ANY, mock.ANY, self.clean_steps[0]),
mock.call(mock.ANY, mock.ANY, self.clean_steps[2])])
+ self.assertFalse(mock_collect_logs.called)
def test_do_next_clean_step_automated_all(self):
self._do_next_clean_step_all()
@@ -624,11 +644,62 @@ class DoNodeCleanTestCase(db_base.DbTestCase):
def test_do_next_clean_step_manual_all(self):
self._do_next_clean_step_all(manual=True)
+ @mock.patch('ironic.drivers.utils.collect_ramdisk_logs', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakePower.execute_clean_step',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
+ autospec=True)
+ def test_do_next_clean_step_collect_logs(self, mock_deploy_execute,
+ mock_power_execute,
+ mock_collect_logs):
+ CONF.set_override('deploy_logs_collect', 'always', group='agent')
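+        # 'always' collects ramdisk logs even when cleaning succeeds.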
+ # Run all steps from start to finish (all synchronous)
+ tgt_prov_state = states.MANAGEABLE
+
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ provision_state=states.CLEANING,
+ target_provision_state=tgt_prov_state,
+ last_error=None,
+ driver_internal_info={'clean_steps': self.clean_steps,
+ 'clean_step_index': None},
+ clean_step={})
+
+ def fake_deploy(conductor_obj, task, step):
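+            # Mutate driver_internal_info mid-step to prove the change
+            # persists once cleaning completes.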
+ driver_internal_info = task.node.driver_internal_info
+ driver_internal_info['goober'] = 'test'
+ task.node.driver_internal_info = driver_internal_info
+ task.node.save()
+
+ mock_deploy_execute.side_effect = fake_deploy
+ mock_power_execute.return_value = None
+
+ with task_manager.acquire(
+ self.context, node.uuid, shared=False) as task:
+ cleaning.do_next_clean_step(task, 0)
+
+ node.refresh()
+
+ # Cleaning should be complete
+ self.assertEqual(tgt_prov_state, node.provision_state)
+ self.assertEqual(states.NOSTATE, node.target_provision_state)
+ self.assertEqual({}, node.clean_step)
+ self.assertNotIn('clean_step_index', node.driver_internal_info)
+ self.assertEqual('test', node.driver_internal_info['goober'])
+ self.assertIsNone(node.driver_internal_info['clean_steps'])
+ mock_power_execute.assert_called_once_with(mock.ANY, mock.ANY,
+ self.clean_steps[1])
+ mock_deploy_execute.assert_has_calls(
+ [mock.call(mock.ANY, mock.ANY, self.clean_steps[0]),
+ mock.call(mock.ANY, mock.ANY, self.clean_steps[2])])
+ mock_collect_logs.assert_called_once_with(mock.ANY, label='cleaning')
+
+ @mock.patch('ironic.drivers.utils.collect_ramdisk_logs', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_clean_step',
autospec=True)
@mock.patch.object(fake.FakeDeploy, 'tear_down_cleaning', autospec=True)
def _do_next_clean_step_execute_fail(self, tear_mock, mock_execute,
- manual=False):
+ mock_collect_logs, manual=False):
# When a clean step fails, go to CLEANFAIL
tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
@@ -658,6 +729,7 @@ class DoNodeCleanTestCase(db_base.DbTestCase):
self.assertTrue(node.maintenance)
mock_execute.assert_called_once_with(
mock.ANY, mock.ANY, self.clean_steps[0])
+ mock_collect_logs.assert_called_once_with(mock.ANY, label='cleaning')
def test__do_next_clean_step_automated_execute_fail(self):
self._do_next_clean_step_execute_fail()
@@ -833,8 +905,9 @@ class DoNodeCleanTestCase(db_base.DbTestCase):
self.config(fast_track=True, group='deploy')
for info in ({'clean_steps': None, 'clean_step_index': None,
- 'agent_url': 'test-url'},
- {'clean_steps': None, 'agent_url': 'test-url'}):
+ 'agent_url': 'test-url', 'agent_secret_token': 'magic'},
+ {'clean_steps': None, 'agent_url': 'test-url',
+ 'agent_secret_token': 'it_is_a_kind_of_magic'}):
# Resume where there are no steps, should be a noop
tgt_prov_state = states.MANAGEABLE if manual else states.AVAILABLE
@@ -862,8 +935,12 @@ class DoNodeCleanTestCase(db_base.DbTestCase):
if fast_track:
self.assertEqual('test-url',
node.driver_internal_info.get('agent_url'))
+ self.assertIsNotNone(
+ node.driver_internal_info.get('agent_secret_token'))
else:
self.assertNotIn('agent_url', node.driver_internal_info)
+ self.assertNotIn('agent_secret_token',
+ node.driver_internal_info)
mock_execute.reset_mock()
def test__do_next_clean_step_automated_no_steps(self):
@@ -928,6 +1005,8 @@ class DoNodeCleanAbortTestCase(db_base.DbTestCase):
target_provision_state=states.AVAILABLE,
clean_step={'step': 'foo', 'abortable': True},
driver_internal_info={
+ 'agent_url': 'some url',
+ 'agent_secret_token': 'token',
'clean_step_index': 2,
'cleaning_reboot': True,
'cleaning_polling': True,
@@ -949,6 +1028,10 @@ class DoNodeCleanAbortTestCase(db_base.DbTestCase):
task.node.driver_internal_info)
self.assertNotIn('skip_current_clean_step',
task.node.driver_internal_info)
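+        # Aborting cleaning must also drop the agent URL and token.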
+ self.assertNotIn('agent_url',
+ task.node.driver_internal_info)
+ self.assertNotIn('agent_secret_token',
+ task.node.driver_internal_info)
def test__do_node_clean_abort(self):
self._test__do_node_clean_abort(None)
diff --git a/ironic/tests/unit/conductor/test_deployments.py b/ironic/tests/unit/conductor/test_deployments.py
index bbac17652..5a72dc452 100644
--- a/ironic/tests/unit/conductor/test_deployments.py
+++ b/ironic/tests/unit/conductor/test_deployments.py
@@ -13,12 +13,14 @@
"""Tests for deployment aspects of the conductor."""
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_utils import uuidutils
from ironic.common import exception
+from ironic.common import images
from ironic.common import states
from ironic.common import swift
from ironic.conductor import deployments
@@ -36,8 +38,9 @@ CONF = cfg.CONF
@mgr_utils.mock_record_keepalive
class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
+ autospec=True)
def test__do_node_deploy_driver_raises_prepare_error(self, mock_prepare,
mock_deploy):
self._start_service()
@@ -61,8 +64,9 @@ class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertTrue(mock_prepare.called)
self.assertFalse(mock_deploy.called)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.deploy', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
+ autospec=True)
def test__do_node_deploy_unexpected_prepare_error(self, mock_prepare,
mock_deploy):
self._start_service()
@@ -123,7 +127,9 @@ class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch.object(deployments, '_store_configdrive', autospec=True)
def _test__do_node_deploy_ok(self, mock_store, configdrive=None,
- expected_configdrive=None):
+ expected_configdrive=None, fast_track=False):
+ if fast_track:
+ self.config(fast_track=True, group='deploy')
expected_configdrive = expected_configdrive or configdrive
self._start_service()
with mock.patch.object(fake.FakeDeploy,
@@ -132,7 +138,9 @@ class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.node = obj_utils.create_test_node(
self.context, driver='fake-hardware', name=None,
provision_state=states.DEPLOYING,
- target_provision_state=states.ACTIVE)
+ target_provision_state=states.ACTIVE,
+ driver_internal_info={'agent_url': 'url',
+ 'agent_secret_token': 'token'})
task = task_manager.TaskManager(self.context, self.node.uuid)
deployments.do_node_deploy(task, self.service.conductor.id,
@@ -147,6 +155,12 @@ class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
expected_configdrive)
else:
self.assertFalse(mock_store.called)
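+        # The agent URL and token survive the deploy only in fast-track mode.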
+ self.assertEqual(
+ fast_track,
+ bool(task.node.driver_internal_info.get('agent_url')))
+ self.assertEqual(
+ fast_track,
+ bool(task.node.driver_internal_info.get('agent_secret_token')))
def test__do_node_deploy_ok(self):
self._test__do_node_deploy_ok()
@@ -155,7 +169,10 @@ class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
configdrive = 'foo'
self._test__do_node_deploy_ok(configdrive=configdrive)
- @mock.patch('openstack.baremetal.configdrive.build')
+ def test__do_node_deploy_fast_track(self):
+ self._test__do_node_deploy_ok(fast_track=True)
+
+ @mock.patch('openstack.baremetal.configdrive.build', autospec=True)
def test__do_node_deploy_configdrive_as_dict(self, mock_cd):
mock_cd.return_value = 'foo'
configdrive = {'user_data': 'abcd'}
@@ -166,7 +183,7 @@ class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
user_data=b'abcd',
vendor_data=None)
- @mock.patch('openstack.baremetal.configdrive.build')
+ @mock.patch('openstack.baremetal.configdrive.build', autospec=True)
def test__do_node_deploy_configdrive_as_dict_with_meta_data(self, mock_cd):
mock_cd.return_value = 'foo'
configdrive = {'meta_data': {'uuid': uuidutils.generate_uuid(),
@@ -179,7 +196,7 @@ class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
user_data=None,
vendor_data=None)
- @mock.patch('openstack.baremetal.configdrive.build')
+ @mock.patch('openstack.baremetal.configdrive.build', autospec=True)
def test__do_node_deploy_configdrive_with_network_data(self, mock_cd):
mock_cd.return_value = 'foo'
configdrive = {'network_data': {'links': []}}
@@ -190,7 +207,7 @@ class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
user_data=None,
vendor_data=None)
- @mock.patch('openstack.baremetal.configdrive.build')
+ @mock.patch('openstack.baremetal.configdrive.build', autospec=True)
def test__do_node_deploy_configdrive_and_user_data_as_dict(self, mock_cd):
mock_cd.return_value = 'foo'
configdrive = {'user_data': {'user': 'data'}}
@@ -201,7 +218,7 @@ class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
user_data=b'{"user": "data"}',
vendor_data=None)
- @mock.patch('openstack.baremetal.configdrive.build')
+ @mock.patch('openstack.baremetal.configdrive.build', autospec=True)
def test__do_node_deploy_configdrive_with_vendor_data(self, mock_cd):
mock_cd.return_value = 'foo'
configdrive = {'vendor_data': {'foo': 'bar'}}
@@ -212,8 +229,9 @@ class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
user_data=None,
vendor_data={'foo': 'bar'})
- @mock.patch.object(swift, 'SwiftAPI')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
+ @mock.patch.object(swift, 'SwiftAPI', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
+ autospec=True)
def test__do_node_deploy_configdrive_swift_error(self, mock_prepare,
mock_swift):
CONF.set_override('configdrive_use_object_store', True,
@@ -235,7 +253,8 @@ class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertIsNotNone(node.last_error)
self.assertFalse(mock_prepare.called)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
+ autospec=True)
def test__do_node_deploy_configdrive_db_error(self, mock_prepare):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
@@ -244,7 +263,8 @@ class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
task = task_manager.TaskManager(self.context, node.uuid)
task.node.save()
expected_instance_info = dict(node.instance_info)
- with mock.patch.object(dbapi.IMPL, 'update_node') as mock_db:
+ with mock.patch.object(dbapi.IMPL, 'update_node',
+ autospec=True) as mock_db:
db_node = self.dbapi.get_node_by_uuid(node.uuid)
mock_db.side_effect = [db_exception.DBDataError('DB error'),
db_node, db_node, db_node]
@@ -274,7 +294,8 @@ class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertFalse(mock_prepare.called)
@mock.patch.object(deployments, '_store_configdrive', autospec=True)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
+ autospec=True)
def test__do_node_deploy_configdrive_unexpected_error(self, mock_prepare,
mock_store):
self._start_service()
@@ -359,6 +380,41 @@ class DoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertIsNotNone(node.last_error)
self.assertFalse(mock_deploy.called)
+ @mock.patch.object(task_manager.TaskManager, 'process_event',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.validate',
+ autospec=True)
+ @mock.patch.object(conductor_steps, 'validate_deploy_templates',
+ autospec=True)
+ @mock.patch.object(conductor_utils, 'validate_instance_info_traits',
+ autospec=True)
+ @mock.patch.object(images, 'is_whole_disk_image', autospec=True)
+ def test_start_deploy(self, mock_iwdi, mock_validate_traits,
+ mock_validate_templates, mock_deploy_validate,
+ mock_power_validate, mock_process_event):
+ self._start_service()
+ mock_iwdi.return_value = False
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=states.AVAILABLE,
+ target_provision_state=states.ACTIVE)
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ deployments.start_deploy(task, self.service, configdrive=None,
+ event='deploy')
+ node.refresh()
+ self.assertTrue(mock_iwdi.called)
+ mock_power_validate.assert_called_once_with(task.driver.power, task)
+ mock_deploy_validate.assert_called_once_with(task.driver.deploy, task)
+ mock_validate_traits.assert_called_once_with(task.node)
+ mock_validate_templates.assert_called_once_with(
+ task, skip_missing=True)
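+        # start_deploy hands off to do_node_deploy through process_event.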
+ mock_process_event.assert_called_with(
+ mock.ANY, 'deploy', call_args=(
+ deployments.do_node_deploy, task, 1, None),
+ callback=mock.ANY, err_handler=mock.ANY)
+
@mgr_utils.mock_record_keepalive
class DoNextDeployStepTestCase(mgr_utils.ServiceSetUpMixin,
@@ -412,6 +468,39 @@ class DoNextDeployStepTestCase(mgr_utils.ServiceSetUpMixin,
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
autospec=True)
+ def test__do_next_deploy_step_in_deploywait(self, mock_execute):
+ driver_internal_info = {'deploy_step_index': None,
+ 'deploy_steps': self.deploy_steps}
+ self._start_service()
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ driver_internal_info=driver_internal_info,
+ deploy_step={})
+
+ def fake_execute(interface, task, step):
+ # A deploy step leaves the node in DEPLOYWAIT
+ task.process_event('wait')
+ return states.DEPLOYWAIT
+
+ mock_execute.side_effect = fake_execute
+ expected_first_step = node.driver_internal_info['deploy_steps'][0]
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.process_event('deploy')
+
+ deployments.do_next_deploy_step(task, 0, self.service.conductor.id)
+
+ node.refresh()
+ self.assertIsNone(node.last_error)
+ self.assertEqual(states.DEPLOYWAIT, node.provision_state)
+ self.assertEqual(states.ACTIVE, node.target_provision_state)
+ self.assertEqual(expected_first_step, node.deploy_step)
+ self.assertEqual(0, node.driver_internal_info['deploy_step_index'])
+ self.assertEqual(self.service.conductor.id, node.conductor_affinity)
+ mock_execute.assert_called_once_with(mock.ANY, task,
+ self.deploy_steps[0])
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
+ autospec=True)
def test__do_next_deploy_step_continue_from_last_step(self, mock_execute):
# Resume an in-progress deploy after the first async step
driver_internal_info = {'deploy_step_index': 0,
@@ -494,7 +583,8 @@ class DoNextDeployStepTestCase(mgr_utils.ServiceSetUpMixin,
# Run all steps from start to finish (all synchronous)
driver_internal_info = {'deploy_step_index': None,
'deploy_steps': self.deploy_steps,
- 'agent_url': 'url'}
+ 'agent_url': 'url',
+ 'agent_secret_token': 'token'}
self._start_service()
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
@@ -517,6 +607,41 @@ class DoNextDeployStepTestCase(mgr_utils.ServiceSetUpMixin,
mock_execute.assert_has_calls = [mock.call(self.deploy_steps[0]),
mock.call(self.deploy_steps[1])]
self.assertNotIn('agent_url', node.driver_internal_info)
+ self.assertNotIn('agent_secret_token', node.driver_internal_info)
+
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
+ autospec=True)
+ def test__do_next_deploy_step_fast_track(self, mock_execute):
+ self.config(fast_track=True, group='deploy')
+ # Run all steps from start to finish (all synchronous)
+ driver_internal_info = {'deploy_step_index': None,
+ 'deploy_steps': self.deploy_steps,
+ 'agent_url': 'url',
+ 'agent_secret_token': 'token'}
+ self._start_service()
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ driver_internal_info=driver_internal_info,
+ deploy_step={})
+ mock_execute.return_value = None
+
+ task = task_manager.TaskManager(self.context, node.uuid)
+ task.process_event('deploy')
+
+ deployments.do_next_deploy_step(task, 1, self.service.conductor.id)
+
+ # Deploying should be complete
+ node.refresh()
+ self.assertEqual(states.ACTIVE, node.provision_state)
+ self.assertEqual(states.NOSTATE, node.target_provision_state)
+ self.assertEqual({}, node.deploy_step)
+ self.assertNotIn('deploy_step_index', node.driver_internal_info)
+ self.assertIsNone(node.driver_internal_info['deploy_steps'])
+        mock_execute.assert_has_calls([mock.call(mock.ANY, task,
+                                                 self.deploy_steps[1])])
+ self.assertEqual('url', node.driver_internal_info['agent_url'])
+ self.assertEqual('token',
+ node.driver_internal_info['agent_secret_token'])
@mock.patch.object(conductor_utils, 'LOG', autospec=True)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.execute_deploy_step',
@@ -686,7 +811,7 @@ class DoNextDeployStepTestCase(mgr_utils.ServiceSetUpMixin,
mock.ANY, mock.ANY, self.deploy_steps[0])
-@mock.patch.object(swift, 'SwiftAPI')
+@mock.patch.object(swift, 'SwiftAPI', autospec=True)
class StoreConfigDriveTestCase(db_base.DbTestCase):
def setUp(self):
diff --git a/ironic/tests/unit/conductor/test_manager.py b/ironic/tests/unit/conductor/test_manager.py
index 708cf452c..52a5e03a3 100644
--- a/ironic/tests/unit/conductor/test_manager.py
+++ b/ironic/tests/unit/conductor/test_manager.py
@@ -22,10 +22,10 @@ from collections import namedtuple
import datetime
import queue
import re
+from unittest import mock
import eventlet
from futurist import waiters
-import mock
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_utils import uuidutils
@@ -117,7 +117,7 @@ class ChangeNodePowerStateTestCase(mgr_utils.ServiceSetUpMixin,
# background task's link callback.
self.assertIsNone(node.reservation)
- @mock.patch.object(conductor_utils, 'node_power_action')
+ @mock.patch.object(conductor_utils, 'node_power_action', autospec=True)
def test_change_node_power_state_node_already_locked(self,
pwr_act_mock):
# Test change_node_power_state with mocked
@@ -155,7 +155,7 @@ class ChangeNodePowerStateTestCase(mgr_utils.ServiceSetUpMixin,
self._start_service()
with mock.patch.object(self.service,
- '_spawn_worker') as spawn_mock:
+ '_spawn_worker', autospec=True) as spawn_mock:
spawn_mock.side_effect = exception.NoFreeConductorWorker()
exc = self.assertRaises(messaging.rpc.ExpectedException,
@@ -234,7 +234,8 @@ class ChangeNodePowerStateTestCase(mgr_utils.ServiceSetUpMixin,
self.assertIsNone(node.target_power_state)
self.assertIsNone(node.last_error)
- @mock.patch('ironic.objects.node.NodeSetPowerStateNotification')
+ @mock.patch('ironic.objects.node.NodeSetPowerStateNotification',
+ autospec=True)
def test_node_set_power_state_notif_success(self, mock_notif):
# Test that successfully changing a node's power state sends the
# correct .start and .end notifications
@@ -270,7 +271,8 @@ class ChangeNodePowerStateTestCase(mgr_utils.ServiceSetUpMixin,
obj_fields.NotificationLevel.INFO)
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
- @mock.patch('ironic.objects.node.NodeSetPowerStateNotification')
+ @mock.patch('ironic.objects.node.NodeSetPowerStateNotification',
+ autospec=True)
def test_node_set_power_state_notif_get_power_fail(self, mock_notif,
get_power_mock):
# Test that correct notifications are sent when changing node power
@@ -310,7 +312,8 @@ class ChangeNodePowerStateTestCase(mgr_utils.ServiceSetUpMixin,
obj_fields.NotificationLevel.ERROR)
@mock.patch.object(fake.FakePower, 'set_power_state', autospec=True)
- @mock.patch('ironic.objects.node.NodeSetPowerStateNotification')
+ @mock.patch('ironic.objects.node.NodeSetPowerStateNotification',
+ autospec=True)
def test_node_set_power_state_notif_set_power_fail(self, mock_notif,
set_power_mock):
# Test that correct notifications are sent when changing node power
@@ -350,7 +353,8 @@ class ChangeNodePowerStateTestCase(mgr_utils.ServiceSetUpMixin,
'baremetal.node.power_set.error',
obj_fields.NotificationLevel.ERROR)
- @mock.patch('ironic.objects.node.NodeSetPowerStateNotification')
+ @mock.patch('ironic.objects.node.NodeSetPowerStateNotification',
+ autospec=True)
def test_node_set_power_state_notif_spawn_fail(self, mock_notif):
# Test that failure notification is not sent when spawning the
# background conductor worker fails
@@ -364,7 +368,7 @@ class ChangeNodePowerStateTestCase(mgr_utils.ServiceSetUpMixin,
self._start_service()
with mock.patch.object(self.service,
- '_spawn_worker') as spawn_mock:
+ '_spawn_worker', autospec=True) as spawn_mock:
spawn_mock.side_effect = exception.NoFreeConductorWorker()
self.assertRaises(messaging.rpc.ExpectedException,
self.service.change_node_power_state,
@@ -377,7 +381,8 @@ class ChangeNodePowerStateTestCase(mgr_utils.ServiceSetUpMixin,
timeout=None)
self.assertFalse(mock_notif.called)
- @mock.patch('ironic.objects.node.NodeSetPowerStateNotification')
+ @mock.patch('ironic.objects.node.NodeSetPowerStateNotification',
+ autospec=True)
def test_node_set_power_state_notif_no_state_change(self, mock_notif):
# Test that correct notifications are sent when changing node power
# state and no state change is necessary
@@ -648,7 +653,8 @@ class UpdateNodeTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
res = objects.Node.get_by_uuid(self.context, node['uuid'])
self.assertEqual(old_instance, res['instance_uuid'])
- @mock.patch('ironic.drivers.modules.fake.FakePower.get_power_state')
+ @mock.patch('ironic.drivers.modules.fake.FakePower.get_power_state',
+ autospec=True)
def _test_associate_node(self, power_state, mock_get_power_state):
mock_get_power_state.return_value = power_state
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
@@ -954,12 +960,57 @@ class UpdateNodeTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertIsNone(node.instance_uuid)
self.assertIsNone(node.allocation_id)
+ def test_update_node_maintenance_with_broken_interface(self):
+ # Updates of non-driver fields are possible with a broken driver
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ power_interface='foobar',
+ extra={'test': 'one'})
+
+ node.maintenance = True
+ res = self.service.update_node(self.context, node)
+ self.assertTrue(res.maintenance)
+
+ node.refresh()
+ self.assertTrue(node.maintenance)
+ self.assertEqual('foobar', node.power_interface)
+
+ def test_update_node_interface_field_with_broken_interface(self):
+ # Updates of driver fields are NOT possible with a broken driver,
+ # unless they're fixing the breakage.
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ power_interface='foobar',
+ deploy_interface='fake',
+ extra={'test': 'one'})
+
+ node.deploy_interface = 'iscsi'
+ exc = self.assertRaises(messaging.rpc.ExpectedException,
+ self.service.update_node,
+ self.context, node)
+ self.assertEqual(exception.InterfaceNotFoundInEntrypoint,
+ exc.exc_info[0])
+
+ node.refresh()
+ self.assertEqual('foobar', node.power_interface)
+ self.assertEqual('fake', node.deploy_interface)
+
+ def test_update_node_fix_broken_interface(self):
+ # Updates of non-driver fields are possible with a broken driver
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ power_interface='foobar',
+ extra={'test': 'one'})
+
+ node.power_interface = 'fake'
+ self.service.update_node(self.context, node)
+
+ node.refresh()
+ self.assertEqual('fake', node.power_interface)
+
@mgr_utils.mock_record_keepalive
class VendorPassthruTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
- @mock.patch.object(task_manager.TaskManager, 'upgrade_lock')
- @mock.patch.object(task_manager.TaskManager, 'spawn_after')
+ @mock.patch.object(task_manager.TaskManager, 'upgrade_lock', autospec=True)
+ @mock.patch.object(task_manager.TaskManager, 'spawn_after', autospec=True)
def test_vendor_passthru_async(self, mock_spawn,
mock_upgrade):
node = obj_utils.create_test_node(self.context,
@@ -986,8 +1037,8 @@ class VendorPassthruTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
- @mock.patch.object(task_manager.TaskManager, 'upgrade_lock')
- @mock.patch.object(task_manager.TaskManager, 'spawn_after')
+ @mock.patch.object(task_manager.TaskManager, 'upgrade_lock', autospec=True)
+ @mock.patch.object(task_manager.TaskManager, 'spawn_after', autospec=True)
def test_vendor_passthru_sync(self, mock_spawn, mock_upgrade):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
info = {'bar': 'meow'}
@@ -1012,8 +1063,8 @@ class VendorPassthruTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
- @mock.patch.object(task_manager.TaskManager, 'upgrade_lock')
- @mock.patch.object(task_manager.TaskManager, 'spawn_after')
+ @mock.patch.object(task_manager.TaskManager, 'upgrade_lock', autospec=True)
+ @mock.patch.object(task_manager.TaskManager, 'spawn_after', autospec=True)
def test_vendor_passthru_shared_lock(self, mock_spawn, mock_upgrade):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
info = {'bar': 'woof'}
@@ -1115,7 +1166,7 @@ class VendorPassthruTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self._start_service()
with mock.patch.object(self.service,
- '_spawn_worker') as spawn_mock:
+ '_spawn_worker', autospec=True) as spawn_mock:
spawn_mock.side_effect = exception.NoFreeConductorWorker()
exc = self.assertRaises(messaging.rpc.ExpectedException,
@@ -1149,8 +1200,9 @@ class VendorPassthruTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
del fake_routes['test_method']['func']
self.assertEqual(fake_routes, data)
- @mock.patch.object(driver_factory, 'get_interface')
- @mock.patch.object(manager.ConductorManager, '_spawn_worker')
+ @mock.patch.object(driver_factory, 'get_interface', autospec=True)
+ @mock.patch.object(manager.ConductorManager, '_spawn_worker',
+ autospec=True)
def test_driver_vendor_passthru_sync(self, mock_spawn, mock_get_if):
expected = {'foo': 'bar'}
vendor_mock = mock.Mock(spec=drivers_base.VendorInterface)
@@ -1323,7 +1375,7 @@ class VendorPassthruTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mgr_utils.mock_record_keepalive
-@mock.patch.object(images, 'is_whole_disk_image')
+@mock.patch.object(images, 'is_whole_disk_image', autospec=True)
class ServiceDoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
def test_do_node_deploy_invalid_state(self, mock_iwdi):
@@ -1385,21 +1437,25 @@ class ServiceDoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
self.assertNotIn('is_whole_disk_image', node.driver_internal_info)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.validate')
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.validate',
+ autospec=True)
def test_do_node_deploy_validate_fail(self, mock_validate, mock_iwdi):
self._test_do_node_deploy_validate_fail(mock_validate, mock_iwdi)
- @mock.patch('ironic.drivers.modules.fake.FakePower.validate')
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
def test_do_node_deploy_power_validate_fail(self, mock_validate,
mock_iwdi):
self._test_do_node_deploy_validate_fail(mock_validate, mock_iwdi)
- @mock.patch.object(conductor_utils, 'validate_instance_info_traits')
+ @mock.patch.object(conductor_utils, 'validate_instance_info_traits',
+ autospec=True)
def test_do_node_deploy_traits_validate_fail(self, mock_validate,
mock_iwdi):
self._test_do_node_deploy_validate_fail(mock_validate, mock_iwdi)
- @mock.patch.object(conductor_steps, 'validate_deploy_templates')
+ @mock.patch.object(conductor_steps, 'validate_deploy_templates',
+ autospec=True)
def test_do_node_deploy_validate_template_fail(self, mock_validate,
mock_iwdi):
self._test_do_node_deploy_validate_fail(mock_validate, mock_iwdi)
@@ -1431,7 +1487,6 @@ class ServiceDoNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
mock.ANY, None)
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
self.assertFalse(node.driver_internal_info['is_whole_disk_image'])
- self.assertNotIn('agent_url', node.driver_internal_info)
def test_do_node_deploy_rebuild_active_state_error(self, mock_iwdi):
# Tests manager.do_node_deploy() & deployments.do_next_deploy_step(),
@@ -1852,10 +1907,40 @@ class ContinueNodeDeployTestCase(mgr_utils.ServiceSetUpMixin,
deployments.do_next_deploy_step,
mock.ANY, 1, mock.ANY)
+ @mock.patch.object(conductor_steps, 'validate_deploy_templates',
+ autospec=True)
+ @mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
+ autospec=True)
+ def test_continue_node_steps_validation(self, mock_spawn, mock_validate):
+ prv_state = states.DEPLOYWAIT
+ tgt_prv_state = states.ACTIVE
+ mock_validate.side_effect = exception.InvalidParameterValue('boom')
+ driver_info = {'deploy_steps': self.deploy_steps,
+ 'deploy_step_index': 0,
+ 'steps_validated': False}
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ provision_state=prv_state,
+ target_provision_state=tgt_prv_state,
+ last_error=None,
+ driver_internal_info=driver_info,
+ deploy_step=self.deploy_steps[0])
+ self._start_service()
+ mock_spawn.reset_mock()
+ self.service.continue_node_deploy(self.context, node.uuid)
+ self._stop_service()
+ node.refresh()
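+        # Failed re-validation must abort the deploy without spawning
+        # a worker.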
+ self.assertEqual(states.DEPLOYFAIL, node.provision_state)
+ self.assertIn('Failed to validate the final deploy steps',
+ node.last_error)
+ self.assertIn('boom', node.last_error)
+ self.assertEqual(tgt_prv_state, node.target_provision_state)
+ self.assertFalse(mock_spawn.called)
+
@mgr_utils.mock_record_keepalive
class CheckTimeoutsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.clean_up')
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.clean_up',
+ autospec=True)
def test__check_deploy_timeouts(self, mock_cleanup):
self._start_service()
CONF.set_override('deploy_callback_timeout', 1, group='conductor')
@@ -1871,7 +1956,7 @@ class CheckTimeoutsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(states.DEPLOYFAIL, node.provision_state)
self.assertEqual(states.ACTIVE, node.target_provision_state)
self.assertIsNotNone(node.last_error)
- mock_cleanup.assert_called_once_with(mock.ANY)
+ mock_cleanup.assert_called_once_with(mock.ANY, mock.ANY)
def _check_cleanwait_timeouts(self, manual=False):
self._start_service()
@@ -1907,8 +1992,9 @@ class CheckTimeoutsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test__check_cleanwait_timeouts_manual_clean(self):
self._check_cleanwait_timeouts(manual=True)
- @mock.patch('ironic.drivers.modules.fake.FakeRescue.clean_up')
- @mock.patch.object(conductor_utils, 'node_power_action')
+ @mock.patch('ironic.drivers.modules.fake.FakeRescue.clean_up',
+ autospec=True)
+ @mock.patch.object(conductor_utils, 'node_power_action', autospec=True)
def test_check_rescuewait_timeouts(self, node_power_mock,
mock_clean_up):
self._start_service()
@@ -1930,7 +2016,7 @@ class CheckTimeoutsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertIsNotNone(node.last_error)
self.assertIn('Timeout reached while waiting for rescue ramdisk',
node.last_error)
- mock_clean_up.assert_called_once_with(mock.ANY)
+ mock_clean_up.assert_called_once_with(mock.ANY, mock.ANY)
node_power_mock.assert_called_once_with(mock.ANY, states.POWER_OFF)
@@ -1959,7 +2045,8 @@ class DoNodeTearDownTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NodeProtected, exc.exc_info[0])
- @mock.patch('ironic.drivers.modules.fake.FakePower.validate')
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
def test_do_node_tear_down_validate_fail(self, mock_validate):
# InvalidParameterValue should be re-raised as InstanceDeployFailure
mock_validate.side_effect = exception.InvalidParameterValue('error')
@@ -1973,7 +2060,8 @@ class DoNodeTearDownTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InstanceDeployFailure, exc.exc_info[0])
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.tear_down')
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.tear_down',
+ autospec=True)
def test_do_node_tear_down_driver_raises_error(self, mock_tear_down):
# test when driver.deploy.tear_down raises exception
node = obj_utils.create_test_node(
@@ -1995,9 +2083,10 @@ class DoNodeTearDownTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertIsNotNone(node.last_error)
# Assert instance_info was erased
self.assertEqual({}, node.instance_info)
- mock_tear_down.assert_called_once_with(task)
+ mock_tear_down.assert_called_once_with(mock.ANY, task)
- @mock.patch('ironic.drivers.modules.fake.FakeConsole.stop_console')
+ @mock.patch('ironic.drivers.modules.fake.FakeConsole.stop_console',
+ autospec=True)
def test_do_node_tear_down_console_raises_error(self, mock_console):
# test when _set_console_mode raises exception
node = obj_utils.create_test_node(
@@ -2020,14 +2109,16 @@ class DoNodeTearDownTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertIsNotNone(node.last_error)
# Assert instance_info was erased
self.assertEqual({}, node.instance_info)
- mock_console.assert_called_once_with(task)
+ mock_console.assert_called_once_with(mock.ANY, task)
# TODO(TheJulia): Since we're functionally bound to neutron support
# by default, the fake drivers still invoke neutron.
- @mock.patch('ironic.drivers.modules.fake.FakeConsole.stop_console')
- @mock.patch('ironic.common.neutron.unbind_neutron_port')
- @mock.patch('ironic.conductor.cleaning.do_node_clean')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.tear_down')
+ @mock.patch('ironic.drivers.modules.fake.FakeConsole.stop_console',
+ autospec=True)
+ @mock.patch('ironic.common.neutron.unbind_neutron_port', autospec=True)
+ @mock.patch('ironic.conductor.cleaning.do_node_clean', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.tear_down',
+ autospec=True)
def _test__do_node_tear_down_ok(self, mock_tear_down, mock_clean,
mock_unbind, mock_console,
enabled_console=False,
@@ -2042,7 +2133,7 @@ class DoNodeTearDownTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
instance_info={'foo': 'bar'},
console_enabled=enabled_console,
driver_internal_info={'is_whole_disk_image': False,
- 'clean_steps': {},
+ 'deploy_steps': {},
'root_uuid_or_disk_id': 'foo',
'instance': {'ephemeral_gb': 10}})
port = obj_utils.create_test_port(
@@ -2068,15 +2159,15 @@ class DoNodeTearDownTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertIsNone(node.allocation_id)
self.assertEqual({}, node.instance_info)
self.assertNotIn('instance', node.driver_internal_info)
- self.assertNotIn('clean_steps', node.driver_internal_info)
+ self.assertIsNone(node.driver_internal_info['deploy_steps'])
self.assertNotIn('root_uuid_or_disk_id', node.driver_internal_info)
self.assertNotIn('is_whole_disk_image', node.driver_internal_info)
- mock_tear_down.assert_called_once_with(task)
+ mock_tear_down.assert_called_once_with(task.driver.deploy, task)
mock_clean.assert_called_once_with(task)
self.assertEqual({}, port.internal_info)
mock_unbind.assert_called_once_with('foo', context=mock.ANY)
if enabled_console:
- mock_console.assert_called_once_with(task)
+ mock_console.assert_called_once_with(task.driver.console, task)
else:
self.assertFalse(mock_console.called)
if with_allocation:
@@ -2093,9 +2184,11 @@ class DoNodeTearDownTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test__do_node_tear_down_with_allocation(self):
self._test__do_node_tear_down_ok(with_allocation=True)
- @mock.patch('ironic.drivers.modules.fake.FakeRescue.clean_up')
- @mock.patch('ironic.conductor.cleaning.do_node_clean')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.tear_down')
+ @mock.patch('ironic.drivers.modules.fake.FakeRescue.clean_up',
+ autospec=True)
+ @mock.patch('ironic.conductor.cleaning.do_node_clean', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.tear_down',
+ autospec=True)
def _test_do_node_tear_down_from_state(self, init_state, is_rescue_state,
mock_tear_down, mock_clean,
mock_rescue_clean):
@@ -2115,10 +2208,10 @@ class DoNodeTearDownTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(states.AVAILABLE, node.target_provision_state)
self.assertIsNone(node.last_error)
self.assertEqual({}, node.instance_info)
- mock_tear_down.assert_called_once_with(mock.ANY)
+ mock_tear_down.assert_called_once_with(mock.ANY, mock.ANY)
mock_clean.assert_called_once_with(mock.ANY)
if is_rescue_state:
- mock_rescue_clean.assert_called_once_with(mock.ANY)
+ mock_rescue_clean.assert_called_once_with(mock.ANY, mock.ANY)
else:
self.assertFalse(mock_rescue_clean.called)
@@ -2420,8 +2513,8 @@ class DoNodeCleanTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertNotIn('clean_steps', node.driver_internal_info)
self.assertIsNone(node.last_error)
- @mock.patch('ironic.conductor.utils.remove_agent_url')
- @mock.patch('ironic.conductor.utils.is_fast_track')
+ @mock.patch('ironic.conductor.utils.remove_agent_url', autospec=True)
+ @mock.patch('ironic.conductor.utils.is_fast_track', autospec=True)
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
@@ -2508,7 +2601,7 @@ class DoNodeCleanTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test_continue_node_clean_wrong_state(self, mock_spawn):
# Test that the appropriate exception is raised if the node isn't
# already in CLEANWAIT state
- prv_state = states.DELETING
+ prv_state = states.ACTIVE
tgt_prv_state = states.AVAILABLE
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=prv_state,
@@ -2674,11 +2767,14 @@ class DoNodeRescueTestCase(mgr_utils.CommonMixIn, mgr_utils.ServiceSetUpMixin,
@mock.patch('ironic.conductor.task_manager.acquire', autospec=True)
def test_do_node_rescue(self, mock_acquire):
self._start_service()
+ dii = {'agent_secret_token': 'token',
+ 'agent_url': 'http://url',
+ 'other field': 'value'}
task = self._create_task(
node_attrs=dict(driver='fake-hardware',
provision_state=states.ACTIVE,
instance_info={},
- driver_internal_info={'agent_url': 'url'}))
+ driver_internal_info=dii))
mock_acquire.side_effect = self._get_acquire_side_effect(task)
self.service.do_node_rescue(self.context, task.node.uuid,
"password")
@@ -2689,7 +2785,8 @@ class DoNodeRescueTestCase(mgr_utils.CommonMixIn, mgr_utils.ServiceSetUpMixin,
err_handler=conductor_utils.spawn_rescue_error_handler)
self.assertIn('rescue_password', task.node.instance_info)
self.assertIn('hashed_rescue_password', task.node.instance_info)
- self.assertNotIn('agent_url', task.node.driver_internal_info)
+ self.assertEqual({'other field': 'value'},
+ task.node.driver_internal_info)
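The tightened assertion verifies that entering rescue scrubs the agent's bookkeeping (agent_url and agent_secret_token) from driver_internal_info while leaving unrelated keys untouched. A sketch of that pruning, assuming a hypothetical helper (_wipe_agent_fields is illustrative, not Ironic's API):

def _wipe_agent_fields(driver_internal_info):
    # Illustrative only: drop stale agent bookkeeping before rescue.
    for key in ('agent_url', 'agent_secret_token'):
        driver_internal_info.pop(key, None)
    return driver_internal_info

dii = {'agent_secret_token': 'token',
       'agent_url': 'http://url',
       'other field': 'value'}
assert _wipe_agent_fields(dii) == {'other field': 'value'}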
def test_do_node_rescue_invalid_state(self):
self._start_service()
@@ -2721,15 +2818,18 @@ class DoNodeRescueTestCase(mgr_utils.CommonMixIn, mgr_utils.ServiceSetUpMixin,
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.InstanceRescueFailure, exc.exc_info[0])
- @mock.patch('ironic.drivers.modules.fake.FakeRescue.validate')
+ @mock.patch('ironic.drivers.modules.fake.FakeRescue.validate',
+ autospec=True)
def test_do_node_rescue_when_rescue_validate_fail(self, mock_validate):
self._test_do_node_rescue_when_validate_fail(mock_validate)
- @mock.patch('ironic.drivers.modules.fake.FakePower.validate')
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
def test_do_node_rescue_when_power_validate_fail(self, mock_validate):
self._test_do_node_rescue_when_validate_fail(mock_validate)
- @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate')
+ @mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.validate',
+ autospec=True)
def test_do_node_rescue_when_network_validate_fail(self, mock_validate):
self._test_do_node_rescue_when_validate_fail(mock_validate)
@@ -2749,7 +2849,7 @@ class DoNodeRescueTestCase(mgr_utils.CommonMixIn, mgr_utils.ServiceSetUpMixin,
# This is a sync operation; last_error should be None.
self.assertIsNone(node.last_error)
- @mock.patch('ironic.drivers.modules.fake.FakeRescue.rescue')
+ @mock.patch('ironic.drivers.modules.fake.FakeRescue.rescue', autospec=True)
def test__do_node_rescue_returns_rescuewait(self, mock_rescue):
self._start_service()
node = obj_utils.create_test_node(
@@ -2766,7 +2866,7 @@ class DoNodeRescueTestCase(mgr_utils.CommonMixIn, mgr_utils.ServiceSetUpMixin,
self.assertIn('rescue_password', node.instance_info)
self.assertIn('hashed_rescue_password', node.instance_info)
- @mock.patch('ironic.drivers.modules.fake.FakeRescue.rescue')
+ @mock.patch('ironic.drivers.modules.fake.FakeRescue.rescue', autospec=True)
def test__do_node_rescue_returns_rescue(self, mock_rescue):
self._start_service()
node = obj_utils.create_test_node(
@@ -2784,8 +2884,8 @@ class DoNodeRescueTestCase(mgr_utils.CommonMixIn, mgr_utils.ServiceSetUpMixin,
self.assertIn('rescue_password', node.instance_info)
self.assertIn('hashed_rescue_password', node.instance_info)
- @mock.patch.object(manager, 'LOG')
- @mock.patch('ironic.drivers.modules.fake.FakeRescue.rescue')
+ @mock.patch.object(manager, 'LOG', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeRescue.rescue', autospec=True)
def test__do_node_rescue_errors(self, mock_rescue, mock_log):
self._start_service()
node = obj_utils.create_test_node(
@@ -2807,8 +2907,8 @@ class DoNodeRescueTestCase(mgr_utils.CommonMixIn, mgr_utils.ServiceSetUpMixin,
self.assertTrue(node.last_error.startswith('Failed to rescue'))
self.assertTrue(mock_log.error.called)
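Adding autospec to the LOG patches is more than a style change: the mock is specced against the real Logger object, so a misspelled method in a test fails loudly instead of silently passing. A self-contained sketch:

from unittest import mock
import logging
import sys

LOG = logging.getLogger('demo')
this_module = sys.modules[__name__]

with mock.patch.object(this_module, 'LOG', autospec=True) as mock_log:
    this_module.LOG.error('boom')
    mock_log.error.assert_called_once_with('boom')
    # Accessing a typo such as mock_log.eror would raise AttributeError
    # under autospec, where a bare MagicMock would swallow it.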
- @mock.patch.object(manager, 'LOG')
- @mock.patch('ironic.drivers.modules.fake.FakeRescue.rescue')
+ @mock.patch.object(manager, 'LOG', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeRescue.rescue', autospec=True)
def test__do_node_rescue_bad_state(self, mock_rescue, mock_log):
self._start_service()
node = obj_utils.create_test_node(
@@ -2854,7 +2954,8 @@ class DoNodeRescueTestCase(mgr_utils.CommonMixIn, mgr_utils.ServiceSetUpMixin,
self.context, node.uuid)
self.assertEqual(exception.InvalidStateRequested, exc.exc_info[0])
- @mock.patch('ironic.drivers.modules.fake.FakePower.validate')
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
def test_do_node_unrescue_validate_fail(self, mock_validate):
# InvalidParameterValue should be re-raised as InstanceUnrescueFailure
mock_validate.side_effect = exception.InvalidParameterValue('error')
@@ -2884,22 +2985,30 @@ class DoNodeRescueTestCase(mgr_utils.CommonMixIn, mgr_utils.ServiceSetUpMixin,
node.refresh()
self.assertIsNone(node.last_error)
- @mock.patch('ironic.drivers.modules.fake.FakeRescue.unrescue')
+ @mock.patch('ironic.drivers.modules.fake.FakeRescue.unrescue',
+ autospec=True)
def test__do_node_unrescue(self, mock_unrescue):
self._start_service()
+ dii = {'agent_url': 'http://url',
+ 'agent_secret_token': 'token',
+ 'other field': 'value'}
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
provision_state=states.UNRESCUING,
target_provision_state=states.ACTIVE,
- instance_info={})
+ instance_info={},
+ driver_internal_info=dii)
with task_manager.TaskManager(self.context, node.uuid) as task:
mock_unrescue.return_value = states.ACTIVE
self.service._do_node_unrescue(task)
node.refresh()
self.assertEqual(states.ACTIVE, node.provision_state)
self.assertEqual(states.NOSTATE, node.target_provision_state)
+ self.assertEqual({'other field': 'value'},
+ node.driver_internal_info)
- @mock.patch.object(manager, 'LOG')
- @mock.patch('ironic.drivers.modules.fake.FakeRescue.unrescue')
+ @mock.patch.object(manager, 'LOG', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeRescue.unrescue',
+ autospec=True)
def test__do_node_unrescue_ironic_error(self, mock_unrescue, mock_log):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
@@ -2917,8 +3026,9 @@ class DoNodeRescueTestCase(mgr_utils.CommonMixIn, mgr_utils.ServiceSetUpMixin,
self.assertTrue('Unable to unrescue' in node.last_error)
self.assertTrue(mock_log.error.called)
- @mock.patch.object(manager, 'LOG')
- @mock.patch('ironic.drivers.modules.fake.FakeRescue.unrescue')
+ @mock.patch.object(manager, 'LOG', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeRescue.unrescue',
+ autospec=True)
def test__do_node_unrescue_other_error(self, mock_unrescue, mock_log):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
@@ -2935,7 +3045,8 @@ class DoNodeRescueTestCase(mgr_utils.CommonMixIn, mgr_utils.ServiceSetUpMixin,
self.assertTrue('Some failure' in node.last_error)
self.assertTrue(mock_log.exception.called)
- @mock.patch('ironic.drivers.modules.fake.FakeRescue.unrescue')
+ @mock.patch('ironic.drivers.modules.fake.FakeRescue.unrescue',
+ autospec=True)
def test__do_node_unrescue_bad_state(self, mock_unrescue):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
@@ -2999,9 +3110,12 @@ class DoNodeRescueTestCase(mgr_utils.CommonMixIn, mgr_utils.ServiceSetUpMixin,
@mgr_utils.mock_record_keepalive
class DoNodeVerifyTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
- @mock.patch('ironic.objects.node.NodeCorrectedPowerStateNotification')
- @mock.patch('ironic.drivers.modules.fake.FakePower.get_power_state')
- @mock.patch('ironic.drivers.modules.fake.FakePower.validate')
+ @mock.patch('ironic.objects.node.NodeCorrectedPowerStateNotification',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakePower.get_power_state',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
def test__do_node_verify(self, mock_validate, mock_get_power_state,
mock_notif):
self._start_service()
@@ -3031,16 +3145,18 @@ class DoNodeVerifyTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
node.refresh()
- mock_validate.assert_called_once_with(task)
- mock_get_power_state.assert_called_once_with(task)
+ mock_validate.assert_called_once_with(mock.ANY, task)
+ mock_get_power_state.assert_called_once_with(mock.ANY, task)
self.assertEqual(states.MANAGEABLE, node.provision_state)
self.assertIsNone(node.target_provision_state)
self.assertIsNone(node.last_error)
self.assertEqual(states.POWER_OFF, node.power_state)
- @mock.patch('ironic.drivers.modules.fake.FakePower.get_power_state')
- @mock.patch('ironic.drivers.modules.fake.FakePower.validate')
+ @mock.patch('ironic.drivers.modules.fake.FakePower.get_power_state',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
def test__do_node_verify_validation_fails(self, mock_validate,
mock_get_power_state):
self._start_service()
@@ -3060,15 +3176,17 @@ class DoNodeVerifyTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self._stop_service()
node.refresh()
- mock_validate.assert_called_once_with(task)
+ mock_validate.assert_called_once_with(mock.ANY, task)
self.assertEqual(states.ENROLL, node.provision_state)
self.assertIsNone(node.target_provision_state)
self.assertTrue(node.last_error)
self.assertFalse(mock_get_power_state.called)
- @mock.patch('ironic.drivers.modules.fake.FakePower.get_power_state')
- @mock.patch('ironic.drivers.modules.fake.FakePower.validate')
+ @mock.patch('ironic.drivers.modules.fake.FakePower.get_power_state',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
def test__do_node_verify_get_state_fails(self, mock_validate,
mock_get_power_state):
self._start_service()
@@ -3088,7 +3206,7 @@ class DoNodeVerifyTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self._stop_service()
node.refresh()
- mock_get_power_state.assert_called_once_with(task)
+ mock_get_power_state.assert_called_once_with(mock.ANY, task)
self.assertEqual(states.ENROLL, node.provision_state)
self.assertIsNone(node.target_provision_state)
@@ -3109,7 +3227,7 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
'otherdriver',
''))
- @mock.patch.object(images, 'is_whole_disk_image')
+ @mock.patch.object(images, 'is_whole_disk_image', autospec=True)
def test_validate_dynamic_driver_interfaces(self, mock_iwdi):
mock_iwdi.return_value = False
target_raid_config = {'logical_disks': [{'size_gb': 1,
@@ -3135,7 +3253,7 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
@mock.patch.object(fake.FakeDeploy, 'validate', autospec=True)
- @mock.patch.object(images, 'is_whole_disk_image')
+ @mock.patch.object(images, 'is_whole_disk_image', autospec=True)
def test_validate_driver_interfaces_validation_fail(self, mock_iwdi,
mock_val):
mock_iwdi.return_value = False
@@ -3150,7 +3268,7 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
@mock.patch.object(fake.FakeDeploy, 'validate', autospec=True)
- @mock.patch.object(images, 'is_whole_disk_image')
+ @mock.patch.object(images, 'is_whole_disk_image', autospec=True)
def test_validate_driver_interfaces_validation_fail_unexpected(
self, mock_iwdi, mock_val):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
@@ -3165,15 +3283,15 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
- @mock.patch.object(images, 'is_whole_disk_image')
+ @mock.patch.object(images, 'is_whole_disk_image', autospec=True)
def test_validate_driver_interfaces_validation_fail_instance_traits(
self, mock_iwdi):
mock_iwdi.return_value = False
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
network_interface='noop')
with mock.patch(
- 'ironic.conductor.utils.validate_instance_info_traits'
- ) as ii_traits:
+ 'ironic.conductor.utils.validate_instance_info_traits',
+ autospec=True) as ii_traits:
reason = 'fake reason'
ii_traits.side_effect = exception.InvalidParameterValue(reason)
ret = self.service.validate_driver_interfaces(self.context,
@@ -3182,15 +3300,15 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
self.assertEqual(reason, ret['deploy']['reason'])
mock_iwdi.assert_called_once_with(self.context, node.instance_info)
- @mock.patch.object(images, 'is_whole_disk_image')
+ @mock.patch.object(images, 'is_whole_disk_image', autospec=True)
def test_validate_driver_interfaces_validation_fail_deploy_templates(
self, mock_iwdi):
mock_iwdi.return_value = False
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
network_interface='noop')
with mock.patch(
- 'ironic.conductor.steps.validate_deploy_templates'
- ) as mock_validate:
+ 'ironic.conductor.steps.validate_deploy_templates',
+ autospec=True) as mock_validate:
reason = 'fake reason'
mock_validate.side_effect = exception.InvalidParameterValue(reason)
ret = self.service.validate_driver_interfaces(self.context,
@@ -3201,8 +3319,9 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
@mock.patch.object(manager.ConductorManager, '_fail_if_in_state',
autospec=True)
- @mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor')
- @mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list')
+ @mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
+ autospec=True)
+ @mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
def test_iter_nodes(self, mock_nodeinfo_list, mock_mapped,
mock_fail_if_state):
self._start_service()
@@ -3233,7 +3352,7 @@ class MiscTestCase(mgr_utils.ServiceSetUpMixin, mgr_utils.CommonMixIn,
last_error=mock.ANY)]
mock_fail_if_state.assert_has_calls(expected_calls)
- @mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list')
+ @mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
def test_iter_nodes_shutdown(self, mock_nodeinfo_list):
self._start_service()
self.columns = ['uuid', 'driver', 'conductor_group', 'id']
@@ -3253,7 +3372,7 @@ class ConsoleTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
self._start_service()
with mock.patch.object(self.service,
- '_spawn_worker') as spawn_mock:
+ '_spawn_worker', autospec=True) as spawn_mock:
spawn_mock.side_effect = exception.NoFreeConductorWorker()
exc = self.assertRaises(messaging.rpc.ExpectedException,
@@ -3264,7 +3383,8 @@ class ConsoleTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self._stop_service()
spawn_mock.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)
- @mock.patch.object(notification_utils, 'emit_console_notification')
+ @mock.patch.object(notification_utils, 'emit_console_notification',
+ autospec=True)
def test_set_console_mode_enabled(self, mock_notify):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
self._start_service()
@@ -3278,7 +3398,8 @@ class ConsoleTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock.call(mock.ANY, 'console_set',
obj_fields.NotificationStatus.END)])
- @mock.patch.object(notification_utils, 'emit_console_notification')
+ @mock.patch.object(notification_utils, 'emit_console_notification',
+ autospec=True)
def test_set_console_mode_disabled(self, mock_notify):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
console_enabled=True)
@@ -3306,7 +3427,8 @@ class ConsoleTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
@mock.patch.object(fake.FakeConsole, 'start_console', autospec=True)
- @mock.patch.object(notification_utils, 'emit_console_notification')
+ @mock.patch.object(notification_utils, 'emit_console_notification',
+ autospec=True)
def test_set_console_mode_start_fail(self, mock_notify, mock_sc):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
last_error=None,
@@ -3325,7 +3447,8 @@ class ConsoleTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
obj_fields.NotificationStatus.ERROR)])
@mock.patch.object(fake.FakeConsole, 'stop_console', autospec=True)
- @mock.patch.object(notification_utils, 'emit_console_notification')
+ @mock.patch.object(notification_utils, 'emit_console_notification',
+ autospec=True)
def test_set_console_mode_stop_fail(self, mock_notify, mock_sc):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
last_error=None,
@@ -3344,7 +3467,8 @@ class ConsoleTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
obj_fields.NotificationStatus.ERROR)])
@mock.patch.object(fake.FakeConsole, 'start_console', autospec=True)
- @mock.patch.object(notification_utils, 'emit_console_notification')
+ @mock.patch.object(notification_utils, 'emit_console_notification',
+ autospec=True)
def test_enable_console_already_enabled(self, mock_notify, mock_sc):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
console_enabled=True)
@@ -3355,7 +3479,8 @@ class ConsoleTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertFalse(mock_notify.called)
@mock.patch.object(fake.FakeConsole, 'stop_console', autospec=True)
- @mock.patch.object(notification_utils, 'emit_console_notification')
+ @mock.patch.object(notification_utils, 'emit_console_notification',
+ autospec=True)
def test_disable_console_already_disabled(self, mock_notify, mock_sc):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
console_enabled=False)
@@ -3523,7 +3648,8 @@ class DestroyNodeTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.service.destroy_node(self.context, node.uuid)
@mock.patch.object(fake.FakeConsole, 'stop_console', autospec=True)
- @mock.patch.object(notification_utils, 'emit_console_notification')
+ @mock.patch.object(notification_utils, 'emit_console_notification',
+ autospec=True)
def test_destroy_node_console_enabled(self, mock_notify, mock_sc):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
@@ -3540,7 +3666,8 @@ class DestroyNodeTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
obj_fields.NotificationStatus.END)])
@mock.patch.object(fake.FakeConsole, 'stop_console', autospec=True)
- @mock.patch.object(notification_utils, 'emit_console_notification')
+ @mock.patch.object(notification_utils, 'emit_console_notification',
+ autospec=True)
def test_destroy_node_console_disable_fail(self, mock_notify, mock_sc):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
@@ -3566,11 +3693,20 @@ class DestroyNodeTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.service.destroy_node(self.context, node.uuid)
self.assertFalse(mock_power.called)
+ def test_destroy_node_broken_driver(self):
+ node = obj_utils.create_test_node(self.context,
+ power_interface='broken')
+ self._start_service()
+ self.service.destroy_node(self.context, node.uuid)
+ self.assertRaises(exception.NodeNotFound,
+ self.dbapi.get_node_by_uuid,
+ node.uuid)
+
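test_destroy_node_broken_driver pins down that deletion is not held hostage by an unusable power interface: the node with power_interface='broken' is still removed, and a later lookup raises NodeNotFound. A condensed, self-contained sketch of that tolerance (the helper and DB shapes are illustrative, not the conductor's real code):

class FakeDB:
    def __init__(self):
        self.nodes = {'uuid-1': object()}
    def destroy_node(self, uuid):
        del self.nodes[uuid]

def power_off_if_needed(node):
    raise RuntimeError('broken power interface')  # stands in for 'broken'

def destroy_node(uuid, db):
    try:
        power_off_if_needed(db.nodes[uuid])
    except Exception:
        pass  # a broken driver must not block deletion
    db.destroy_node(uuid)

db = FakeDB()
destroy_node('uuid-1', db)
assert 'uuid-1' not in db.nodes  # mirrors the NodeNotFound assertion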
@mgr_utils.mock_record_keepalive
class CreatePortTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
- @mock.patch.object(conductor_utils, 'validate_port_physnet')
+ @mock.patch.object(conductor_utils, 'validate_port_physnet', autospec=True)
def test_create_port(self, mock_validate):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
port = obj_utils.get_test_port(self.context, node_id=node.id,
@@ -3593,7 +3729,7 @@ class CreatePortTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertRaises(exception.PortNotFound, port.get_by_uuid,
self.context, port.uuid)
- @mock.patch.object(conductor_utils, 'validate_port_physnet')
+ @mock.patch.object(conductor_utils, 'validate_port_physnet', autospec=True)
def test_create_port_mac_exists(self, mock_validate):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
port = obj_utils.create_test_port(self.context, node_id=node.id)
@@ -3607,7 +3743,7 @@ class CreatePortTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertRaises(exception.PortNotFound, port.get_by_uuid,
self.context, port.uuid)
- @mock.patch.object(conductor_utils, 'validate_port_physnet')
+ @mock.patch.object(conductor_utils, 'validate_port_physnet', autospec=True)
def test_create_port_physnet_validation_failure_conflict(self,
mock_validate):
mock_validate.side_effect = exception.Conflict
@@ -3621,7 +3757,7 @@ class CreatePortTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertRaises(exception.PortNotFound, port.get_by_uuid,
self.context, port.uuid)
- @mock.patch.object(conductor_utils, 'validate_port_physnet')
+ @mock.patch.object(conductor_utils, 'validate_port_physnet', autospec=True)
def test_create_port_physnet_validation_failure_inconsistent(
self, mock_validate):
mock_validate.side_effect = exception.PortgroupPhysnetInconsistent(
@@ -3641,7 +3777,7 @@ class CreatePortTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mgr_utils.mock_record_keepalive
class UpdatePortTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
- @mock.patch.object(conductor_utils, 'validate_port_physnet')
+ @mock.patch.object(conductor_utils, 'validate_port_physnet', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'port_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_port(self, mock_val, mock_pc, mock_vpp):
@@ -3866,7 +4002,7 @@ class UpdatePortTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
port.refresh()
self.assertEqual(old_physnet, port.physical_network)
- @mock.patch.object(conductor_utils, 'validate_port_physnet')
+ @mock.patch.object(conductor_utils, 'validate_port_physnet', autospec=True)
def test_update_port_physnet_validation_failure_conflict(self,
mock_validate):
mock_validate.side_effect = exception.Conflict
@@ -3881,7 +4017,7 @@ class UpdatePortTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(exception.Conflict, exc.exc_info[0])
mock_validate.assert_called_once_with(mock.ANY, port)
- @mock.patch.object(conductor_utils, 'validate_port_physnet')
+ @mock.patch.object(conductor_utils, 'validate_port_physnet', autospec=True)
def test_update_port_physnet_validation_failure_inconsistent(
self, mock_validate):
mock_validate.side_effect = exception.PortgroupPhysnetInconsistent(
@@ -3930,7 +4066,7 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(expected_result, actual_result)
@mock.patch.object(messaging.Notifier, 'info', autospec=True)
- @mock.patch.object(task_manager, 'acquire')
+ @mock.patch.object(task_manager, 'acquire', autospec=True)
def test_send_sensor_task(self, acquire_mock, notifier_mock):
nodes = queue.Queue()
for i in range(5):
@@ -3959,7 +4095,7 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
notifier_mock.assert_has_calls([n_call, n_call, n_call,
n_call, n_call])
- @mock.patch.object(task_manager, 'acquire')
+ @mock.patch.object(task_manager, 'acquire', autospec=True)
def test_send_sensor_task_shutdown(self, acquire_mock):
nodes = queue.Queue()
nodes.put_nowait(('fake_uuid', 'fake-hardware', '', None))
@@ -3967,7 +4103,7 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.service._shutdown = True
CONF.set_override('send_sensor_data', True, group='conductor')
self.service._sensors_nodes_task(self.context, nodes)
- acquire_mock.__enter__.assert_not_called()
+ acquire_mock.return_value.__enter__.assert_not_called()
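The corrected assertion reflects how context managers compose with mocks: task_manager.acquire(...) returns the object the with-statement enters, so __enter__ lives on acquire_mock.return_value rather than on acquire_mock itself. A standalone sketch:

from unittest import mock

acquire_mock = mock.MagicMock()
with acquire_mock('ctx', 'node-uuid') as task:
    pass

acquire_mock.__enter__.assert_not_called()  # never entered directly
acquire_mock.return_value.__enter__.assert_called_once_with()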
@mock.patch.object(task_manager, 'acquire', autospec=True)
def test_send_sensor_task_no_management(self, acquire_mock):
@@ -4007,8 +4143,9 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch.object(manager.ConductorManager, '_spawn_worker',
autospec=True)
- @mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor')
- @mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list')
+ @mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
+ autospec=True)
+ @mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
def test___send_sensor_data(self, get_nodeinfo_list_mock,
_mapped_to_this_conductor_mock,
mock_spawn):
@@ -4027,8 +4164,9 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
@mock.patch('ironic.conductor.manager.ConductorManager._spawn_worker',
autospec=True)
- @mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor')
- @mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list')
+ @mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
+ autospec=True)
+ @mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
def test___send_sensor_data_multiple_workers(
self, get_nodeinfo_list_mock, _mapped_to_this_conductor_mock,
mock_spawn):
@@ -4051,7 +4189,7 @@ class SensorsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_spawn.call_count)
# TODO(TheJulia): At some point, we should add a test to validate that
- # that a modified filter to return all nodes actually works, although
+ # a modified filter to return all nodes actually works, although
# the way the sensor tests are written, the list is all mocked.
@@ -4268,7 +4406,7 @@ class VifTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_valid.assert_called_once_with(mock.ANY, mock.ANY)
mock_attach.assert_called_once_with(mock.ANY, mock.ANY, self.vif)
- @mock.patch.object(n_flat.FlatNetwork, 'vif_attach', autpspec=True)
+ @mock.patch.object(n_flat.FlatNetwork, 'vif_attach', autospec=True)
def test_vif_attach_raises_portgroup_physnet_inconsistent(
self, mock_attach, mock_valid):
mock_valid.side_effect = exception.PortgroupPhysnetInconsistent(
@@ -4283,7 +4421,7 @@ class VifTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_valid.assert_called_once_with(mock.ANY, mock.ANY)
self.assertFalse(mock_attach.called)
- @mock.patch.object(n_flat.FlatNetwork, 'vif_attach', autpspec=True)
+ @mock.patch.object(n_flat.FlatNetwork, 'vif_attach', autospec=True)
def test_vif_attach_raises_vif_invalid_for_attach(
self, mock_attach, mock_valid):
mock_valid.side_effect = exception.VifInvalidForAttach(
@@ -4298,7 +4436,7 @@ class VifTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_valid.assert_called_once_with(mock.ANY, mock.ANY)
self.assertFalse(mock_attach.called)
- @mock.patch.object(n_flat.FlatNetwork, 'vif_attach', autpspec=True)
+ @mock.patch.object(n_flat.FlatNetwork, 'vif_attach', autospec=True)
def test_vif_attach_validate_error(self, mock_attach,
mock_valid):
mock_valid.side_effect = exception.MissingParameterValue("BOOM")
@@ -4311,14 +4449,14 @@ class VifTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_valid.assert_called_once_with(mock.ANY, mock.ANY)
self.assertFalse(mock_attach.called)
- @mock.patch.object(n_flat.FlatNetwork, 'vif_detach', autpspec=True)
+ @mock.patch.object(n_flat.FlatNetwork, 'vif_detach', autospec=True)
def test_vif_detach(self, mock_detach, mock_valid):
node = obj_utils.create_test_node(self.context, driver='fake-hardware')
self.service.vif_detach(self.context, node.uuid, "interface")
- mock_detach.assert_called_once_with(mock.ANY, "interface")
+ mock_detach.assert_called_once_with(mock.ANY, mock.ANY, "interface")
mock_valid.assert_called_once_with(mock.ANY, mock.ANY)
- @mock.patch.object(n_flat.FlatNetwork, 'vif_detach', autpspec=True)
+ @mock.patch.object(n_flat.FlatNetwork, 'vif_detach', autospec=True)
def test_vif_detach_node_locked(self, mock_detach, mock_valid):
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
reservation='fake-reserv')
@@ -4330,7 +4468,7 @@ class VifTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertFalse(mock_detach.called)
self.assertFalse(mock_valid.called)
- @mock.patch.object(n_flat.FlatNetwork, 'vif_detach', autpspec=True)
+ @mock.patch.object(n_flat.FlatNetwork, 'vif_detach', autospec=True)
def test_vif_detach_raises_network_error(self, mock_detach,
mock_valid):
mock_detach.side_effect = exception.NetworkError("BOOM")
@@ -4341,9 +4479,9 @@ class VifTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
# Compare true exception hidden by @messaging.expected_exceptions
self.assertEqual(exception.NetworkError, exc.exc_info[0])
mock_valid.assert_called_once_with(mock.ANY, mock.ANY)
- mock_detach.assert_called_once_with(mock.ANY, "interface")
+ mock_detach.assert_called_once_with(mock.ANY, mock.ANY, "interface")
- @mock.patch.object(n_flat.FlatNetwork, 'vif_detach', autpspec=True)
+ @mock.patch.object(n_flat.FlatNetwork, 'vif_detach', autospec=True)
def test_vif_detach_validate_error(self, mock_detach,
mock_valid):
mock_valid.side_effect = exception.MissingParameterValue("BOOM")
@@ -4427,7 +4565,7 @@ class UpdatePortgroupTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
portgroup.refresh()
self.assertEqual(old_node_id, portgroup.node_id)
- @mock.patch.object(dbapi.IMPL, 'get_ports_by_portgroup_id')
+ @mock.patch.object(dbapi.IMPL, 'get_ports_by_portgroup_id', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'portgroup_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_portgroup_to_node_in_manageable_state(self, mock_val,
@@ -4453,7 +4591,7 @@ class UpdatePortgroupTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
mock_pgc.assert_called_once_with(mock.ANY, mock.ANY, portgroup)
- @mock.patch.object(dbapi.IMPL, 'get_ports_by_portgroup_id')
+ @mock.patch.object(dbapi.IMPL, 'get_ports_by_portgroup_id', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'portgroup_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_portgroup_to_node_in_inspect_wait_state(self, mock_val,
@@ -4479,7 +4617,7 @@ class UpdatePortgroupTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
mock_pgc.assert_called_once_with(mock.ANY, mock.ANY, portgroup)
- @mock.patch.object(dbapi.IMPL, 'get_ports_by_portgroup_id')
+ @mock.patch.object(dbapi.IMPL, 'get_ports_by_portgroup_id', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'portgroup_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_portgroup_to_node_in_active_state_and_maintenance(
@@ -4505,7 +4643,7 @@ class UpdatePortgroupTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_val.assert_called_once_with(mock.ANY, mock.ANY)
mock_pgc.assert_called_once_with(mock.ANY, mock.ANY, portgroup)
- @mock.patch.object(dbapi.IMPL, 'get_ports_by_portgroup_id')
+ @mock.patch.object(dbapi.IMPL, 'get_ports_by_portgroup_id', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'portgroup_changed', autospec=True)
@mock.patch.object(n_flat.FlatNetwork, 'validate', autospec=True)
def test_update_portgroup_association_with_ports(self, mock_val,
@@ -4621,7 +4759,7 @@ class RaidHardwareTypeTestCases(RaidTestCases):
self.assertIn('manual-management', str(exc.exc_info[1]))
-@mock.patch.object(conductor_utils, 'node_power_action')
+@mock.patch.object(conductor_utils, 'node_power_action', autospec=True)
class ManagerDoSyncPowerStateTestCase(db_base.DbTestCase):
def setUp(self):
super(ManagerDoSyncPowerStateTestCase, self).setUp()
@@ -4719,7 +4857,8 @@ class ManagerDoSyncPowerStateTestCase(db_base.DbTestCase):
mock_power_update.assert_called_once_with(
self.task.context, self.node.instance_uuid, states.POWER_OFF)
- @mock.patch('ironic.objects.node.NodeCorrectedPowerStateNotification')
+ @mock.patch('ironic.objects.node.NodeCorrectedPowerStateNotification',
+ autospec=True)
@mock.patch.object(nova, 'power_update', autospec=True)
def test_state_changed_no_sync_notify(self, mock_power_update, mock_notif,
node_power_action):
@@ -4778,6 +4917,21 @@ class ManagerDoSyncPowerStateTestCase(db_base.DbTestCase):
self.service.power_state_sync_count[self.node.uuid])
@mock.patch.object(nova, 'power_update', autospec=True)
+ def test_no_power_sync_support(self, mock_power_update, node_power_action):
+ self.config(force_power_state_during_sync=True, group='conductor')
+ self.power.supports_power_sync.return_value = False
+
+ self._do_sync_power_state(states.POWER_ON, states.POWER_OFF)
+
+ self.assertFalse(self.power.validate.called)
+ self.power.get_power_state.assert_called_once_with(self.task)
+ self.assertFalse(node_power_action.called)
+ self.assertEqual(states.POWER_OFF, self.node.power_state)
+ self.task.upgrade_lock.assert_called_once_with()
+ mock_power_update.assert_called_once_with(
+ self.task.context, self.node.instance_uuid, states.POWER_OFF)
+
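The new test pins down the branch taken when a power interface reports supports_power_sync(task) as False: even with force_power_state_during_sync enabled, the conductor records the observed state rather than forcing the node back. A simplified, runnable sketch of that decision (condensed from the behavior under test, not the actual conductor code; state strings mirror ironic's 'power on'/'power off' values):

def resolve_power_state(db_state, observed, supports_sync, force_cfg):
    """Return (state_to_store, force_action_taken)."""
    if db_state == observed:
        return db_state, False
    if force_cfg and supports_sync:
        return db_state, True   # conductor forces the node back to db_state
    return observed, False      # just record what the BMC reported

# force_power_state_during_sync=True but no power-sync support:
# the observed POWER_OFF wins and no power action is issued.
assert resolve_power_state('power on', 'power off',
                           supports_sync=False,
                           force_cfg=True) == ('power off', False)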
+ @mock.patch.object(nova, 'power_update', autospec=True)
def test_max_retries_exceeded(self, mock_power_update, node_power_action):
self.config(force_power_state_during_sync=True, group='conductor')
self.config(power_state_sync_max_retries=1, group='conductor')
@@ -4822,7 +4976,8 @@ class ManagerDoSyncPowerStateTestCase(db_base.DbTestCase):
mock_power_update.assert_called_once_with(
self.task.context, self.node.instance_uuid, states.POWER_OFF)
- @mock.patch('ironic.objects.node.NodeCorrectedPowerStateNotification')
+ @mock.patch('ironic.objects.node.NodeCorrectedPowerStateNotification',
+ autospec=True)
@mock.patch.object(nova, 'power_update', autospec=True)
def test_max_retries_exceeded_notify(self, mock_power_update,
mock_notif, node_power_action):
@@ -4927,10 +5082,11 @@ class ManagerDoSyncPowerStateTestCase(db_base.DbTestCase):
new=mock.MagicMock(return_value=(0, 0)))
@mock.patch.object(manager.ConductorManager, '_spawn_worker',
new=lambda self, fun, *args: fun(*args))
-@mock.patch.object(manager, 'do_sync_power_state')
-@mock.patch.object(task_manager, 'acquire')
-@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor')
-@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list')
+@mock.patch.object(manager, 'do_sync_power_state', autospec=True)
+@mock.patch.object(task_manager, 'acquire', autospec=True)
+@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
+ autospec=True)
+@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
db_base.DbTestCase):
def setUp(self):
@@ -4950,7 +5106,8 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
- mapped_mock.assert_called_once_with(self.node.uuid,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
self.node.driver,
self.node.conductor_group)
self.assertFalse(acquire_mock.called)
@@ -4968,7 +5125,8 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
- mapped_mock.assert_called_once_with(self.node.uuid,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
@@ -4991,7 +5149,8 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
- mapped_mock.assert_called_once_with(self.node.uuid,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
@@ -5013,7 +5172,8 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
- mapped_mock.assert_called_once_with(self.node.uuid,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
@@ -5035,7 +5195,8 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
- mapped_mock.assert_called_once_with(self.node.uuid,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
@@ -5056,7 +5217,8 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
- mapped_mock.assert_called_once_with(self.node.uuid,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
@@ -5075,7 +5237,8 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
- mapped_mock.assert_called_once_with(self.node.uuid,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
@@ -5094,7 +5257,8 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
- mapped_mock.assert_called_once_with(self.node.uuid,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
@@ -5102,6 +5266,28 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
shared=True)
sync_mock.assert_called_once_with(task, mock.ANY)
+ def test_single_node_adopt_failed(self, get_nodeinfo_mock,
+ mapped_mock, acquire_mock, sync_mock):
+ get_nodeinfo_mock.return_value = self._get_nodeinfo_list_response()
+ mapped_mock.return_value = True
+ task = self._create_task(
+ node_attrs=dict(uuid=self.node.uuid,
+ provision_state=states.ADOPTFAIL))
+ acquire_mock.side_effect = self._get_acquire_side_effect(task)
+
+ self.service._sync_power_states(self.context)
+
+ get_nodeinfo_mock.assert_called_once_with(
+ columns=self.columns, filters=self.filters)
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
+ self.node.driver,
+ self.node.conductor_group)
+ acquire_mock.assert_called_once_with(self.context, self.node.uuid,
+ purpose=mock.ANY,
+ shared=True)
+ sync_mock.assert_not_called()
+
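test_single_node_adopt_failed asserts that _sync_power_states acquires the node but never invokes do_sync_power_state for a node in ADOPTFAIL. A minimal sketch of such a guard (the exact membership of the excluded set is an assumption for illustration; the real list is longer):

SYNC_EXCLUDED_STATES = frozenset({'adopt failed'})  # illustrative subset

def maybe_sync(node, do_sync):
    if node['provision_state'] in SYNC_EXCLUDED_STATES:
        return  # acquired and inspected, but never synced
    do_sync(node)

calls = []
maybe_sync({'provision_state': 'adopt failed'}, calls.append)
assert calls == []  # mirrors sync_mock.assert_not_called()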
def test__sync_power_state_multiple_nodes(self, get_nodeinfo_mock,
mapped_mock, acquire_mock,
sync_mock):
@@ -5141,11 +5327,11 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.return_value = (
self._get_nodeinfo_list_response(nodes))
- mapped_mock.side_effect = lambda x, y, z: mapped_map[x]
+ mapped_mock.side_effect = lambda q, x, y, z: mapped_map[x]
acquire_mock.side_effect = self._get_acquire_side_effect(tasks)
sync_mock.side_effect = sync_results
- with mock.patch.object(eventlet, 'sleep') as sleep_mock:
+ with mock.patch.object(eventlet, 'sleep', autospec=True) as sleep_mock:
self.service._sync_power_states(self.context)
# Ensure we've yielded on every iteration, except for the node
# not mapped to this conductor
@@ -5153,7 +5339,7 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
- mapped_calls = [mock.call(x.uuid, x.driver,
+ mapped_calls = [mock.call(self.service, x.uuid, x.driver,
x.conductor_group) for x in nodes]
self.assertEqual(mapped_calls, mapped_mock.call_args_list)
acquire_calls = [mock.call(self.context, x.uuid,
@@ -5167,9 +5353,10 @@ class ManagerSyncPowerStatesTestCase(mgr_utils.CommonMixIn,
self.assertEqual(sync_calls, sync_mock.call_args_list)
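One consequence of autospeccing _mapped_to_this_conductor shows up in the lambda above: a side_effect callable must accept exactly the arguments the mock records, which now includes the manager instance. A toy sketch:

from unittest import mock

class Manager:
    def mapped(self, uuid, driver, group):
        return True

mapped_map = {'u1': True, 'u2': False}

with mock.patch.object(Manager, 'mapped', autospec=True) as m:
    # The side_effect signature mirrors the autospecced call, self included:
    m.side_effect = lambda mgr, uuid, driver, group: mapped_map[uuid]
    assert Manager().mapped('u2', 'drv', 'grp') is False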
-@mock.patch.object(task_manager, 'acquire')
-@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor')
-@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list')
+@mock.patch.object(task_manager, 'acquire', autospec=True)
+@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
+ autospec=True)
+@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
class ManagerPowerRecoveryTestCase(mgr_utils.CommonMixIn,
db_base.DbTestCase):
def setUp(self):
@@ -5198,7 +5385,8 @@ class ManagerPowerRecoveryTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
- mapped_mock.assert_called_once_with(self.node.uuid,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
self.node.driver,
self.node.conductor_group)
self.assertFalse(acquire_mock.called)
@@ -5216,7 +5404,8 @@ class ManagerPowerRecoveryTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
- mapped_mock.assert_called_once_with(self.node.uuid,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
@@ -5262,7 +5451,8 @@ class ManagerPowerRecoveryTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
- mapped_mock.assert_called_once_with(self.node.uuid,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
@@ -5271,7 +5461,8 @@ class ManagerPowerRecoveryTestCase(mgr_utils.CommonMixIn,
self.assertFalse(self.power.validate.called)
@mock.patch.object(notification_utils,
- 'emit_power_state_corrected_notification')
+ 'emit_power_state_corrected_notification',
+ autospec=True)
@mock.patch.object(nova, 'power_update', autospec=True)
def test_node_recovery_success(self, mock_power_update, notify_mock,
get_nodeinfo_mock, mapped_mock,
@@ -5286,7 +5477,8 @@ class ManagerPowerRecoveryTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
- mapped_mock.assert_called_once_with(self.node.uuid,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
@@ -5314,7 +5506,8 @@ class ManagerPowerRecoveryTestCase(mgr_utils.CommonMixIn,
get_nodeinfo_mock.assert_called_once_with(
columns=self.columns, filters=self.filters)
- mapped_mock.assert_called_once_with(self.node.uuid,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
@@ -5328,9 +5521,10 @@ class ManagerPowerRecoveryTestCase(mgr_utils.CommonMixIn,
self.assertEqual('Unreachable BMC', self.node.maintenance_reason)
-@mock.patch.object(task_manager, 'acquire')
-@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor')
-@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list')
+@mock.patch.object(task_manager, 'acquire', autospec=True)
+@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
+ autospec=True)
+@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
db_base.DbTestCase):
def setUp(self):
@@ -5364,7 +5558,8 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_deploy_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
- mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid, self.node.driver,
self.node.conductor_group)
self.assertFalse(acquire_mock.called)
@@ -5376,7 +5571,8 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_deploy_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
- mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid, self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
@@ -5397,8 +5593,10 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_deploy_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
- mapped_mock.assert_called_once_with(
- self.node.uuid, self.node.driver, self.node.conductor_group)
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
+ self.node.driver,
+ self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
@@ -5415,8 +5613,10 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_deploy_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
- mapped_mock.assert_called_once_with(
- self.node.uuid, self.node.driver, self.node.conductor_group)
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
+ self.node.driver,
+ self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
@@ -5434,8 +5634,10 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_deploy_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
- mapped_mock.assert_called_once_with(
- self.node.uuid, self.node.driver, self.node.conductor_group)
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
+ self.node.driver,
+ self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
@@ -5457,9 +5659,11 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_deploy_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
- self.assertEqual([mock.call(self.node.uuid, task.node.driver,
+ self.assertEqual([mock.call(self.service,
+ self.node.uuid, task.node.driver,
task.node.conductor_group),
- mock.call(self.node2.uuid, self.node2.driver,
+ mock.call(self.service,
+ self.node2.uuid, self.node2.driver,
self.node2.conductor_group)],
mapped_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.node.uuid,
@@ -5491,8 +5695,10 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
# mapped should only be called for the first node, as we should
# have exited the loop early due to NoFreeConductorWorker
- mapped_mock.assert_called_once_with(
- self.node.uuid, self.node.driver, self.node.conductor_group)
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
+ self.node.driver,
+ self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
@@ -5519,7 +5725,8 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
# mapped should only be called for the first node, as we should
# have exited the loop early due to an unknown exception
- mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid, self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
@@ -5546,7 +5753,8 @@ class ManagerCheckDeployTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_deploy_timeouts(self.context)
# Should only have run 2.
- self.assertEqual([mock.call(self.node.uuid, self.node.driver,
+ self.assertEqual([mock.call(self.service,
+ self.node.uuid, self.node.driver,
self.node.conductor_group)] * 2,
mapped_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.node.uuid,
@@ -5583,7 +5791,7 @@ class ManagerTestProperties(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
enabled_power_interfaces=['ipmitool'],
enabled_management_interfaces=['ipmitool'],
enabled_console_interfaces=['ipmitool-socat'])
- expected = ['ipmi_address', 'ipmi_terminal_port',
+ expected = ['agent_verify_ca', 'ipmi_address', 'ipmi_terminal_port',
'ipmi_password', 'ipmi_port', 'ipmi_priv_level',
'ipmi_username', 'ipmi_bridging', 'ipmi_transit_channel',
'ipmi_transit_address', 'ipmi_target_channel',
@@ -5598,7 +5806,7 @@ class ManagerTestProperties(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def test_driver_properties_snmp(self):
self.config(enabled_hardware_types='snmp',
enabled_power_interfaces=['snmp'])
- expected = ['deploy_kernel', 'deploy_ramdisk',
+ expected = ['agent_verify_ca', 'deploy_kernel', 'deploy_ramdisk',
'force_persistent_boot_device',
'rescue_kernel', 'rescue_ramdisk',
'snmp_driver', 'snmp_address', 'snmp_port', 'snmp_version',
@@ -5619,9 +5827,9 @@ class ManagerTestProperties(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
enabled_boot_interfaces=['ilo-virtual-media'],
enabled_inspect_interfaces=['ilo'],
enabled_console_interfaces=['ilo'])
- expected = ['ilo_address', 'ilo_username', 'ilo_password',
- 'client_port', 'client_timeout', 'ilo_deploy_iso',
- 'console_port', 'ilo_change_password',
+ expected = ['agent_verify_ca', 'ilo_address', 'ilo_username',
+ 'ilo_password', 'client_port', 'client_timeout',
+ 'ilo_deploy_iso', 'console_port', 'ilo_change_password',
'ca_file', 'snmp_auth_user', 'snmp_auth_prot_password',
'snmp_auth_priv_password', 'snmp_auth_protocol',
'snmp_auth_priv_protocol', 'deploy_forces_oob_reboot']
@@ -5649,15 +5857,16 @@ class ManagerTestHardwareTypeProperties(mgr_utils.ServiceSetUpMixin,
self.assertEqual(sorted(expected), sorted(properties))
def test_hardware_type_properties_manual_management(self):
- expected = ['deploy_kernel', 'deploy_ramdisk',
+ expected = ['agent_verify_ca', 'deploy_kernel', 'deploy_ramdisk',
'force_persistent_boot_device', 'deploy_forces_oob_reboot',
'rescue_kernel', 'rescue_ramdisk']
self._check_hardware_type_properties('manual-management', expected)
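Every expected-properties update in this block stems from the same new common agent option, agent_verify_ca, surfacing through each hardware type's driver_properties. A toy sketch of how per-interface property dicts merge into that list (names and help text are illustrative):

COMMON_AGENT_PROPERTIES = {
    'agent_verify_ca': 'CA bundle path or True/False. Optional.',
}

def driver_properties(*interface_property_dicts):
    merged = {}
    for props in interface_property_dicts:
        merged.update(props)
    return sorted(merged)

props = driver_properties(COMMON_AGENT_PROPERTIES,
                          {'deploy_kernel': 'Glance UUID. Optional.'})
assert 'agent_verify_ca' in props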
-@mock.patch.object(waiters, 'wait_for_all')
-@mock.patch.object(manager.ConductorManager, '_spawn_worker')
-@mock.patch.object(manager.ConductorManager, '_sync_power_state_nodes_task')
+@mock.patch.object(waiters, 'wait_for_all', autospec=True)
+@mock.patch.object(manager.ConductorManager, '_spawn_worker', autospec=True)
+@mock.patch.object(manager.ConductorManager, '_sync_power_state_nodes_task',
+ autospec=True)
class ParallelPowerSyncTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
def setUp(self):
@@ -5741,9 +5950,10 @@ class ParallelPowerSyncTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
queue_mock.return_value.put.assert_has_calls(expected_calls)
-@mock.patch.object(task_manager, 'acquire')
-@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor')
-@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list')
+@mock.patch.object(task_manager, 'acquire', autospec=True)
+@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
+ autospec=True)
+@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
def setUp(self):
@@ -5776,8 +5986,9 @@ class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
self.service._sync_local_state(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
- mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver,
- self.node.conductor_group)
+ mapped_mock.assert_called_once_with(
+ self.service, self.node.uuid, self.node.driver,
+ self.node.conductor_group)
self.assertFalse(acquire_mock.called)
def test_already_mapped(self, get_nodeinfo_mock, mapped_mock,
@@ -5792,8 +6003,9 @@ class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
self.service._sync_local_state(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
- mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver,
- self.node.conductor_group)
+ mapped_mock.assert_called_once_with(
+ self.service, self.node.uuid, self.node.driver,
+ self.node.conductor_group)
self.assertFalse(acquire_mock.called)
def test_good(self, get_nodeinfo_mock, mapped_mock, acquire_mock):
@@ -5804,8 +6016,9 @@ class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
self.service._sync_local_state(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
- mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver,
- self.node.conductor_group)
+ mapped_mock.assert_called_once_with(
+ self.service, self.node.uuid, self.node.driver,
+ self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
# assert spawn_after has been called
@@ -5834,7 +6047,7 @@ class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
# assert _mapped_to_this_conductor() gets called 2 times only
# instead of 3. When NoFreeConductorWorker is raised, the loop
# should be broken
- expected = [mock.call(self.node.uuid, self.node.driver,
+ expected = [mock.call(self.service, self.node.uuid, self.node.driver,
self.node.conductor_group)] * 2
self.assertEqual(expected, mapped_mock.call_args_list)
@@ -5864,8 +6077,9 @@ class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
# assert _mapped_to_this_conductor() gets called 3 times
- expected = [mock.call(self.node.uuid, self.node.driver,
- self.node.conductor_group)] * 3
+ expected = [mock.call(
+ self.service, self.node.uuid, self.node.driver,
+ self.node.conductor_group)] * 3
self.assertEqual(expected, mapped_mock.call_args_list)
# assert acquire() gets called 3 times
@@ -5896,8 +6110,9 @@ class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
# assert _mapped_to_this_conductor() gets called only once
# because of the worker limit
- mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver,
- self.node.conductor_group)
+ mapped_mock.assert_called_once_with(
+ self.service, self.node.uuid, self.node.driver,
+ self.node.conductor_group)
# assert acquire() gets called only once because of the worker limit
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
@@ -5912,7 +6127,8 @@ class ManagerSyncLocalStateTestCase(mgr_utils.CommonMixIn, db_base.DbTestCase):
@mgr_utils.mock_record_keepalive
class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware')
+ @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
+ autospec=True)
def test_inspect_hardware_ok(self, mock_inspect):
self._start_service()
node = obj_utils.create_test_node(
@@ -5926,11 +6142,12 @@ class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(states.MANAGEABLE, node.provision_state)
self.assertEqual(states.NOSTATE, node.target_provision_state)
self.assertIsNone(node.last_error)
- mock_inspect.assert_called_once_with(mock.ANY)
+ mock_inspect.assert_called_once_with(task.driver.inspect, task)
task.node.refresh()
self.assertNotIn('agent_url', task.node.driver_internal_info)
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware')
+ @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
+ autospec=True)
def test_inspect_hardware_return_inspecting(self, mock_inspect):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
@@ -5944,9 +6161,10 @@ class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertIn('driver returned unexpected state', node.last_error)
self.assertEqual(states.INSPECTFAIL, node.provision_state)
self.assertEqual(states.MANAGEABLE, node.target_provision_state)
- mock_inspect.assert_called_once_with(mock.ANY)
+ mock_inspect.assert_called_once_with(task.driver.inspect, task)
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware')
+ @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
+ autospec=True)
def test_inspect_hardware_return_inspect_wait(self, mock_inspect):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
@@ -5958,10 +6176,11 @@ class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(states.INSPECTWAIT, node.provision_state)
self.assertEqual(states.MANAGEABLE, node.target_provision_state)
self.assertIsNone(node.last_error)
- mock_inspect.assert_called_once_with(mock.ANY)
+ mock_inspect.assert_called_once_with(task.driver.inspect, task)
- @mock.patch.object(manager, 'LOG')
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware')
+ @mock.patch.object(manager, 'LOG', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
+ autospec=True)
def test_inspect_hardware_return_other_state(self, mock_inspect, log_mock):
self._start_service()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
@@ -5974,7 +6193,7 @@ class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(states.INSPECTFAIL, node.provision_state)
self.assertEqual(states.MANAGEABLE, node.target_provision_state)
self.assertIsNotNone(node.last_error)
- mock_inspect.assert_called_once_with(mock.ANY)
+ mock_inspect.assert_called_once_with(task.driver.inspect, task)
self.assertTrue(log_mock.error.called)
def test__check_inspect_wait_timeouts(self):
@@ -6044,15 +6263,18 @@ class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
# Verify reservation has been cleared.
self.assertIsNone(node.reservation)
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.validate')
+ @mock.patch('ironic.drivers.modules.fake.FakeInspect.validate',
+ autospec=True)
def test_inspect_hardware_validate_fail(self, mock_validate):
self._test_inspect_hardware_validate_fail(mock_validate)
- @mock.patch('ironic.drivers.modules.fake.FakePower.validate')
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
def test_inspect_hardware_power_validate_fail(self, mock_validate):
self._test_inspect_hardware_validate_fail(mock_validate)
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware')
+ @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
+ autospec=True)
def test_inspect_hardware_raises_error(self, mock_inspect):
self._start_service()
mock_inspect.side_effect = exception.HardwareInspectionFailure('test')
@@ -6070,7 +6292,8 @@ class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual('test', node.last_error)
self.assertTrue(mock_inspect.called)
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware')
+ @mock.patch('ironic.drivers.modules.fake.FakeInspect.inspect_hardware',
+ autospec=True)
def test_inspect_hardware_unexpected_error(self, mock_inspect):
self._start_service()
mock_inspect.side_effect = RuntimeError('x')
@@ -6091,9 +6314,10 @@ class NodeInspectHardware(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertTrue(mock_inspect.called)
-@mock.patch.object(task_manager, 'acquire')
-@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor')
-@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list')
+@mock.patch.object(task_manager, 'acquire', autospec=True)
+@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
+ autospec=True)
+@mock.patch.object(dbapi.IMPL, 'get_nodeinfo_list', autospec=True)
class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
db_base.DbTestCase):
def setUp(self):
@@ -6130,7 +6354,8 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_inspect_wait_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
- mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid, self.node.driver,
self.node.conductor_group)
self.assertFalse(acquire_mock.called)
@@ -6143,7 +6368,8 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_inspect_wait_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
- mapped_mock.assert_called_once_with(self.node.uuid, self.node.driver,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid, self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context, self.node.uuid,
purpose=mock.ANY)
@@ -6161,7 +6387,8 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_inspect_wait_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
- mapped_mock.assert_called_once_with(self.node.uuid,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
@@ -6182,7 +6409,8 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_inspect_wait_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
- mapped_mock.assert_called_once_with(self.node.uuid,
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
self.node.driver,
self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
@@ -6204,8 +6432,10 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_inspect_wait_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
- mapped_mock.assert_called_once_with(
- self.node.uuid, self.node.driver, self.node.conductor_group)
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
+ self.node.driver,
+ self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
@@ -6227,9 +6457,11 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_inspect_wait_timeouts(self.context)
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
- self.assertEqual([mock.call(self.node.uuid, task.node.driver,
+ self.assertEqual([mock.call(self.service,
+ self.node.uuid, task.node.driver,
task.node.conductor_group),
- mock.call(self.node2.uuid, self.node2.driver,
+ mock.call(self.service,
+ self.node2.uuid, self.node2.driver,
self.node2.conductor_group)],
mapped_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.node.uuid,
@@ -6256,8 +6488,10 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
        # mapped should only be called for the first node, as we should
        # have exited the loop early due to NoFreeConductorWorker
- mapped_mock.assert_called_once_with(
- self.node.uuid, self.node.driver, self.node.conductor_group)
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
+ self.node.driver,
+ self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
@@ -6279,8 +6513,10 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
self._assert_get_nodeinfo_args(get_nodeinfo_mock)
        # mapped should only be called for the first node, as we should
        # have exited the loop early due to an unknown exception
- mapped_mock.assert_called_once_with(
- self.node.uuid, self.node.driver, self.node.conductor_group)
+ mapped_mock.assert_called_once_with(self.service,
+ self.node.uuid,
+ self.node.driver,
+ self.node.conductor_group)
acquire_mock.assert_called_once_with(self.context,
self.node.uuid,
purpose=mock.ANY)
@@ -6302,7 +6538,8 @@ class ManagerCheckInspectWaitTimeoutsTestCase(mgr_utils.CommonMixIn,
self.service._check_inspect_wait_timeouts(self.context)
        # Should only have run 2.
- self.assertEqual([mock.call(self.node.uuid, self.node.driver,
+ self.assertEqual([mock.call(self.service,
+ self.node.uuid, self.node.driver,
self.node.conductor_group)] * 2,
mapped_mock.call_args_list)
self.assertEqual([mock.call(self.context, self.node.uuid,
@@ -6348,15 +6585,27 @@ class DestroyPortTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.context, port)
self.assertEqual(exception.InvalidState, exc.exc_info[0])
- def test_destroy_port_node_active_and_maintenance(self):
+ def test_destroy_port_node_active_and_maintenance_vif_present(self):
+ instance_uuid = uuidutils.generate_uuid()
+ node = obj_utils.create_test_node(self.context, driver='fake-hardware',
+ instance_uuid=instance_uuid,
+ provision_state='active',
+ maintenance=True)
+ port = obj_utils.create_test_port(
+ self.context,
+ node_id=node.id,
+ internal_info={'tenant_vif_port_id': 'fake-id'})
+ self.service.destroy_port(self.context, port)
+ self.assertRaises(exception.PortNotFound, port.refresh)
+
+ def test_destroy_port_node_active_and_maintenance_no_vif(self):
instance_uuid = uuidutils.generate_uuid()
node = obj_utils.create_test_node(self.context, driver='fake-hardware',
instance_uuid=instance_uuid,
provision_state='active',
maintenance=True)
port = obj_utils.create_test_port(self.context,
- node_id=node.id,
- extra={'vif_port_id': 'fake-id'})
+ node_id=node.id)
self.service.destroy_port(self.context, port)
self.assertRaises(exception.PortNotFound,
self.dbapi.get_port_by_uuid,
@@ -6424,9 +6673,11 @@ class DestroyPortgroupTestCase(mgr_utils.ServiceSetUpMixin,
@mgr_utils.mock_record_keepalive
-@mock.patch.object(manager.ConductorManager, '_fail_if_in_state')
-@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor')
-@mock.patch.object(dbapi.IMPL, 'get_offline_conductors')
+@mock.patch.object(manager.ConductorManager, '_fail_if_in_state',
+ autospec=True)
+@mock.patch.object(manager.ConductorManager, '_mapped_to_this_conductor',
+ autospec=True)
+@mock.patch.object(dbapi.IMPL, 'get_offline_conductors', autospec=True)
class ManagerCheckOrphanNodesTestCase(mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
def setUp(self):
@@ -6455,9 +6706,10 @@ class ManagerCheckOrphanNodesTestCase(mgr_utils.ServiceSetUpMixin,
self.node.refresh()
mock_off_cond.assert_called_once_with()
- mock_mapped.assert_called_once_with(self.node.uuid, 'fake-hardware',
- '')
+ mock_mapped.assert_called_once_with(
+ self.service, self.node.uuid, 'fake-hardware', '')
mock_fail_if.assert_called_once_with(
+ self.service,
mock.ANY, {'uuid': self.node.uuid},
{states.DEPLOYING, states.CLEANING},
'provision_updated_at',
@@ -6478,9 +6730,10 @@ class ManagerCheckOrphanNodesTestCase(mgr_utils.ServiceSetUpMixin,
self.node.refresh()
mock_off_cond.assert_called_once_with()
- mock_mapped.assert_called_once_with(self.node.uuid, 'fake-hardware',
- '')
+ mock_mapped.assert_called_once_with(
+ self.service, self.node.uuid, 'fake-hardware', '')
mock_fail_if.assert_called_once_with(
+ self.service,
mock.ANY, {'uuid': self.node.uuid},
{states.DEPLOYING, states.CLEANING},
'provision_updated_at',
@@ -6504,7 +6757,7 @@ class ManagerCheckOrphanNodesTestCase(mgr_utils.ServiceSetUpMixin,
# assert node still locked
self.assertIsNotNone(self.node.reservation)
- @mock.patch.object(objects.Node, 'release')
+ @mock.patch.object(objects.Node, 'release', autospec=True)
def test__check_orphan_nodes_release_exceptions_skipping(
self, mock_release, mock_off_cond, mock_mapped, mock_fail_if):
mock_off_cond.return_value = ['fake-conductor']
@@ -6522,8 +6775,10 @@ class ManagerCheckOrphanNodesTestCase(mgr_utils.ServiceSetUpMixin,
self.node.refresh()
mock_off_cond.assert_called_once_with()
- expected_calls = [mock.call(self.node.uuid, 'fake-hardware', ''),
- mock.call(node2.uuid, 'fake-hardware', '')]
+ expected_calls = [
+ mock.call(self.service, self.node.uuid, 'fake-hardware', ''),
+ mock.call(self.service, node2.uuid, 'fake-hardware', '')
+ ]
mock_mapped.assert_has_calls(expected_calls)
# Assert we skipped and didn't try to call _fail_if_in_state
self.assertFalse(mock_fail_if.called)
@@ -6544,15 +6799,17 @@ class ManagerCheckOrphanNodesTestCase(mgr_utils.ServiceSetUpMixin,
mock_off_cond.return_value = ['fake-conductor']
mock_mapped.return_value = True
with mock.patch.object(objects.Node, 'release',
- side_effect=_fake_release) as mock_release:
+ side_effect=_fake_release,
+ autospec=True) as mock_release:
self.service._check_orphan_nodes(self.context)
mock_release.assert_called_with(self.context, mock.ANY,
self.node.id)
mock_off_cond.assert_called_once_with()
- mock_mapped.assert_called_once_with(self.node.uuid, 'fake-hardware',
- '')
+ mock_mapped.assert_called_once_with(
+ self.service, self.node.uuid, 'fake-hardware', '')
mock_fail_if.assert_called_once_with(
+ self.service,
mock.ANY, {'uuid': self.node.uuid},
{states.DEPLOYING, states.CLEANING},
'provision_updated_at',
@@ -6569,8 +6826,8 @@ class ManagerCheckOrphanNodesTestCase(mgr_utils.ServiceSetUpMixin,
self.node.refresh()
mock_off_cond.assert_called_once_with()
- mock_mapped.assert_called_once_with(self.node.uuid, 'fake-hardware',
- '')
+ mock_mapped.assert_called_once_with(
+ self.service, self.node.uuid, 'fake-hardware', '')
# assert node was released
self.assertIsNone(self.node.reservation)
# not changing states in maintenance
@@ -6683,9 +6940,12 @@ class TestIndirectionApiConductor(db_base.DbTestCase):
@mgr_utils.mock_record_keepalive
class DoNodeTakeOverTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
- @mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
+ @mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
+ autospec=True)
def test__do_takeover(self, mock_prepare, mock_take_over,
mock_start_console):
self._start_service()
@@ -6696,14 +6956,18 @@ class DoNodeTakeOverTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
node.refresh()
self.assertIsNone(node.last_error)
self.assertFalse(node.console_enabled)
- mock_prepare.assert_called_once_with(mock.ANY)
- mock_take_over.assert_called_once_with(mock.ANY)
+ mock_prepare.assert_called_once_with(task.driver.deploy, task)
+ mock_take_over.assert_called_once_with(task.driver.deploy, task)
self.assertFalse(mock_start_console.called)
- @mock.patch.object(notification_utils, 'emit_console_notification')
- @mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
+ @mock.patch.object(notification_utils, 'emit_console_notification',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
+ autospec=True)
def test__do_takeover_with_console_enabled(self, mock_prepare,
mock_take_over,
mock_start_console,
@@ -6717,19 +6981,23 @@ class DoNodeTakeOverTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
node.refresh()
self.assertIsNone(node.last_error)
self.assertTrue(node.console_enabled)
- mock_prepare.assert_called_once_with(mock.ANY)
- mock_take_over.assert_called_once_with(mock.ANY)
- mock_start_console.assert_called_once_with(mock.ANY)
+ mock_prepare.assert_called_once_with(task.driver.deploy, task)
+ mock_take_over.assert_called_once_with(task.driver.deploy, task)
+ mock_start_console.assert_called_once_with(task.driver.console, task)
mock_notify.assert_has_calls(
[mock.call(task, 'console_restore',
obj_fields.NotificationStatus.START),
mock.call(task, 'console_restore',
obj_fields.NotificationStatus.END)])
- @mock.patch.object(notification_utils, 'emit_console_notification')
- @mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
+ @mock.patch.object(notification_utils, 'emit_console_notification',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
+ autospec=True)
def test__do_takeover_with_console_exception(self, mock_prepare,
mock_take_over,
mock_start_console,
@@ -6744,19 +7012,23 @@ class DoNodeTakeOverTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
node.refresh()
self.assertIsNotNone(node.last_error)
self.assertFalse(node.console_enabled)
- mock_prepare.assert_called_once_with(mock.ANY)
- mock_take_over.assert_called_once_with(mock.ANY)
- mock_start_console.assert_called_once_with(mock.ANY)
+ mock_prepare.assert_called_once_with(task.driver.deploy, task)
+ mock_take_over.assert_called_once_with(task.driver.deploy, task)
+ mock_start_console.assert_called_once_with(task.driver.console, task)
mock_notify.assert_has_calls(
[mock.call(task, 'console_restore',
obj_fields.NotificationStatus.START),
mock.call(task, 'console_restore',
obj_fields.NotificationStatus.ERROR)])
- @mock.patch.object(notification_utils, 'emit_console_notification')
- @mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
+ @mock.patch.object(notification_utils, 'emit_console_notification',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
+ autospec=True)
def test__do_takeover_with_console_port_cleaned(self, mock_prepare,
mock_take_over,
mock_start_console,
@@ -6778,9 +7050,9 @@ class DoNodeTakeOverTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertIsNone(
node.driver_internal_info.get('allocated_ipmi_terminal_port',
None))
- mock_prepare.assert_called_once_with(mock.ANY)
- mock_take_over.assert_called_once_with(mock.ANY)
- mock_start_console.assert_called_once_with(mock.ANY)
+ mock_prepare.assert_called_once_with(task.driver.deploy, task)
+ mock_take_over.assert_called_once_with(task.driver.deploy, task)
+ mock_start_console.assert_called_once_with(task.driver.console, task)
mock_notify.assert_has_calls(
[mock.call(task, 'console_restore',
obj_fields.NotificationStatus.START),
@@ -6795,11 +7067,15 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
func(*args, **kwargs)
return mock.MagicMock()
- @mock.patch('ironic.drivers.modules.fake.FakePower.validate')
- @mock.patch('ironic.drivers.modules.fake.FakeBoot.validate')
- @mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
+ @mock.patch('ironic.drivers.modules.fake.FakePower.validate',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeBoot.validate', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
+ autospec=True)
def test__do_adoption_with_takeover(self,
mock_prepare,
mock_take_over,
@@ -6819,16 +7095,19 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(states.ACTIVE, node.provision_state)
self.assertIsNone(node.last_error)
self.assertFalse(node.console_enabled)
- mock_prepare.assert_called_once_with(mock.ANY)
- mock_take_over.assert_called_once_with(mock.ANY)
+ mock_prepare.assert_called_once_with(task.driver.deploy, task)
+ mock_take_over.assert_called_once_with(task.driver.deploy, task)
self.assertFalse(mock_start_console.called)
self.assertTrue(mock_boot_validate.called)
self.assertIn('is_whole_disk_image', task.node.driver_internal_info)
- @mock.patch('ironic.drivers.modules.fake.FakeBoot.validate')
- @mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
+ @mock.patch('ironic.drivers.modules.fake.FakeBoot.validate', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
+ autospec=True)
def test__do_adoption_take_over_failure(self,
mock_prepare,
mock_take_over,
@@ -6843,7 +7122,13 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self._start_service()
node = obj_utils.create_test_node(
self.context, driver='fake-hardware',
- provision_state=states.ADOPTING)
+ provision_state=states.ADOPTING,
+ power_state=states.POWER_ON)
+        # NOTE(TheJulia): When nodes are created for adoption, they
+        # would have no power state. Under normal circumstances the
+        # node object is updated with a power state during validation;
+        # however, we need to make sure that we wipe preserved state
+        # as part of failure handling.
task = task_manager.TaskManager(self.context, node.uuid)
self.service._do_adoption(task)
@@ -6852,16 +7137,20 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(states.ADOPTFAIL, node.provision_state)
self.assertIsNotNone(node.last_error)
self.assertFalse(node.console_enabled)
- mock_prepare.assert_called_once_with(mock.ANY)
- mock_take_over.assert_called_once_with(mock.ANY)
+ mock_prepare.assert_called_once_with(task.driver.deploy, task)
+ mock_take_over.assert_called_once_with(task.driver.deploy, task)
self.assertFalse(mock_start_console.called)
self.assertTrue(mock_boot_validate.called)
self.assertIn('is_whole_disk_image', task.node.driver_internal_info)
+ self.assertEqual(states.NOSTATE, node.power_state)
- @mock.patch('ironic.drivers.modules.fake.FakeBoot.validate')
- @mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over')
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare')
+ @mock.patch('ironic.drivers.modules.fake.FakeBoot.validate', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeConsole.start_console',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.take_over',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.prepare',
+ autospec=True)
def test__do_adoption_boot_validate_failure(self,
mock_prepare,
mock_take_over,
@@ -7031,9 +7320,10 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_spawn.side_effect = self._fake_spawn
- self.assertRaises(
- exception.InvalidParameterValue, self.service.heartbeat,
+ exc = self.assertRaises(
+ messaging.rpc.ExpectedException, self.service.heartbeat,
self.context, node.uuid, 'http://callback', agent_token=None)
+ self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
self.assertFalse(mock_heartbeat.called)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.heartbeat',
@@ -7106,10 +7396,11 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_spawn.side_effect = self._fake_spawn
- self.assertRaises(exception.InvalidParameterValue,
- self.service.heartbeat, self.context,
- node.uuid, 'http://callback',
- agent_token='evil', agent_version='5.0.0b23')
+ exc = self.assertRaises(messaging.rpc.ExpectedException,
+ self.service.heartbeat, self.context,
+ node.uuid, 'http://callback',
+ agent_token='evil', agent_version='5.0.0b23')
+ self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
self.assertFalse(mock_heartbeat.called)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.heartbeat',
@@ -7133,10 +7424,11 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_spawn.side_effect = self._fake_spawn
# Intentionally sending an older client in case something fishy
# occurs.
- self.assertRaises(exception.InvalidParameterValue,
- self.service.heartbeat, self.context,
- node.uuid, 'http://callback',
- agent_token='evil', agent_version='4.0.0')
+ exc = self.assertRaises(messaging.rpc.ExpectedException,
+ self.service.heartbeat, self.context,
+ node.uuid, 'http://callback',
+ agent_token='evil', agent_version='4.0.0')
+ self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
self.assertFalse(mock_heartbeat.called)
@mock.patch('ironic.drivers.modules.fake.FakeDeploy.heartbeat',
@@ -7158,10 +7450,11 @@ class DoNodeAdoptionTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
mock_spawn.side_effect = self._fake_spawn
- self.assertRaises(exception.InvalidParameterValue,
- self.service.heartbeat, self.context,
- node.uuid, 'http://callback',
- agent_token=None, agent_version='6.1.5')
+ exc = self.assertRaises(messaging.rpc.ExpectedException,
+ self.service.heartbeat, self.context,
+ node.uuid, 'http://callback',
+ agent_token=None, agent_version='6.1.5')
+ self.assertEqual(exception.InvalidParameterValue, exc.exc_info[0])
self.assertFalse(mock_heartbeat.called)
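
The heartbeat tests above now assert messaging.rpc.ExpectedException and
unwrap exc_info instead of expecting InvalidParameterValue directly. A
self-contained mimic of that server-side wrapping pattern (stand-in classes,
not oslo.messaging's actual implementation):

    import sys

    class ExpectedException(Exception):
        """Carries the original exception info across the RPC boundary."""
        def __init__(self):
            super().__init__()
            self.exc_info = sys.exc_info()

    def expected_exceptions(*exc_types):
        """Wrap declared exceptions, as an RPC server endpoint would."""
        def decorator(func):
            def wrapper(*args, **kwargs):
                try:
                    return func(*args, **kwargs)
                except exc_types:
                    raise ExpectedException()
            return wrapper
        return decorator

    class InvalidParameterValue(Exception):
        pass

    @expected_exceptions(InvalidParameterValue)
    def heartbeat(node_uuid, callback_url, agent_token=None):
        if agent_token is None:
            raise InvalidParameterValue('agent token required')

    try:
        heartbeat('node-uuid', 'http://callback')
    except ExpectedException as exc:
        # The tests assert the wrapper type, then check the original:
        assert exc.exc_info[0] is InvalidParameterValue
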
@@ -7424,7 +7717,8 @@ class UpdateVolumeTargetTestCase(mgr_utils.ServiceSetUpMixin,
self.context, node_id=node.id, extra={'vol_id': 'fake-id'})
new_volume_type = 'fibre_channel'
volume_target.volume_type = new_volume_type
- with mock.patch.object(objects.VolumeTarget, 'save') as mock_save:
+ with mock.patch.object(objects.VolumeTarget, 'save',
+ autospec=True) as mock_save:
mock_save.side_effect = expected_exc('Boo')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.update_volume_target,
@@ -7484,7 +7778,8 @@ class NodeTraitsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
self.assertEqual(self.traits[1:], [trait.trait for trait in traits])
def _test_add_node_traits_exception(self, expected_exc):
- with mock.patch.object(objects.Trait, 'create') as mock_create:
+ with mock.patch.object(objects.Trait, 'create',
+ autospec=True) as mock_create:
mock_create.side_effect = expected_exc('Boo')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.add_node_traits, self.context,
@@ -7529,7 +7824,8 @@ class NodeTraitsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
def _test_remove_node_traits_exception(self, expected_exc):
objects.TraitList.create(self.context, self.node.id, self.traits)
- with mock.patch.object(objects.Trait, 'destroy') as mock_destroy:
+ with mock.patch.object(objects.Trait, 'destroy',
+ autospec=True) as mock_destroy:
mock_destroy.side_effect = expected_exc('Boo')
exc = self.assertRaises(messaging.rpc.ExpectedException,
self.service.remove_node_traits,
@@ -7553,8 +7849,8 @@ class NodeTraitsTestCase(mgr_utils.ServiceSetUpMixin, db_base.DbTestCase):
class DoNodeInspectAbortTestCase(mgr_utils.CommonMixIn,
mgr_utils.ServiceSetUpMixin,
db_base.DbTestCase):
- @mock.patch.object(manager, 'LOG')
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.abort')
+ @mock.patch.object(manager, 'LOG', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeInspect.abort', autospec=True)
@mock.patch('ironic.conductor.task_manager.acquire', autospec=True)
def test_do_inspect_abort_interface_not_support(self, mock_acquire,
mock_abort, mock_log):
@@ -7574,8 +7870,8 @@ class DoNodeInspectAbortTestCase(mgr_utils.CommonMixIn,
exc.exc_info[0])
self.assertTrue(mock_log.error.called)
- @mock.patch.object(manager, 'LOG')
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.abort')
+ @mock.patch.object(manager, 'LOG', autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakeInspect.abort', autospec=True)
@mock.patch('ironic.conductor.task_manager.acquire', autospec=True)
def test_do_inspect_abort_interface_return_failed(self, mock_acquire,
mock_abort, mock_log):
@@ -7594,7 +7890,7 @@ class DoNodeInspectAbortTestCase(mgr_utils.CommonMixIn,
self.assertTrue(mock_log.exception.called)
self.assertIn('Failed to abort inspection.', node.last_error)
- @mock.patch('ironic.drivers.modules.fake.FakeInspect.abort')
+ @mock.patch('ironic.drivers.modules.fake.FakeInspect.abort', autospec=True)
@mock.patch('ironic.conductor.task_manager.acquire', autospec=True)
def test_do_inspect_abort_succeeded(self, mock_acquire, mock_abort):
self._start_service()
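
Nearly every assertion change in the file above follows from one
unittest.mock behavior: a class-level patch with autospec=True replaces the
method with a function mock, so calls record the instance as the first
argument. A minimal sketch:

    from unittest import mock

    class FakeInspect:
        def inspect_hardware(self, task):
            return 'INSPECTING'

    with mock.patch.object(FakeInspect, 'inspect_hardware',
                           autospec=True) as mock_inspect:
        inspector = FakeInspect()
        inspector.inspect_hardware('task')
        # Without autospec the recorded call is ('task',); with it the
        # bound instance is captured too, which is why mock.ANY became
        # explicit pairs such as (task.driver.inspect, task) above.
        mock_inspect.assert_called_once_with(inspector, 'task')
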
diff --git a/ironic/tests/unit/conductor/test_notification_utils.py b/ironic/tests/unit/conductor/test_notification_utils.py
index 8ef8780d3..dc3e1c43d 100644
--- a/ironic/tests/unit/conductor/test_notification_utils.py
+++ b/ironic/tests/unit/conductor/test_notification_utils.py
@@ -15,7 +15,8 @@
"""Test class for ironic-conductor notification utilities."""
-import mock
+from unittest import mock
+
from oslo_versionedobjects.exception import VersionedObjectsException
from ironic.common import exception
@@ -39,7 +40,8 @@ class TestNotificationUtils(db_base.DbTestCase):
'upgrade_lock', 'shared'])
self.task.node = self.node
- @mock.patch.object(notif_utils, '_emit_conductor_node_notification')
+ @mock.patch.object(notif_utils, '_emit_conductor_node_notification',
+ autospec=True)
def test_emit_power_state_corrected_notification(self, mock_cond_emit):
notif_utils.emit_power_state_corrected_notification(
self.task, states.POWER_ON)
@@ -53,7 +55,8 @@ class TestNotificationUtils(db_base.DbTestCase):
from_power=states.POWER_ON
)
- @mock.patch.object(notif_utils, '_emit_conductor_node_notification')
+ @mock.patch.object(notif_utils, '_emit_conductor_node_notification',
+ autospec=True)
def test_emit_power_set_notification(self, mock_cond_emit):
notif_utils.emit_power_set_notification(
self.task,
@@ -70,7 +73,8 @@ class TestNotificationUtils(db_base.DbTestCase):
to_power=states.POWER_ON
)
- @mock.patch.object(notif_utils, '_emit_conductor_node_notification')
+ @mock.patch.object(notif_utils, '_emit_conductor_node_notification',
+ autospec=True)
def test_emit_console_notification(self, mock_cond_emit):
notif_utils.emit_console_notification(
self.task, 'console_set', fields.NotificationStatus.END)
@@ -83,7 +87,8 @@ class TestNotificationUtils(db_base.DbTestCase):
fields.NotificationStatus.END,
)
- @mock.patch.object(notif_utils, '_emit_conductor_node_notification')
+ @mock.patch.object(notif_utils, '_emit_conductor_node_notification',
+ autospec=True)
def test_emit_console_notification_error_status(self, mock_cond_emit):
notif_utils.emit_console_notification(
self.task, 'console_set', fields.NotificationStatus.ERROR)
@@ -96,7 +101,7 @@ class TestNotificationUtils(db_base.DbTestCase):
fields.NotificationStatus.ERROR,
)
- @mock.patch.object(notification, 'mask_secrets')
+ @mock.patch.object(notification, 'mask_secrets', autospec=True)
def test__emit_conductor_node_notification(self, mock_secrets):
mock_notify_method = mock.Mock()
# Required for exception handling
@@ -151,7 +156,7 @@ class TestNotificationUtils(db_base.DbTestCase):
self.assertFalse(mock_notify_method.called)
- @mock.patch.object(notification, 'mask_secrets')
+ @mock.patch.object(notification, 'mask_secrets', autospec=True)
def test__emit_conductor_node_notification_known_notify_exc(self,
mock_secrets):
"""Test exception caught for a known notification exception."""
@@ -178,7 +183,8 @@ class TestNotificationUtils(db_base.DbTestCase):
class ProvisionNotifyTestCase(tests_base.TestCase):
- @mock.patch('ironic.objects.node.NodeSetProvisionStateNotification')
+ @mock.patch('ironic.objects.node.NodeSetProvisionStateNotification',
+ autospec=True)
def test_emit_notification(self, provision_mock):
provision_mock.__name__ = 'NodeSetProvisionStateNotification'
self.config(host='fake-host')
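
For module-level callables like _emit_conductor_node_notification there is
no instance to capture; what autospec=True buys here is signature
enforcement. A toy illustration (stand-in function, not the ironic helper):

    from types import SimpleNamespace
    from unittest import mock

    notif_utils = SimpleNamespace(emit=lambda task, event, status: None)

    with mock.patch.object(notif_utils, 'emit', autospec=True) as m:
        notif_utils.emit('task', 'console_set', 'end')  # matches the spec
        m.assert_called_once_with('task', 'console_set', 'end')
        try:
            notif_utils.emit('task')  # wrong arity
        except TypeError:
            pass  # autospec rejects calls the real signature would reject
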
diff --git a/ironic/tests/unit/conductor/test_rpcapi.py b/ironic/tests/unit/conductor/test_rpcapi.py
index a4d327398..a3529b2d6 100644
--- a/ironic/tests/unit/conductor/test_rpcapi.py
+++ b/ironic/tests/unit/conductor/test_rpcapi.py
@@ -19,8 +19,8 @@ Unit Tests for :py:class:`ironic.conductor.rpcapi.ConductorAPI`.
"""
import copy
+from unittest import mock
-import mock
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_messaging import _utils as messaging_utils
@@ -48,14 +48,15 @@ class ConductorRPCAPITestCase(tests_base.TestCase):
conductor_manager.ConductorManager.RPC_API_VERSION,
conductor_rpcapi.ConductorAPI.RPC_API_VERSION)
- @mock.patch('ironic.common.rpc.get_client')
+ @mock.patch('ironic.common.rpc.get_client', autospec=True)
def test_version_cap(self, mock_get_client):
conductor_rpcapi.ConductorAPI()
self.assertEqual(conductor_rpcapi.ConductorAPI.RPC_API_VERSION,
mock_get_client.call_args[1]['version_cap'])
- @mock.patch('ironic.common.release_mappings.RELEASE_MAPPING')
- @mock.patch('ironic.common.rpc.get_client')
+ @mock.patch('ironic.common.release_mappings.RELEASE_MAPPING',
+ autospec=True)
+ @mock.patch('ironic.common.rpc.get_client', autospec=True)
def test_version_capped(self, mock_get_client, mock_release_mapping):
CONF.set_override('pin_release_version',
release_mappings.RELEASE_VERSIONS[0])
@@ -187,7 +188,8 @@ class RPCAPITestCase(db_base.DbTestCase):
def _test_can_send_create_port(self, can_send):
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
with mock.patch.object(rpcapi.client,
- "can_send_version") as mock_can_send_version:
+ "can_send_version",
+ autospec=True) as mock_can_send_version:
mock_can_send_version.return_value = can_send
result = rpcapi.can_send_create_port()
self.assertEqual(can_send, result)
@@ -233,13 +235,16 @@ class RPCAPITestCase(db_base.DbTestCase):
return expected_retval
with mock.patch.object(rpcapi.client,
- "can_send_version") as mock_can_send_version:
+ "can_send_version",
+ autospec=True) as mock_can_send_version:
mock_can_send_version.side_effect = _fake_can_send_version_method
- with mock.patch.object(rpcapi.client, "prepare") as mock_prepared:
+ with mock.patch.object(rpcapi.client, "prepare",
+ autospec=True) as mock_prepared:
mock_prepared.side_effect = _fake_prepare_method
with mock.patch.object(rpcapi.client,
- rpc_method) as mock_method:
+ rpc_method,
+ autospec=True) as mock_method:
mock_method.side_effect = _fake_rpc_method
retval = getattr(rpcapi, method)(self.context, **kwargs)
self.assertEqual(retval, expected_retval)
@@ -598,7 +603,8 @@ class RPCAPITestCase(db_base.DbTestCase):
def _test_can_send_rescue(self, can_send):
rpcapi = conductor_rpcapi.ConductorAPI(topic='fake-topic')
with mock.patch.object(rpcapi.client,
- "can_send_version") as mock_can_send_version:
+ "can_send_version",
+ autospec=True) as mock_can_send_version:
mock_can_send_version.return_value = can_send
result = rpcapi.can_send_rescue()
self.assertEqual(can_send, result)
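
One contrast worth noting: the patches above target an attribute on a live
instance (rpcapi.client), so the autospecced replacement is specced from a
bound method and no extra first argument appears, unlike the class-level
patches elsewhere in this change. A sketch under that assumption:

    from unittest import mock

    class Client:
        def can_send_version(self, version):
            return True

    rpc_client = Client()
    with mock.patch.object(rpc_client, 'can_send_version',
                           autospec=True) as mock_csv:
        mock_csv.return_value = False
        rpc_client.can_send_version('1.48')
        # Specced from the bound method, so the signature is (version,)
        # rather than (self, version).
        mock_csv.assert_called_once_with('1.48')
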
diff --git a/ironic/tests/unit/conductor/test_steps.py b/ironic/tests/unit/conductor/test_steps.py
index cc7c8af47..845d639d4 100644
--- a/ironic/tests/unit/conductor/test_steps.py
+++ b/ironic/tests/unit/conductor/test_steps.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_utils import uuidutils
@@ -125,7 +126,7 @@ class NodeDeployStepsTestCase(db_base.DbTestCase):
mock_power_steps.assert_called_once_with(mock.ANY, task)
mock_deploy_steps.assert_called_once_with(mock.ANY, task)
- @mock.patch.object(objects.DeployTemplate, 'list_by_names')
+ @mock.patch.object(objects.DeployTemplate, 'list_by_names', autospec=True)
def test__get_deployment_templates_no_traits(self, mock_list):
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
@@ -133,7 +134,8 @@ class NodeDeployStepsTestCase(db_base.DbTestCase):
self.assertEqual([], templates)
self.assertFalse(mock_list.called)
- @mock.patch.object(objects.DeployTemplate, 'list_by_names')
+ @mock.patch.object(objects.DeployTemplate, 'list_by_names',
+ autospec=True)
def test__get_deployment_templates(self, mock_list):
traits = ['CUSTOM_DT1', 'CUSTOM_DT2']
node = obj_utils.create_test_node(
@@ -504,8 +506,10 @@ class NodeCleaningStepsTestCase(db_base.DbTestCase):
@mock.patch('ironic.drivers.modules.fake.FakeBIOS.get_clean_steps',
lambda self, task: [])
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps')
- @mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps')
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps',
+ autospec=True)
def test__get_cleaning_steps(self, mock_power_steps, mock_deploy_steps):
# Test getting cleaning steps, with one driver returning None, two
# conflicting priorities, and asserting they are ordered properly.
@@ -526,8 +530,10 @@ class NodeCleaningStepsTestCase(db_base.DbTestCase):
@mock.patch('ironic.drivers.modules.fake.FakeBIOS.get_clean_steps',
lambda self, task: [])
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps')
- @mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps')
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps',
+ autospec=True)
def test__get_cleaning_steps_unsorted(self, mock_power_steps,
mock_deploy_steps):
node = obj_utils.create_test_node(
@@ -544,8 +550,10 @@ class NodeCleaningStepsTestCase(db_base.DbTestCase):
sort=False)
self.assertEqual(mock_deploy_steps.return_value, steps)
- @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps')
- @mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps')
+ @mock.patch('ironic.drivers.modules.fake.FakeDeploy.get_clean_steps',
+ autospec=True)
+ @mock.patch('ironic.drivers.modules.fake.FakePower.get_clean_steps',
+ autospec=True)
def test__get_cleaning_steps_only_enabled(self, mock_power_steps,
mock_deploy_steps):
# Test getting only cleaning steps, with one driver returning None, two
@@ -567,8 +575,9 @@ class NodeCleaningStepsTestCase(db_base.DbTestCase):
self.assertEqual(self.clean_steps, steps)
- @mock.patch.object(conductor_steps, '_validate_user_clean_steps')
- @mock.patch.object(conductor_steps, '_get_cleaning_steps')
+ @mock.patch.object(conductor_steps, '_validate_user_clean_steps',
+ autospec=True)
+ @mock.patch.object(conductor_steps, '_get_cleaning_steps', autospec=True)
def test_set_node_cleaning_steps_automated(self, mock_steps,
mock_validate_user_steps):
mock_steps.return_value = self.clean_steps
@@ -590,8 +599,9 @@ class NodeCleaningStepsTestCase(db_base.DbTestCase):
mock_steps.assert_called_once_with(task, enabled=True)
self.assertFalse(mock_validate_user_steps.called)
- @mock.patch.object(conductor_steps, '_validate_user_clean_steps')
- @mock.patch.object(conductor_steps, '_get_cleaning_steps')
+ @mock.patch.object(conductor_steps, '_validate_user_clean_steps',
+ autospec=True)
+ @mock.patch.object(conductor_steps, '_get_cleaning_steps', autospec=True)
def test_set_node_cleaning_steps_manual(self, mock_steps,
mock_validate_user_steps):
clean_steps = [self.deploy_raid]
@@ -616,7 +626,7 @@ class NodeCleaningStepsTestCase(db_base.DbTestCase):
self.assertFalse(mock_steps.called)
mock_validate_user_steps.assert_called_once_with(task, clean_steps)
- @mock.patch.object(conductor_steps, '_get_cleaning_steps')
+ @mock.patch.object(conductor_steps, '_get_cleaning_steps', autospec=True)
def test__validate_user_clean_steps(self, mock_steps):
node = obj_utils.create_test_node(self.context)
mock_steps.return_value = self.clean_steps
@@ -635,7 +645,7 @@ class NodeCleaningStepsTestCase(db_base.DbTestCase):
'priority': 20, 'abortable': True}]
self.assertEqual(expected, result)
- @mock.patch.object(conductor_steps, '_get_cleaning_steps')
+ @mock.patch.object(conductor_steps, '_get_cleaning_steps', autospec=True)
def test__validate_user_clean_steps_no_steps(self, mock_steps):
node = obj_utils.create_test_node(self.context)
mock_steps.return_value = self.clean_steps
@@ -644,7 +654,7 @@ class NodeCleaningStepsTestCase(db_base.DbTestCase):
conductor_steps._validate_user_clean_steps(task, [])
mock_steps.assert_called_once_with(task, enabled=False, sort=False)
- @mock.patch.object(conductor_steps, '_get_cleaning_steps')
+ @mock.patch.object(conductor_steps, '_get_cleaning_steps', autospec=True)
def test__validate_user_clean_steps_get_steps_exception(self, mock_steps):
node = obj_utils.create_test_node(self.context)
mock_steps.side_effect = exception.NodeCleaningFailure('bad')
@@ -655,7 +665,7 @@ class NodeCleaningStepsTestCase(db_base.DbTestCase):
task, [])
mock_steps.assert_called_once_with(task, enabled=False, sort=False)
- @mock.patch.object(conductor_steps, '_get_cleaning_steps')
+ @mock.patch.object(conductor_steps, '_get_cleaning_steps', autospec=True)
def test__validate_user_clean_steps_not_supported(self, mock_steps):
node = obj_utils.create_test_node(self.context)
mock_steps.return_value = [self.power_update, self.deploy_raid]
@@ -669,7 +679,7 @@ class NodeCleaningStepsTestCase(db_base.DbTestCase):
task, user_steps)
mock_steps.assert_called_once_with(task, enabled=False, sort=False)
- @mock.patch.object(conductor_steps, '_get_cleaning_steps')
+ @mock.patch.object(conductor_steps, '_get_cleaning_steps', autospec=True)
def test__validate_user_clean_steps_invalid_arg(self, mock_steps):
node = obj_utils.create_test_node(self.context)
mock_steps.return_value = self.clean_steps
@@ -684,7 +694,7 @@ class NodeCleaningStepsTestCase(db_base.DbTestCase):
task, user_steps)
mock_steps.assert_called_once_with(task, enabled=False, sort=False)
- @mock.patch.object(conductor_steps, '_get_cleaning_steps')
+ @mock.patch.object(conductor_steps, '_get_cleaning_steps', autospec=True)
def test__validate_user_clean_steps_missing_required_arg(self, mock_steps):
node = obj_utils.create_test_node(self.context)
mock_steps.return_value = [self.power_update, self.deploy_raid]
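
The _get_cleaning_steps tests above exercise collecting steps from several
interfaces, tolerating a driver that returns None, and ordering the result
by priority. A toy sketch of that merge (not ironic's implementation):

    def merge_clean_steps(steps_by_interface):
        merged = [step
                  for iface_steps in steps_by_interface.values()
                  if iface_steps  # an interface may return None
                  for step in iface_steps]
        return sorted(merged, key=lambda s: s['priority'], reverse=True)

    merged = merge_clean_steps({
        'deploy': [{'step': 'erase_devices', 'priority': 99}],
        'power': [{'step': 'reset_bmc', 'priority': 20}],
        'bios': None,
    })
    assert [s['step'] for s in merged] == ['erase_devices', 'reset_bmc']
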
diff --git a/ironic/tests/unit/conductor/test_task_manager.py b/ironic/tests/unit/conductor/test_task_manager.py
index 1cb61441f..8a57d7dea 100644
--- a/ironic/tests/unit/conductor/test_task_manager.py
+++ b/ironic/tests/unit/conductor/test_task_manager.py
@@ -17,8 +17,9 @@
"""Tests for :class:`ironic.conductor.task_manager`."""
+from unittest import mock
+
import futurist
-import mock
from oslo_utils import uuidutils
from ironic.common import driver_factory
@@ -34,14 +35,14 @@ from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
-@mock.patch.object(objects.Node, 'get')
-@mock.patch.object(objects.Node, 'release')
-@mock.patch.object(objects.Node, 'reserve')
-@mock.patch.object(driver_factory, 'build_driver_for_task')
-@mock.patch.object(objects.Port, 'list_by_node_id')
-@mock.patch.object(objects.Portgroup, 'list_by_node_id')
-@mock.patch.object(objects.VolumeConnector, 'list_by_node_id')
-@mock.patch.object(objects.VolumeTarget, 'list_by_node_id')
+@mock.patch.object(objects.Node, 'get', autospec=True)
+@mock.patch.object(objects.Node, 'release', autospec=True)
+@mock.patch.object(objects.Node, 'reserve', autospec=True)
+@mock.patch.object(driver_factory, 'build_driver_for_task', autospec=True)
+@mock.patch.object(objects.Port, 'list_by_node_id', autospec=True)
+@mock.patch.object(objects.Portgroup, 'list_by_node_id', autospec=True)
+@mock.patch.object(objects.VolumeConnector, 'list_by_node_id', autospec=True)
+@mock.patch.object(objects.VolumeTarget, 'list_by_node_id', autospec=True)
class TaskManagerTestCase(db_base.DbTestCase):
def setUp(self):
super(TaskManagerTestCase, self).setUp()
@@ -672,7 +673,7 @@ class TaskManagerTestCase(db_base.DbTestCase):
on_error_handler.assert_called_once_with(expected_exception,
'fake-argument')
- @mock.patch.object(states.machine, 'copy')
+ @mock.patch.object(states.machine, 'copy', autospec=True)
def test_init_prepares_fsm(
self, copy_mock, get_volconn_mock, get_voltgt_mock,
get_portgroups_mock, get_ports_mock,
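
The rewritten patch stack above also illustrates mock's argument ordering:
decorators apply bottom-up, so the patch nearest the test method supplies
the first mock parameter. A minimal demonstration:

    from unittest import mock

    class Node:
        def get(self):
            pass

        def release(self):
            pass

    @mock.patch.object(Node, 'get', autospec=True)      # outermost: last arg
    @mock.patch.object(Node, 'release', autospec=True)  # innermost: first arg
    def check(mock_release, mock_get):
        node = Node()
        node.release()
        mock_release.assert_called_once_with(node)
        mock_get.assert_not_called()

    check()
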
diff --git a/ironic/tests/unit/conductor/test_utils.py b/ironic/tests/unit/conductor/test_utils.py
index 2ec5649c6..0dea519e2 100644
--- a/ironic/tests/unit/conductor/test_utils.py
+++ b/ironic/tests/unit/conductor/test_utils.py
@@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import time
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
@@ -175,7 +175,8 @@ class NodePowerActionTestCase(db_base.DbTestCase):
self.assertIsNone(node['target_power_state'])
self.assertIsNone(node['last_error'])
- @mock.patch('ironic.objects.node.NodeSetPowerStateNotification')
+ @mock.patch('ironic.objects.node.NodeSetPowerStateNotification',
+ autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
@mock.patch.object(nova, 'power_update', autospec=True)
def test_node_power_action_power_on_notify(self, mock_power_update,
@@ -224,10 +225,12 @@ class NodePowerActionTestCase(db_base.DbTestCase):
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
def test_node_power_action_power_off(self, get_power_mock):
"""Test node_power_action to turn node power off."""
+ dii = {'agent_secret_token': 'token'}
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake-hardware',
- power_state=states.POWER_ON)
+ power_state=states.POWER_ON,
+ driver_internal_info=dii)
task = task_manager.TaskManager(self.context, node.uuid)
get_power_mock.return_value = states.POWER_ON
@@ -239,6 +242,31 @@ class NodePowerActionTestCase(db_base.DbTestCase):
self.assertEqual(states.POWER_OFF, node['power_state'])
self.assertIsNone(node['target_power_state'])
self.assertIsNone(node['last_error'])
+ self.assertNotIn('agent_secret_token', node['driver_internal_info'])
+
+ @mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
+ def test_node_power_action_power_off_pregenerated_token(self,
+ get_power_mock):
+ dii = {'agent_secret_token': 'token',
+ 'agent_secret_token_pregenerated': True}
+ node = obj_utils.create_test_node(self.context,
+ uuid=uuidutils.generate_uuid(),
+ driver='fake-hardware',
+ power_state=states.POWER_ON,
+ driver_internal_info=dii)
+ task = task_manager.TaskManager(self.context, node.uuid)
+
+ get_power_mock.return_value = states.POWER_ON
+
+ conductor_utils.node_power_action(task, states.POWER_OFF)
+
+ node.refresh()
+ get_power_mock.assert_called_once_with(mock.ANY, mock.ANY)
+ self.assertEqual(states.POWER_OFF, node['power_state'])
+ self.assertIsNone(node['target_power_state'])
+ self.assertIsNone(node['last_error'])
+ self.assertEqual('token',
+ node['driver_internal_info']['agent_secret_token'])
@mock.patch.object(fake.FakePower, 'reboot', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
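
The two power-off tests above pin down one rule: the agent secret token is
wiped on power-off unless it was pregenerated. A minimal sketch of that rule
(hypothetical helper name, not ironic's actual code):

    def wipe_token_on_power_off(driver_internal_info):
        # A pregenerated token (e.g. one baked into a prebuilt ramdisk)
        # must survive power cycles; otherwise the token is dropped so a
        # fresh one is issued the next time the agent comes up.
        if not driver_internal_info.get('agent_secret_token_pregenerated'):
            driver_internal_info.pop('agent_secret_token', None)
        return driver_internal_info

    assert 'agent_secret_token' not in wipe_token_on_power_off(
        {'agent_secret_token': 'token'})
    assert wipe_token_on_power_off(
        {'agent_secret_token': 'token',
         'agent_secret_token_pregenerated': True}
    )['agent_secret_token'] == 'token'
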
@@ -288,7 +316,8 @@ class NodePowerActionTestCase(db_base.DbTestCase):
self.assertIsNone(node['target_power_state'])
self.assertIsNone(node['last_error'])
- @mock.patch('ironic.objects.node.NodeSetPowerStateNotification')
+ @mock.patch('ironic.objects.node.NodeSetPowerStateNotification',
+ autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
def test_node_power_action_invalid_state_notify(self, get_power_mock,
mock_notif):
@@ -440,7 +469,8 @@ class NodePowerActionTestCase(db_base.DbTestCase):
self.assertIsNone(node['target_power_state'])
self.assertIsNotNone(node['last_error'])
- @mock.patch('ironic.objects.node.NodeSetPowerStateNotification')
+ @mock.patch('ironic.objects.node.NodeSetPowerStateNotification',
+ autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
def test_node_power_action_failed_getting_state_notify(self,
get_power_mock,
@@ -514,7 +544,8 @@ class NodePowerActionTestCase(db_base.DbTestCase):
self.assertIsNone(node['target_power_state'])
self.assertIsNotNone(node['last_error'])
- @mock.patch('ironic.objects.node.NodeSetPowerStateNotification')
+ @mock.patch('ironic.objects.node.NodeSetPowerStateNotification',
+ autospec=True)
@mock.patch.object(fake.FakePower, 'set_power_state', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
def test_node_power_action_set_power_failure_notify(self, get_power_mock,
@@ -711,7 +742,8 @@ class NodePowerActionTestCase(db_base.DbTestCase):
self.assertEqual(states.NOSTATE, node['target_power_state'])
self.assertIsNone(node['last_error'])
- @mock.patch('ironic.objects.node.NodeSetPowerStateNotification')
+ @mock.patch('ironic.objects.node.NodeSetPowerStateNotification',
+ autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
def test__can_skip_state_change_failed_getting_state_notify(
self, get_power_mock, mock_notif):
@@ -1001,7 +1033,7 @@ class ErrorHandlersTestCase(tests_base.TestCase):
maintenance=False, maintenance_reason=None)
self.task.context = self.context
- @mock.patch.object(conductor_utils, 'LOG')
+ @mock.patch.object(conductor_utils, 'LOG', autospec=True)
def test_provision_error_handler_no_worker(self, log_mock):
exc = exception.NoFreeConductorWorker()
conductor_utils.provisioning_error_handler(exc, self.node, 'state-one',
@@ -1012,7 +1044,7 @@ class ErrorHandlersTestCase(tests_base.TestCase):
self.assertIn('No free conductor workers', self.node.last_error)
self.assertTrue(log_mock.warning.called)
- @mock.patch.object(conductor_utils, 'LOG')
+ @mock.patch.object(conductor_utils, 'LOG', autospec=True)
def test_provision_error_handler_other_error(self, log_mock):
exc = Exception('foo')
conductor_utils.provisioning_error_handler(exc, self.node, 'state-one',
@@ -1020,7 +1052,8 @@ class ErrorHandlersTestCase(tests_base.TestCase):
self.assertFalse(self.node.save.called)
self.assertFalse(log_mock.warning.called)
- @mock.patch.object(conductor_utils, 'cleaning_error_handler')
+ @mock.patch.object(conductor_utils, 'cleaning_error_handler',
+ autospec=True)
def test_cleanup_cleanwait_timeout_handler_call(self, mock_error_handler):
self.node.clean_step = {}
conductor_utils.cleanup_cleanwait_timeout(self.task)
@@ -1117,7 +1150,7 @@ class ErrorHandlersTestCase(tests_base.TestCase):
driver.tear_down_cleaning.assert_called_once_with(self.task)
self.assertFalse(self.task.process_event.called)
- @mock.patch.object(conductor_utils, 'LOG')
+ @mock.patch.object(conductor_utils, 'LOG', autospec=True)
def test_cleaning_error_handler_tear_down_error(self, log_mock):
def _side_effect(task):
# simulate overwriting last error by another operation (e.g. power)
@@ -1151,7 +1184,7 @@ class ErrorHandlersTestCase(tests_base.TestCase):
self.assertIn('take over', self.node.last_error)
self.node.save.assert_called_once_with()
- @mock.patch.object(conductor_utils, 'LOG')
+ @mock.patch.object(conductor_utils, 'LOG', autospec=True)
def test_spawn_cleaning_error_handler_no_worker(self, log_mock):
exc = exception.NoFreeConductorWorker()
conductor_utils.spawn_cleaning_error_handler(exc, self.node)
@@ -1159,7 +1192,7 @@ class ErrorHandlersTestCase(tests_base.TestCase):
self.assertIn('No free conductor workers', self.node.last_error)
self.assertTrue(log_mock.warning.called)
- @mock.patch.object(conductor_utils, 'LOG')
+ @mock.patch.object(conductor_utils, 'LOG', autospec=True)
def test_spawn_cleaning_error_handler_other_error(self, log_mock):
exc = Exception('foo')
conductor_utils.spawn_cleaning_error_handler(exc, self.node)
@@ -1181,7 +1214,7 @@ class ErrorHandlersTestCase(tests_base.TestCase):
self.assertFalse(self.node.save.called)
self.assertFalse(log_mock.warning.called)
- @mock.patch.object(conductor_utils, 'LOG')
+ @mock.patch.object(conductor_utils, 'LOG', autospec=True)
def test_spawn_rescue_error_handler_no_worker(self, log_mock):
exc = exception.NoFreeConductorWorker()
self.node.instance_info = {'rescue_password': 'pass',
@@ -1193,7 +1226,7 @@ class ErrorHandlersTestCase(tests_base.TestCase):
self.assertNotIn('rescue_password', self.node.instance_info)
self.assertNotIn('hashed_rescue_password', self.node.instance_info)
- @mock.patch.object(conductor_utils, 'LOG')
+ @mock.patch.object(conductor_utils, 'LOG', autospec=True)
def test_spawn_rescue_error_handler_other_error(self, log_mock):
exc = Exception('foo')
self.node.instance_info = {'rescue_password': 'pass',
@@ -1203,7 +1236,7 @@ class ErrorHandlersTestCase(tests_base.TestCase):
self.assertFalse(log_mock.warning.called)
self.assertIn('rescue_password', self.node.instance_info)
- @mock.patch.object(conductor_utils, 'LOG')
+ @mock.patch.object(conductor_utils, 'LOG', autospec=True)
def test_power_state_error_handler_no_worker(self, log_mock):
exc = exception.NoFreeConductorWorker()
conductor_utils.power_state_error_handler(exc, self.node, 'newstate')
@@ -1213,15 +1246,15 @@ class ErrorHandlersTestCase(tests_base.TestCase):
self.assertIn('No free conductor workers', self.node.last_error)
self.assertTrue(log_mock.warning.called)
- @mock.patch.object(conductor_utils, 'LOG')
+ @mock.patch.object(conductor_utils, 'LOG', autospec=True)
def test_power_state_error_handler_other_error(self, log_mock):
exc = Exception('foo')
conductor_utils.power_state_error_handler(exc, self.node, 'foo')
self.assertFalse(self.node.save.called)
self.assertFalse(log_mock.warning.called)
- @mock.patch.object(conductor_utils, 'LOG')
- @mock.patch.object(conductor_utils, 'node_power_action')
+ @mock.patch.object(conductor_utils, 'LOG', autospec=True)
+ @mock.patch.object(conductor_utils, 'node_power_action', autospec=True)
def test_cleanup_rescuewait_timeout(self, node_power_mock, log_mock):
conductor_utils.cleanup_rescuewait_timeout(self.task)
self.assertTrue(log_mock.error.called)
@@ -1230,8 +1263,8 @@ class ErrorHandlersTestCase(tests_base.TestCase):
self.assertIn('Timeout reached', self.node.last_error)
self.node.save.assert_called_once_with()
- @mock.patch.object(conductor_utils, 'LOG')
- @mock.patch.object(conductor_utils, 'node_power_action')
+ @mock.patch.object(conductor_utils, 'LOG', autospec=True)
+ @mock.patch.object(conductor_utils, 'node_power_action', autospec=True)
def test_cleanup_rescuewait_timeout_known_exc(
self, node_power_mock, log_mock):
clean_up_mock = self.task.driver.rescue.clean_up
@@ -1243,8 +1276,8 @@ class ErrorHandlersTestCase(tests_base.TestCase):
self.assertIn('moocow', self.node.last_error)
self.node.save.assert_called_once_with()
- @mock.patch.object(conductor_utils, 'LOG')
- @mock.patch.object(conductor_utils, 'node_power_action')
+ @mock.patch.object(conductor_utils, 'LOG', autospec=True)
+ @mock.patch.object(conductor_utils, 'node_power_action', autospec=True)
def test_cleanup_rescuewait_timeout_unknown_exc(
self, node_power_mock, log_mock):
clean_up_mock = self.task.driver.rescue.clean_up
@@ -1257,7 +1290,7 @@ class ErrorHandlersTestCase(tests_base.TestCase):
self.node.save.assert_called_once_with()
self.assertTrue(log_mock.exception.called)
- @mock.patch.object(conductor_utils, 'node_power_action')
+ @mock.patch.object(conductor_utils, 'node_power_action', autospec=True)
def _test_rescuing_error_handler(self, node_power_mock,
set_state=True):
self.node.provision_state = states.RESCUEWAIT
@@ -1280,8 +1313,8 @@ class ErrorHandlersTestCase(tests_base.TestCase):
def test_rescuing_error_handler_set_failed_state_false(self):
self._test_rescuing_error_handler(set_state=False)
- @mock.patch.object(conductor_utils.LOG, 'error')
- @mock.patch.object(conductor_utils, 'node_power_action')
+ @mock.patch.object(conductor_utils.LOG, 'error', autospec=True)
+ @mock.patch.object(conductor_utils, 'node_power_action', autospec=True)
def test_rescuing_error_handler_ironic_exc(self, node_power_mock,
log_mock):
self.node.provision_state = states.RESCUEWAIT
@@ -1299,8 +1332,8 @@ class ErrorHandlersTestCase(tests_base.TestCase):
'error': expected_exc})
self.node.save.assert_called_once_with()
- @mock.patch.object(conductor_utils.LOG, 'exception')
- @mock.patch.object(conductor_utils, 'node_power_action')
+ @mock.patch.object(conductor_utils.LOG, 'exception', autospec=True)
+ @mock.patch.object(conductor_utils, 'node_power_action', autospec=True)
def test_rescuing_error_handler_other_exc(self, node_power_mock,
log_mock):
self.node.provision_state = states.RESCUEWAIT
@@ -1317,8 +1350,8 @@ class ErrorHandlersTestCase(tests_base.TestCase):
{'node': self.node.uuid})
self.node.save.assert_called_once_with()
- @mock.patch.object(conductor_utils.LOG, 'error')
- @mock.patch.object(conductor_utils, 'node_power_action')
+ @mock.patch.object(conductor_utils.LOG, 'error', autospec=True)
+ @mock.patch.object(conductor_utils, 'node_power_action', autospec=True)
def test_rescuing_error_handler_bad_state(self, node_power_mock,
log_mock):
self.node.provision_state = states.RESCUE
@@ -1351,7 +1384,7 @@ class ValidatePortPhysnetTestCase(db_base.DbTestCase):
self.node = obj_utils.create_test_node(self.context,
driver='fake-hardware')
- @mock.patch.object(objects.Port, 'obj_what_changed')
+ @mock.patch.object(objects.Port, 'obj_what_changed', autospec=True)
def test_validate_port_physnet_no_portgroup_create(self, mock_owc):
port = obj_utils.get_test_port(self.context, node_id=self.node.id)
# NOTE(mgoddard): The port object passed to the conductor will not have
@@ -1362,7 +1395,7 @@ class ValidatePortPhysnetTestCase(db_base.DbTestCase):
# Verify the early return in the non-portgroup case.
self.assertFalse(mock_owc.called)
- @mock.patch.object(network, 'get_ports_by_portgroup_id')
+ @mock.patch.object(network, 'get_ports_by_portgroup_id', autospec=True)
def test_validate_port_physnet_no_portgroup_update(self, mock_gpbpi):
port = obj_utils.create_test_port(self.context, node_id=self.node.id)
port.extra = {'foo': 'bar'}
@@ -1716,7 +1749,8 @@ class MiscTestCase(db_base.DbTestCase):
@mock.patch.object(time, 'sleep', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
- @mock.patch.object(drivers_base.NetworkInterface, 'need_power_on')
+ @mock.patch.object(drivers_base.NetworkInterface, 'need_power_on',
+ autospec=True)
@mock.patch.object(conductor_utils, 'node_set_boot_device',
autospec=True)
@mock.patch.object(conductor_utils, 'node_power_action',
@@ -1736,7 +1770,8 @@ class MiscTestCase(db_base.DbTestCase):
@mock.patch.object(time, 'sleep', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
- @mock.patch.object(drivers_base.NetworkInterface, 'need_power_on')
+ @mock.patch.object(drivers_base.NetworkInterface, 'need_power_on',
+ autospec=True)
@mock.patch.object(conductor_utils, 'node_set_boot_device',
autospec=True)
@mock.patch.object(conductor_utils, 'node_power_action',
@@ -1755,7 +1790,8 @@ class MiscTestCase(db_base.DbTestCase):
@mock.patch.object(time, 'sleep', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
- @mock.patch.object(drivers_base.NetworkInterface, 'need_power_on')
+ @mock.patch.object(drivers_base.NetworkInterface, 'need_power_on',
+ autospec=True)
@mock.patch.object(conductor_utils, 'node_set_boot_device',
autospec=True)
@mock.patch.object(conductor_utils, 'node_power_action',
@@ -1776,7 +1812,8 @@ class MiscTestCase(db_base.DbTestCase):
@mock.patch.object(neutron, 'wait_for_host_agent', autospec=True)
@mock.patch.object(time, 'sleep', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state', autospec=True)
- @mock.patch.object(drivers_base.NetworkInterface, 'need_power_on')
+ @mock.patch.object(drivers_base.NetworkInterface, 'need_power_on',
+ autospec=True)
@mock.patch.object(conductor_utils, 'node_set_boot_device',
autospec=True)
@mock.patch.object(conductor_utils, 'node_power_action',
@@ -2032,10 +2069,10 @@ class AgentTokenUtilsTestCase(tests_base.TestCase):
conductor_utils.add_secret_token(self.node)
self.assertIn('agent_secret_token', self.node.driver_internal_info)
- def test_del_secret_token(self):
+ def test_wipe_deploy_internal_info(self):
conductor_utils.add_secret_token(self.node)
self.assertIn('agent_secret_token', self.node.driver_internal_info)
- conductor_utils.del_secret_token(self.node)
+ conductor_utils.wipe_deploy_internal_info(mock.Mock(node=self.node))
self.assertNotIn('agent_secret_token', self.node.driver_internal_info)
def test_is_agent_token_present(self):
@@ -2051,3 +2088,48 @@ class AgentTokenUtilsTestCase(tests_base.TestCase):
conductor_utils.is_agent_token_supported('6.2.1'))
self.assertFalse(
conductor_utils.is_agent_token_supported('6.0.0'))
+
+
+class GetAttachedVifTestCase(db_base.DbTestCase):
+
+ def setUp(self):
+ super(GetAttachedVifTestCase, self).setUp()
+ self.node = obj_utils.create_test_node(self.context,
+ driver='fake-hardware')
+ self.port = obj_utils.get_test_port(self.context,
+ node_id=self.node.id)
+
+ def test_get_attached_vif_none(self):
+ vif, use = conductor_utils.get_attached_vif(self.port)
+ self.assertIsNone(vif)
+ self.assertIsNone(use)
+
+ def test_get_attached_vif_tenant(self):
+ self.port.internal_info = {'tenant_vif_port_id': '1'}
+ vif, use = conductor_utils.get_attached_vif(self.port)
+ self.assertEqual('1', vif)
+ self.assertEqual('tenant', use)
+
+ def test_get_attached_vif_provisioning(self):
+ self.port.internal_info = {'provisioning_vif_port_id': '1'}
+ vif, use = conductor_utils.get_attached_vif(self.port)
+ self.assertEqual('1', vif)
+ self.assertEqual('provisioning', use)
+
+ def test_get_attached_vif_cleaning(self):
+ self.port.internal_info = {'cleaning_vif_port_id': '1'}
+ vif, use = conductor_utils.get_attached_vif(self.port)
+ self.assertEqual('1', vif)
+ self.assertEqual('cleaning', use)
+
+ def test_get_attached_vif_rescuing(self):
+ self.port.internal_info = {'rescuing_vif_port_id': '1'}
+ vif, use = conductor_utils.get_attached_vif(self.port)
+ self.assertEqual('1', vif)
+ self.assertEqual('rescuing', use)
+
+ def test_get_attached_vif_inspecting(self):
+ self.port.internal_info = {'inspection_vif_port_id': '1'}
+ vif, use = conductor_utils.get_attached_vif(self.port)
+ self.assertEqual('1', vif)
+ self.assertEqual('inspecting', use)
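
The new GetAttachedVifTestCase pins down the contract of conductor_utils.get_attached_vif: it returns a (vif, use) pair, where use names the phase whose VIF is attached, and (None, None) when nothing is attached. A minimal sketch of that contract, assuming the helper simply probes port.internal_info for the per-phase keys the tests exercise (note the 'inspecting' use maps to the 'inspection_vif_port_id' key); the real implementation may differ:

    # Hypothetical sketch; key names and uses are taken from the tests.
    VIF_USES = {
        'tenant': 'tenant_vif_port_id',
        'provisioning': 'provisioning_vif_port_id',
        'cleaning': 'cleaning_vif_port_id',
        'rescuing': 'rescuing_vif_port_id',
        'inspecting': 'inspection_vif_port_id',
    }

    def get_attached_vif(port):
        """Return (vif_id, use) for the attached VIF, or (None, None)."""
        for use, key in VIF_USES.items():
            vif = port.internal_info.get(key)
            if vif:
                return vif, use
        return None, None
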
diff --git a/ironic/tests/unit/db/sqlalchemy/test_migrations.py b/ironic/tests/unit/db/sqlalchemy/test_migrations.py
index 7aed87626..39293c6ac 100644
--- a/ironic/tests/unit/db/sqlalchemy/test_migrations.py
+++ b/ironic/tests/unit/db/sqlalchemy/test_migrations.py
@@ -36,10 +36,10 @@ For postgres on Ubuntu this can be done with the following commands:
import collections
import contextlib
+from unittest import mock
from alembic import script
import fixtures
-import mock
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
@@ -65,8 +65,8 @@ MIGRATIONS_TIMEOUT = 300
@contextlib.contextmanager
def patch_with_engine(engine):
- with mock.patch.object(enginefacade.writer,
- 'get_engine') as patch_engine:
+ with mock.patch.object(enginefacade.writer, 'get_engine',
+ autospec=True) as patch_engine:
patch_engine.return_value = engine
yield
@@ -150,10 +150,10 @@ class TestWalkVersions(base.TestCase, WalkVersionsMixin):
self._pre_upgrade_141.assert_called_with(self.engine)
self._check_141.assert_called_with(self.engine, test_value)
- @mock.patch.object(script, 'ScriptDirectory')
- @mock.patch.object(WalkVersionsMixin, '_migrate_up')
+ @mock.patch.object(script, 'ScriptDirectory', autospec=True)
+ @mock.patch.object(WalkVersionsMixin, '_migrate_up', autospec=True)
def test_walk_versions_all_default(self, _migrate_up, script_directory):
- fc = script_directory.from_config()
+ fc = script_directory.from_config.return_value
fc.walk_revisions.return_value = self.versions
self.migration_api.version.return_value = None
@@ -161,20 +161,20 @@ class TestWalkVersions(base.TestCase, WalkVersionsMixin):
self.migration_api.version.assert_called_with(self.config)
- upgraded = [mock.call(self.engine, self.config, v.revision,
+ upgraded = [mock.call(self, self.engine, self.config, v.revision,
with_data=True) for v in reversed(self.versions)]
self.assertEqual(self._migrate_up.call_args_list, upgraded)
- @mock.patch.object(script, 'ScriptDirectory')
- @mock.patch.object(WalkVersionsMixin, '_migrate_up')
+ @mock.patch.object(script, 'ScriptDirectory', autospec=True)
+ @mock.patch.object(WalkVersionsMixin, '_migrate_up', autospec=True)
def test_walk_versions_all_false(self, _migrate_up, script_directory):
- fc = script_directory.from_config()
+ fc = script_directory.from_config.return_value
fc.walk_revisions.return_value = self.versions
self.migration_api.version.return_value = None
self._walk_versions(self.engine, self.config)
- upgraded = [mock.call(self.engine, self.config, v.revision,
+ upgraded = [mock.call(self, self.engine, self.config, v.revision,
with_data=True) for v in reversed(self.versions)]
self.assertEqual(upgraded, self._migrate_up.call_args_list)
@@ -969,6 +969,13 @@ class MigrationCheckersMixin(object):
col_names = [column.name for column in allocations.c]
self.assertIn('owner', col_names)
+ def _check_cf1a80fdb352(self, engine, data):
+ nodes = db_utils.get_table(engine, 'nodes')
+ col_names = [column.name for column in nodes.c]
+ self.assertIn('network_data', col_names)
+ self.assertIsInstance(
+ nodes.c.network_data.type, sqlalchemy.types.String)
+
def _pre_upgrade_cd2c80feb331(self, engine):
data = {
'node_uuid': uuidutils.generate_uuid(),
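
The expected calls above gain a leading `self` because autospec=True on a method patched at class level yields a function mock rather than a bound method, so the instance is recorded as the first positional argument. The `fc = script_directory.from_config.return_value` change has the same root cause: an autospecced mock enforces the real signature, so calling from_config() with no arguments inside the test would fail, while .return_value reaches the object the production code will receive without recording a call. A stdlib-only illustration of the first point:

    from unittest import mock

    class Walker:
        def _migrate_up(self, engine, config, version, with_data=False):
            raise NotImplementedError

    with mock.patch.object(Walker, '_migrate_up', autospec=True) as m:
        w = Walker()
        w._migrate_up('engine', 'cfg', 'abc123', with_data=True)
        # The instance shows up as the first recorded argument,
        # matching the mock.call(self, ...) expectations above.
        m.assert_called_once_with(w, 'engine', 'cfg', 'abc123',
                                  with_data=True)
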
diff --git a/ironic/tests/unit/db/test_api.py b/ironic/tests/unit/db/test_api.py
index ed640e9cd..f855e9c91 100644
--- a/ironic/tests/unit/db/test_api.py
+++ b/ironic/tests/unit/db/test_api.py
@@ -11,8 +11,8 @@
# under the License.
import random
+from unittest import mock
-import mock
from oslo_db.sqlalchemy import utils as db_utils
from oslo_utils import uuidutils
from testtools import matchers
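
Replacing `import mock` with `from unittest import mock` drops the third-party mock dependency: the library has shipped in the standard library as unittest.mock since Python 3.3 with the same API, so the change is import-only:

    # Previously a separate dependency:  import mock
    # Standard library on Python 3.3+:
    from unittest import mock

    stub = mock.Mock()
    stub.ping.return_value = 'pong'
    assert stub.ping() == 'pong'
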
diff --git a/ironic/tests/unit/db/test_bios_settings.py b/ironic/tests/unit/db/test_bios_settings.py
index 684c307b2..a13f516a3 100644
--- a/ironic/tests/unit/db/test_bios_settings.py
+++ b/ironic/tests/unit/db/test_bios_settings.py
@@ -62,9 +62,9 @@ class DbBIOSSettingTestCase(base.DbTestCase):
settings = db_utils.get_test_bios_setting_setting_list()
result = self.dbapi.create_bios_setting_list(
self.node.id, settings, '1.0')
- self.assertItemsEqual(['virtualization', 'hyperthread', 'numlock'],
+ self.assertCountEqual(['virtualization', 'hyperthread', 'numlock'],
[setting.name for setting in result])
- self.assertItemsEqual(['on', 'enabled', 'off'],
+ self.assertCountEqual(['on', 'enabled', 'off'],
[setting.value for setting in result])
def test_create_bios_setting_list_duplicate(self):
@@ -87,7 +87,7 @@ class DbBIOSSettingTestCase(base.DbTestCase):
{'name': 'numlock', 'value': 'on'}]
result = self.dbapi.update_bios_setting_list(
self.node.id, settings, '1.0')
- self.assertItemsEqual(['off', 'disabled', 'on'],
+ self.assertCountEqual(['off', 'disabled', 'on'],
[setting.value for setting in result])
def test_update_bios_setting_list_setting_not_exist(self):
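
assertItemsEqual is the Python 2 spelling that no longer exists in Python 3's unittest; assertCountEqual (available since Python 3.2) is its direct replacement. Both assert that two sequences hold the same elements with the same multiplicity, ignoring order:

    import unittest

    class UnorderedExample(unittest.TestCase):
        def test_same_elements_any_order(self):
            # Order is ignored, but duplicate counts must match.
            self.assertCountEqual(['on', 'enabled', 'off'],
                                  ['off', 'on', 'enabled'])
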
diff --git a/ironic/tests/unit/db/test_conductor.py b/ironic/tests/unit/db/test_conductor.py
index 1ae38de88..bb7832b99 100644
--- a/ironic/tests/unit/db/test_conductor.py
+++ b/ironic/tests/unit/db/test_conductor.py
@@ -16,8 +16,8 @@
"""Tests for manipulating Conductors via the DB API"""
import datetime
+from unittest import mock
-import mock
import oslo_db
from oslo_db import exception as db_exc
from oslo_db import sqlalchemy
diff --git a/ironic/tests/unit/db/test_node_tags.py b/ironic/tests/unit/db/test_node_tags.py
index b2903b1d5..5475c786a 100644
--- a/ironic/tests/unit/db/test_node_tags.py
+++ b/ironic/tests/unit/db/test_node_tags.py
@@ -26,7 +26,7 @@ class DbNodeTagTestCase(base.DbTestCase):
def test_set_node_tags(self):
tags = self.dbapi.set_node_tags(self.node.id, ['tag1', 'tag2'])
self.assertEqual(self.node.id, tags[0].node_id)
- self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in tags])
+ self.assertCountEqual(['tag1', 'tag2'], [tag.tag for tag in tags])
tags = self.dbapi.set_node_tags(self.node.id, [])
self.assertEqual([], tags)
@@ -35,7 +35,7 @@ class DbNodeTagTestCase(base.DbTestCase):
tags = self.dbapi.set_node_tags(self.node.id,
['tag1', 'tag2', 'tag2'])
self.assertEqual(self.node.id, tags[0].node_id)
- self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in tags])
+ self.assertCountEqual(['tag1', 'tag2'], [tag.tag for tag in tags])
def test_set_node_tags_node_not_exist(self):
self.assertRaises(exception.NodeNotFound,
@@ -45,7 +45,7 @@ class DbNodeTagTestCase(base.DbTestCase):
self.dbapi.set_node_tags(self.node.id, ['tag1', 'tag2'])
tags = self.dbapi.get_node_tags_by_node_id(self.node.id)
self.assertEqual(self.node.id, tags[0].node_id)
- self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in tags])
+ self.assertCountEqual(['tag1', 'tag2'], [tag.tag for tag in tags])
def test_get_node_tags_empty(self):
tags = self.dbapi.get_node_tags_by_node_id(self.node.id)
diff --git a/ironic/tests/unit/db/test_node_traits.py b/ironic/tests/unit/db/test_node_traits.py
index bbd687411..2d8bf2068 100644
--- a/ironic/tests/unit/db/test_node_traits.py
+++ b/ironic/tests/unit/db/test_node_traits.py
@@ -27,7 +27,7 @@ class DbNodeTraitTestCase(base.DbTestCase):
result = self.dbapi.set_node_traits(self.node.id, ['trait1', 'trait2'],
'1.0')
self.assertEqual(self.node.id, result[0].node_id)
- self.assertItemsEqual(['trait1', 'trait2'],
+ self.assertCountEqual(['trait1', 'trait2'],
[trait.trait for trait in result])
result = self.dbapi.set_node_traits(self.node.id, [], '1.0')
@@ -38,14 +38,14 @@ class DbNodeTraitTestCase(base.DbTestCase):
['trait1', 'trait2', 'trait2'],
'1.0')
self.assertEqual(self.node.id, result[0].node_id)
- self.assertItemsEqual(['trait1', 'trait2'],
+ self.assertCountEqual(['trait1', 'trait2'],
[trait.trait for trait in result])
def test_set_node_traits_at_limit(self):
traits = ['trait%d' % n for n in range(50)]
result = self.dbapi.set_node_traits(self.node.id, traits, '1.0')
self.assertEqual(self.node.id, result[0].node_id)
- self.assertItemsEqual(traits, [trait.trait for trait in result])
+ self.assertCountEqual(traits, [trait.trait for trait in result])
def test_set_node_traits_over_limit(self):
traits = ['trait%d' % n for n in range(51)]
@@ -66,7 +66,7 @@ class DbNodeTraitTestCase(base.DbTestCase):
traits=['trait1', 'trait2'])
result = self.dbapi.get_node_traits_by_node_id(self.node.id)
self.assertEqual(self.node.id, result[0].node_id)
- self.assertItemsEqual(['trait1', 'trait2'],
+ self.assertCountEqual(['trait1', 'trait2'],
[trait.trait for trait in result])
def test_get_node_traits_empty(self):
diff --git a/ironic/tests/unit/db/test_nodes.py b/ironic/tests/unit/db/test_nodes.py
index 88200bf99..2ca7eb5e9 100644
--- a/ironic/tests/unit/db/test_nodes.py
+++ b/ironic/tests/unit/db/test_nodes.py
@@ -16,8 +16,8 @@
"""Tests for manipulating Nodes via the DB API"""
import datetime
+from unittest import mock
-import mock
from oslo_utils import timeutils
from oslo_utils import uuidutils
@@ -72,8 +72,8 @@ class DbNodeTestCase(base.DbTestCase):
res = self.dbapi.get_node_by_id(node.id)
self.assertEqual(node.id, res.id)
self.assertEqual(node.uuid, res.uuid)
- self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
- self.assertItemsEqual(['trait1', 'trait2'],
+ self.assertCountEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
+ self.assertCountEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_by_uuid(self):
@@ -84,8 +84,8 @@ class DbNodeTestCase(base.DbTestCase):
res = self.dbapi.get_node_by_uuid(node.uuid)
self.assertEqual(node.id, res.id)
self.assertEqual(node.uuid, res.uuid)
- self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
- self.assertItemsEqual(['trait1', 'trait2'],
+ self.assertCountEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
+ self.assertCountEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_by_name(self):
@@ -97,8 +97,8 @@ class DbNodeTestCase(base.DbTestCase):
self.assertEqual(node.id, res.id)
self.assertEqual(node.uuid, res.uuid)
self.assertEqual(node.name, res.name)
- self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
- self.assertItemsEqual(['trait1', 'trait2'],
+ self.assertCountEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
+ self.assertCountEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_that_does_not_exist(self):
@@ -455,8 +455,8 @@ class DbNodeTestCase(base.DbTestCase):
res = self.dbapi.get_node_by_instance(node.instance_uuid)
self.assertEqual(node.uuid, res.uuid)
- self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
- self.assertItemsEqual(['trait1', 'trait2'],
+ self.assertCountEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
+ self.assertCountEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_by_instance_wrong_uuid(self):
@@ -595,6 +595,8 @@ class DbNodeTestCase(base.DbTestCase):
node = utils.create_test_node()
allocation = utils.create_test_allocation(node_id=node.id)
+ node = self.dbapi.update_node(node.id,
+ {'allocation_id': allocation.id})
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.AllocationNotFound,
@@ -723,8 +725,8 @@ class DbNodeTestCase(base.DbTestCase):
# reserve the node
res = self.dbapi.reserve_node(r1, uuid)
- self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
- self.assertItemsEqual(['trait1', 'trait2'],
+ self.assertCountEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
+ self.assertCountEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
# check reservation
diff --git a/ironic/tests/unit/db/test_ports.py b/ironic/tests/unit/db/test_ports.py
index e0d2e1d66..d2434d603 100644
--- a/ironic/tests/unit/db/test_ports.py
+++ b/ironic/tests/unit/db/test_ports.py
@@ -28,7 +28,8 @@ class DbPortTestCase(base.DbTestCase):
# This method creates a port for every test and
# replaces a test for creating a port.
super(DbPortTestCase, self).setUp()
- self.node = db_utils.create_test_node(owner='12345')
+ self.node = db_utils.create_test_node(owner='12345',
+ lessee='54321')
self.portgroup = db_utils.create_test_portgroup(node_id=self.node.id)
self.port = db_utils.create_test_port(node_id=self.node.id,
portgroup_id=self.portgroup.id)
@@ -56,6 +57,17 @@ class DbPortTestCase(base.DbTestCase):
self.port.address,
owner='54321')
+ def test_get_port_by_address_filter_by_project(self):
+ res = self.dbapi.get_port_by_address(self.port.address,
+ project=self.node.lessee)
+ self.assertEqual(self.port.id, res.id)
+
+ def test_get_port_by_address_filter_by_project_no_match(self):
+ self.assertRaises(exception.PortNotFound,
+ self.dbapi.get_port_by_address,
+ self.port.address,
+ project='55555')
+
def test_get_port_list(self):
uuids = []
for i in range(1, 6):
@@ -99,6 +111,30 @@ class DbPortTestCase(base.DbTestCase):
res_uuids = [r.uuid for r in res]
self.assertCountEqual(uuids, res_uuids)
+ def test_get_port_list_filter_by_node_project(self):
+ lessee_node = db_utils.create_test_node(uuid=uuidutils.generate_uuid(),
+ lessee=self.node.owner)
+
+ uuids = []
+ for i in range(1, 3):
+ port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
+ node_id=lessee_node.id,
+ address='52:54:00:cf:2d:4%s' % i)
+ uuids.append(str(port.uuid))
+ for i in range(4, 6):
+ port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
+ address='52:54:00:cf:2d:4%s' % i)
+ for i in range(7, 9):
+ port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
+ node_id=self.node.id,
+ address='52:54:00:cf:2d:4%s' % i)
+ uuids.append(str(port.uuid))
+ # Also add the uuid for the port created in setUp()
+ uuids.append(str(self.port.uuid))
+ res = self.dbapi.get_port_list(project=self.node.owner)
+ res_uuids = [r.uuid for r in res]
+ self.assertCountEqual(uuids, res_uuids)
+
def test_get_ports_by_node_id(self):
res = self.dbapi.get_ports_by_node_id(self.node.id)
self.assertEqual(self.port.address, res[0].address)
@@ -113,6 +149,16 @@ class DbPortTestCase(base.DbTestCase):
owner='54321')
self.assertEqual([], res)
+ def test_get_ports_by_node_id_filter_by_node_project(self):
+ res = self.dbapi.get_ports_by_node_id(self.node.id,
+ project=self.node.lessee)
+ self.assertEqual(self.port.address, res[0].address)
+
+ def test_get_ports_by_node_id_filter_by_node_project_no_match(self):
+ res = self.dbapi.get_ports_by_node_id(self.node.id,
+ owner='11111')
+ self.assertEqual([], res)
+
def test_get_ports_by_node_id_that_does_not_exist(self):
self.assertEqual([], self.dbapi.get_ports_by_node_id(99))
@@ -130,6 +176,16 @@ class DbPortTestCase(base.DbTestCase):
owner='54321')
self.assertEqual([], res)
+ def test_get_ports_by_portgroup_id_filter_by_node_project(self):
+ res = self.dbapi.get_ports_by_portgroup_id(self.portgroup.id,
+ project=self.node.lessee)
+ self.assertEqual(self.port.address, res[0].address)
+
+ def test_get_ports_by_portgroup_id_filter_by_node_project_no_match(self):
+ res = self.dbapi.get_ports_by_portgroup_id(self.portgroup.id,
+ project='11111')
+ self.assertEqual([], res)
+
def test_get_ports_by_portgroup_id_that_does_not_exist(self):
self.assertEqual([], self.dbapi.get_ports_by_portgroup_id(99))
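
The new port tests exercise project scoping against both node fields: a request project matches when it is either the node's owner ('12345') or its lessee ('54321'), while '55555' matches neither. A hypothetical one-liner capturing the semantics the tests pin down (the actual DB API expresses this as a SQL filter):

    def node_matches_project(node, project):
        # Visibility rule implied by the tests above.
        return project is not None and project in (node.owner, node.lessee)
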
diff --git a/ironic/tests/unit/db/utils.py b/ironic/tests/unit/db/utils.py
index a7b720f4b..96254889d 100644
--- a/ironic/tests/unit/db/utils.py
+++ b/ironic/tests/unit/db/utils.py
@@ -228,6 +228,7 @@ def get_test_node(**kw):
'retired': kw.get('retired', False),
'retired_reason': kw.get('retired_reason', None),
'lessee': kw.get('lessee', None),
+ 'network_data': kw.get('network_data'),
}
for iface in drivers_base.ALL_INTERFACES:
diff --git a/ironic/tests/unit/dhcp/test_factory.py b/ironic/tests/unit/dhcp/test_factory.py
index bda6bed0e..02362efcb 100644
--- a/ironic/tests/unit/dhcp/test_factory.py
+++ b/ironic/tests/unit/dhcp/test_factory.py
@@ -14,8 +14,8 @@
# under the License.
import inspect
+from unittest import mock
-import mock
import stevedore
from ironic.common import dhcp_factory
@@ -61,8 +61,8 @@ class TestDHCPFactory(base.TestCase):
group='dhcp')
dhcp_factory.DHCPFactory()
- with mock.patch.object(dhcp_factory.DHCPFactory,
- '_set_dhcp_provider') as mock_set_dhcp:
+ with mock.patch.object(dhcp_factory.DHCPFactory, '_set_dhcp_provider',
+ autospec=True) as mock_set_dhcp:
# There is already a dhcp_provider, so this shouldn't call
# _set_dhcp_provider again.
dhcp_factory.DHCPFactory()
diff --git a/ironic/tests/unit/dhcp/test_neutron.py b/ironic/tests/unit/dhcp/test_neutron.py
index 391b1cf74..e4091c58b 100644
--- a/ironic/tests/unit/dhcp/test_neutron.py
+++ b/ironic/tests/unit/dhcp/test_neutron.py
@@ -14,7 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from neutronclient.common import exceptions as neutron_client_exc
from oslo_utils import uuidutils
diff --git a/ironic/tests/unit/drivers/ipxe_config_boot_from_iso.template b/ironic/tests/unit/drivers/ipxe_config_boot_from_iso.template
new file mode 100644
index 000000000..ede73acff
--- /dev/null
+++ b/ironic/tests/unit/drivers/ipxe_config_boot_from_iso.template
@@ -0,0 +1,39 @@
+#!ipxe
+
+set attempts:int32 10
+set i:int32 0
+
+goto deploy
+
+:deploy
+imgfree
+kernel http://1.2.3.4:1234/deploy_kernel selinux=0 troubleshoot=0 text test_param BOOTIF=${mac} initrd=deploy_ramdisk || goto retry
+
+initrd http://1.2.3.4:1234/deploy_ramdisk || goto retry
+boot
+
+:retry
+iseq ${i} ${attempts} && goto fail ||
+inc i
+echo No response, retrying in ${i} seconds.
+sleep ${i}
+goto deploy
+
+:fail
+echo Failed to get a response after ${attempts} attempts
+echo Powering off in 30 seconds.
+sleep 30
+poweroff
+
+:boot_partition
+imgfree
+kernel http://1.2.3.4:1234/kernel root={{ ROOT }} ro text test_param initrd=ramdisk || goto boot_partition
+initrd http://1.2.3.4:1234/ramdisk || goto boot_partition
+boot
+
+:boot_ramdisk
+imgfree
+sanboot http://1.2.3.4:1234/uuid/iso
+
+:boot_whole_disk
+sanboot --no-describe
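
The retry flow in this new boot-from-ISO template backs off linearly: a failed kernel or initrd fetch jumps to :retry, which gives up once ${i} reaches the 10-attempt limit, otherwise increments the counter, sleeps ${i} seconds, and re-enters :deploy; after the final failure the node powers itself off following a 30-second grace period. Note that ${i} is iPXE variable expansion, so both the echo message and the sleep interpolate the current counter, and the :boot_ramdisk label sanboots the ISO URL directly.
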
diff --git a/ironic/tests/unit/drivers/modules/ansible/test_deploy.py b/ironic/tests/unit/drivers/modules/ansible/test_deploy.py
index 95741d02e..17ab45786 100644
--- a/ironic/tests/unit/drivers/modules/ansible/test_deploy.py
+++ b/ironic/tests/unit/drivers/modules/ansible/test_deploy.py
@@ -11,9 +11,9 @@
# limitations under the License.
import json
+from unittest import mock
from ironic_lib import utils as irlib_utils
-import mock
from oslo_concurrency import processutils
from ironic.common import exception
@@ -576,7 +576,7 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
def test_get_properties(self):
self.assertEqual(
set(list(ansible_deploy.COMMON_PROPERTIES)
- + ['deploy_forces_oob_reboot']),
+ + ['agent_verify_ca', 'deploy_forces_oob_reboot']),
set(self.driver.get_properties()))
@mock.patch.object(deploy_utils, 'check_for_missing_params',
@@ -637,7 +637,7 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
@mock.patch('ironic.drivers.modules.deploy_utils.'
'build_instance_info_for_deploy',
return_value={'test': 'test'}, autospec=True)
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
+ @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
def test_prepare(self, pxe_prepare_ramdisk_mock,
build_instance_info_mock, build_options_mock,
power_action_mock):
@@ -656,7 +656,7 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
build_instance_info_mock.assert_called_once_with(task)
build_options_mock.assert_called_once_with(task.node)
pxe_prepare_ramdisk_mock.assert_called_once_with(
- task, {'op1': 'test1'})
+ task.driver.boot, task, {'op1': 'test1'})
self.node.refresh()
self.assertEqual('test', self.node.instance_info['test'])
@@ -664,13 +664,13 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
@mock.patch.object(ansible_deploy, '_get_configdrive_path',
return_value='/path/test', autospec=True)
@mock.patch.object(irlib_utils, 'unlink_without_raise', autospec=True)
- @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk')
+ @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
def test_clean_up(self, pxe_clean_up_mock, unlink_mock,
get_cfdrive_path_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.driver.clean_up(task)
- pxe_clean_up_mock.assert_called_once_with(task)
+ pxe_clean_up_mock.assert_called_once_with(task.driver.boot, task)
get_cfdrive_path_mock.assert_called_once_with(self.node['uuid'])
unlink_mock.assert_called_once_with('/path/test')
@@ -764,7 +764,7 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
@mock.patch.object(utils, 'node_power_action', autospec=True)
@mock.patch('ironic.drivers.modules.deploy_utils.build_agent_options',
return_value={'op1': 'test1'}, autospec=True)
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
+ @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
def test_prepare_cleaning(
self, prepare_ramdisk_mock, build_options_mock, power_action_mock,
set_node_cleaning_steps, run_playbook_mock):
@@ -785,7 +785,7 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
task)
build_options_mock.assert_called_once_with(task.node)
prepare_ramdisk_mock.assert_called_once_with(
- task, {'op1': 'test1'})
+ task.driver.boot, task, {'op1': 'test1'})
power_action_mock.assert_called_once_with(task, states.REBOOT)
self.assertFalse(run_playbook_mock.called)
self.assertEqual(states.CLEANWAIT, state)
@@ -802,7 +802,7 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
self.assertFalse(task.driver.network.add_cleaning_network.called)
@mock.patch.object(utils, 'node_power_action', autospec=True)
- @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk')
+ @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
def test_tear_down_cleaning(self, clean_ramdisk_mock, power_action_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.network.remove_cleaning_network = mock.Mock()
@@ -810,7 +810,7 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
self.driver.tear_down_cleaning(task)
power_action_mock.assert_called_once_with(task, states.POWER_OFF)
- clean_ramdisk_mock.assert_called_once_with(task)
+ clean_ramdisk_mock.assert_called_once_with(task.driver.boot, task)
(task.driver.network.remove_cleaning_network
.assert_called_once_with(task))
@@ -890,13 +890,11 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
run_playbook_mock.assert_called_once_with(
task.node, 'test_pl', ironic_nodes, 'test_k')
- @mock.patch.object(utils, 'power_on_node_if_needed', autospec=True)
@mock.patch.object(fake.FakePower, 'get_power_state',
- return_value=states.POWER_OFF)
+ return_value=states.POWER_OFF, autospec=True)
@mock.patch.object(utils, 'node_power_action', autospec=True)
- def test_reboot_and_finish_deploy_force_reboot(
- self, power_action_mock, get_pow_state_mock,
- power_on_node_if_needed_mock):
+ def test_tear_down_agent_force_reboot(
+ self, power_action_mock, get_pow_state_mock):
d_info = self.node.driver_info
d_info['deploy_forces_oob_reboot'] = True
self.node.driver_info = d_info
@@ -906,26 +904,15 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
self.node.provision_state = states.DEPLOYING
self.node.save()
- power_on_node_if_needed_mock.return_value = None
with task_manager.acquire(self.context, self.node.uuid) as task:
- with mock.patch.object(task.driver, 'network') as net_mock:
- self.driver.reboot_and_finish_deploy(task)
- net_mock.remove_provisioning_network.assert_called_once_with(
- task)
- net_mock.configure_tenant_networks.assert_called_once_with(
- task)
- expected_power_calls = [((task, states.POWER_OFF),),
- ((task, states.POWER_ON),)]
- self.assertEqual(expected_power_calls,
- power_action_mock.call_args_list)
+ self.driver.tear_down_agent(task)
+ power_action_mock.assert_called_once_with(task, states.POWER_OFF)
get_pow_state_mock.assert_not_called()
- @mock.patch.object(utils, 'power_on_node_if_needed', autospec=True)
@mock.patch.object(ansible_deploy, '_run_playbook', autospec=True)
@mock.patch.object(utils, 'node_power_action', autospec=True)
- def test_reboot_and_finish_deploy_soft_poweroff_retry(
- self, power_action_mock, run_playbook_mock,
- power_on_node_if_needed_mock):
+ def test_tear_down_agent_soft_poweroff_retry(
+ self, power_action_mock, run_playbook_mock):
self.config(group='ansible',
post_deploy_get_power_state_retry_interval=0)
self.config(group='ansible',
@@ -936,82 +923,38 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
self.node.driver_internal_info = di_info
self.node.save()
- power_on_node_if_needed_mock.return_value = None
with task_manager.acquire(self.context, self.node.uuid) as task:
- with mock.patch.object(task.driver, 'network') as net_mock:
- with mock.patch.object(task.driver.power,
- 'get_power_state',
- return_value=states.POWER_ON) as p_mock:
- self.driver.reboot_and_finish_deploy(task)
- p_mock.assert_called_with(task)
- self.assertEqual(2, len(p_mock.mock_calls))
- net_mock.remove_provisioning_network.assert_called_once_with(
- task)
- net_mock.configure_tenant_networks.assert_called_once_with(
- task)
- power_action_mock.assert_has_calls(
- [mock.call(task, states.POWER_OFF),
- mock.call(task, states.POWER_ON)])
- expected_power_calls = [((task, states.POWER_OFF),),
- ((task, states.POWER_ON),)]
- self.assertEqual(expected_power_calls,
- power_action_mock.call_args_list)
+ with mock.patch.object(task.driver.power,
+ 'get_power_state',
+ return_value=states.POWER_ON,
+ autospec=True) as p_mock:
+ self.driver.tear_down_agent(task)
+ p_mock.assert_called_with(task)
+ self.assertEqual(2, len(p_mock.mock_calls))
+ power_action_mock.assert_called_once_with(task, states.POWER_OFF)
run_playbook_mock.assert_called_once_with(
task.node, 'shutdown.yaml', mock.ANY, mock.ANY)
+ @mock.patch.object(utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(ansible_deploy, '_get_node_ip', autospec=True,
return_value='1.2.3.4')
- def test_continue_deploy(self, getip_mock):
- self.node.provision_state = states.DEPLOYWAIT
+ def test_write_image(self, getip_mock, bootdev_mock):
+ self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
with mock.patch.multiple(self.driver, autospec=True,
_ansible_deploy=mock.DEFAULT,
reboot_to_instance=mock.DEFAULT):
- self.driver.continue_deploy(task)
+ result = self.driver.write_image(task)
+ self.assertIsNone(result)
getip_mock.assert_called_once_with(task)
self.driver._ansible_deploy.assert_called_once_with(
task, '1.2.3.4')
- self.driver.reboot_to_instance.assert_called_once_with(task)
- self.assertEqual(states.ACTIVE, task.node.target_provision_state)
- self.assertEqual(states.DEPLOYING, task.node.provision_state)
-
- @mock.patch.object(utils, 'notify_conductor_resume_deploy', autospec=True)
- @mock.patch.object(utils, 'node_set_boot_device', autospec=True)
- def test_reboot_to_instance(self, bootdev_mock, resume_mock):
- self.node.provision_state = states.DEPLOYING
- self.node.deploy_step = {
- 'step': 'deploy', 'priority': 100, 'interface': 'deploy'}
- self.node.save()
- with task_manager.acquire(self.context, self.node.uuid) as task:
- with mock.patch.object(self.driver, 'reboot_and_finish_deploy',
- autospec=True):
- task.driver.boot = mock.Mock()
- self.driver.reboot_to_instance(task)
bootdev_mock.assert_called_once_with(task, 'disk',
persistent=True)
- resume_mock.assert_called_once_with(task)
- self.driver.reboot_and_finish_deploy.assert_called_once_with(
- task)
- task.driver.boot.clean_up_ramdisk.assert_called_once_with(
- task)
-
- @mock.patch.object(utils, 'restore_power_state_if_needed', autospec=True)
- @mock.patch.object(utils, 'power_on_node_if_needed')
- @mock.patch.object(utils, 'node_power_action', autospec=True)
- def test_tear_down_with_smartnic_port(
- self, power_mock, power_on_node_if_needed_mock,
- restore_power_state_mock):
- with task_manager.acquire(
- self.context, self.node['uuid'], shared=False) as task:
- power_on_node_if_needed_mock.return_value = states.POWER_OFF
- driver_return = self.driver.tear_down(task)
- power_mock.assert_called_once_with(task, states.POWER_OFF)
- self.assertEqual(driver_return, states.DELETED)
- power_on_node_if_needed_mock.assert_called_once_with(task)
- restore_power_state_mock.assert_called_once_with(
- task, states.POWER_OFF)
+ self.assertEqual(states.ACTIVE, task.node.target_provision_state)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
@mock.patch.object(flat_network.FlatNetwork, 'add_provisioning_network',
autospec=True)
@@ -1021,7 +964,7 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
@mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
@mock.patch.object(deploy_utils, 'build_instance_info_for_deploy',
autospec=True)
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
+ @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
def test_prepare_with_smartnic_port(
self, pxe_prepare_ramdisk_mock,
build_instance_info_mock, build_options_mock,
@@ -1039,7 +982,7 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
build_instance_info_mock.assert_called_once_with(task)
build_options_mock.assert_called_once_with(task.node)
pxe_prepare_ramdisk_mock.assert_called_once_with(
- task, {'op1': 'test1'})
+ task.driver.boot, task, {'op1': 'test1'})
power_on_node_if_needed_mock.assert_called_once_with(task)
restore_power_state_mock.assert_called_once_with(
task, states.POWER_OFF)
@@ -1053,7 +996,7 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
@mock.patch.object(steps, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(utils, 'node_power_action', autospec=True)
@mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
+ @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
def test_prepare_cleaning_with_smartnic_port(
self, prepare_ramdisk_mock, build_options_mock, power_action_mock,
set_node_cleaning_steps, run_playbook_mock,
@@ -1075,7 +1018,7 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
task)
build_options_mock.assert_called_once_with(task.node)
prepare_ramdisk_mock.assert_called_once_with(
- task, {'op1': 'test1'})
+ task.driver.boot, task, {'op1': 'test1'})
power_action_mock.assert_called_once_with(task, states.REBOOT)
self.assertFalse(run_playbook_mock.called)
self.assertEqual(states.CLEANWAIT, state)
@@ -1086,7 +1029,7 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
@mock.patch.object(utils, 'restore_power_state_if_needed', autospec=True)
@mock.patch.object(utils, 'power_on_node_if_needed', autospec=True)
@mock.patch.object(utils, 'node_power_action', autospec=True)
- @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk')
+ @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
def test_tear_down_cleaning_with_smartnic_port(
self, clean_ramdisk_mock, power_action_mock,
power_on_node_if_needed_mock, restore_power_state_mock):
@@ -1095,42 +1038,10 @@ class TestAnsibleDeploy(AnsibleDeployTestCaseBase):
power_on_node_if_needed_mock.return_value = states.POWER_OFF
self.driver.tear_down_cleaning(task)
power_action_mock.assert_called_once_with(task, states.POWER_OFF)
- clean_ramdisk_mock.assert_called_once_with(task)
+ clean_ramdisk_mock.assert_called_once_with(task.driver.boot, task)
(task.driver.network.remove_cleaning_network
.assert_called_once_with(task))
power_on_node_if_needed_mock.assert_called_once_with(task)
restore_power_state_mock.assert_called_once_with(
task, states.POWER_OFF)
-
- @mock.patch.object(flat_network.FlatNetwork, 'remove_provisioning_network',
- autospec=True)
- @mock.patch.object(flat_network.FlatNetwork, 'configure_tenant_networks',
- autospec=True)
- @mock.patch.object(utils, 'restore_power_state_if_needed', autospec=True)
- @mock.patch.object(utils, 'power_on_node_if_needed', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- return_value=states.POWER_OFF)
- @mock.patch.object(utils, 'node_power_action', autospec=True)
- def test_reboot_and_finish_deploy_with_smartnic_port(
- self, power_action_mock, get_pow_state_mock,
- power_on_node_if_needed_mock, restore_power_state_mock,
- configure_tenant_networks_mock, remove_provisioning_network_mock):
- d_info = self.node.driver_info
- d_info['deploy_forces_oob_reboot'] = True
- self.node.driver_info = d_info
- self.node.save()
- self.config(group='ansible',
- post_deploy_get_power_state_retry_interval=0)
- self.node.provision_state = states.DEPLOYING
- self.node.save()
- power_on_node_if_needed_mock.return_value = states.POWER_OFF
- with task_manager.acquire(self.context, self.node.uuid) as task:
- self.driver.reboot_and_finish_deploy(task)
- expected_power_calls = [((task, states.POWER_OFF),),
- ((task, states.POWER_ON),)]
- self.assertEqual(
- expected_power_calls, power_action_mock.call_args_list)
- power_on_node_if_needed_mock.assert_called_once_with(task)
- restore_power_state_mock.assert_called_once_with(
- task, states.POWER_OFF)
- get_pow_state_mock.assert_not_called()
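
The rewrites in this file track the deploy-steps refactor: reboot_and_finish_deploy becomes tear_down_agent and now asserts only a single POWER_OFF call (the provisioning/tenant network switchover asserts are dropped here, presumably covered by separate steps), continue_deploy becomes write_image, and the standalone reboot_to_instance test disappears, its boot-device assertion folded into test_write_image. The boot-interface asserts gain a leading task.driver.boot for the same autospec reason illustrated earlier.
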
diff --git a/ironic/tests/unit/drivers/modules/drac/test_bios.py b/ironic/tests/unit/drivers/modules/drac/test_bios.py
index 2c59f9f52..6fdca0684 100644
--- a/ironic/tests/unit/drivers/modules/drac/test_bios.py
+++ b/ironic/tests/unit/drivers/modules/drac/test_bios.py
@@ -19,8 +19,9 @@
Test class for DRAC BIOS configuration specific methods
"""
+from unittest import mock
+
from dracclient import exceptions as drac_exceptions
-import mock
from ironic.common import exception
from ironic.common import states
diff --git a/ironic/tests/unit/drivers/modules/drac/test_boot.py b/ironic/tests/unit/drivers/modules/drac/test_boot.py
index ec8ae4fe6..d73ef69d7 100644
--- a/ironic/tests/unit/drivers/modules/drac/test_boot.py
+++ b/ironic/tests/unit/drivers/modules/drac/test_boot.py
@@ -18,7 +18,8 @@
Test class for DRAC boot interface
"""
-import mock
+from unittest import mock
+
from oslo_utils import importutils
from ironic.common import boot_devices
diff --git a/ironic/tests/unit/drivers/modules/drac/test_common.py b/ironic/tests/unit/drivers/modules/drac/test_common.py
index 6863ed564..cc1baee28 100644
--- a/ironic/tests/unit/drivers/modules/drac/test_common.py
+++ b/ironic/tests/unit/drivers/modules/drac/test_common.py
@@ -15,8 +15,9 @@
Test class for common methods used by DRAC modules.
"""
+from unittest import mock
+
import dracclient.client
-import mock
from ironic.common import exception
from ironic.drivers.modules.drac import common as drac_common
diff --git a/ironic/tests/unit/drivers/modules/drac/test_inspect.py b/ironic/tests/unit/drivers/modules/drac/test_inspect.py
index e76b351f2..628f3c855 100644
--- a/ironic/tests/unit/drivers/modules/drac/test_inspect.py
+++ b/ironic/tests/unit/drivers/modules/drac/test_inspect.py
@@ -15,8 +15,9 @@
Test class for DRAC inspection interface
"""
+from unittest import mock
+
from dracclient import exceptions as drac_exceptions
-import mock
from ironic.common import exception
from ironic.common import states
diff --git a/ironic/tests/unit/drivers/modules/drac/test_job.py b/ironic/tests/unit/drivers/modules/drac/test_job.py
index 5a68c8d5e..a0ea526cb 100644
--- a/ironic/tests/unit/drivers/modules/drac/test_job.py
+++ b/ironic/tests/unit/drivers/modules/drac/test_job.py
@@ -15,8 +15,9 @@
Test class for DRAC job specific methods
"""
+from unittest import mock
+
from dracclient import exceptions as drac_exceptions
-import mock
from ironic.common import exception
from ironic.conductor import task_manager
diff --git a/ironic/tests/unit/drivers/modules/drac/test_management.py b/ironic/tests/unit/drivers/modules/drac/test_management.py
index 74f2ea372..27de5f7d5 100644
--- a/ironic/tests/unit/drivers/modules/drac/test_management.py
+++ b/ironic/tests/unit/drivers/modules/drac/test_management.py
@@ -20,7 +20,8 @@
Test class for DRAC management interface
"""
-import mock
+from unittest import mock
+
from oslo_utils import importutils
import ironic.common.boot_devices
@@ -447,8 +448,8 @@ class DracManagementInternalMethodsTestCase(test_utils.BaseDracTest):
self.assertEqual(0, mock_client.set_bios_settings.call_count)
self.assertEqual(0, mock_client.commit_pending_bios_changes.call_count)
- @mock.patch('time.time')
- @mock.patch('time.sleep')
+ @mock.patch('time.time', autospec=True)
+ @mock.patch('time.sleep', autospec=True)
@mock.patch.object(drac_mgmt, '_get_next_persistent_boot_mode',
spec_set=True, autospec=True)
@mock.patch.object(drac_mgmt, '_get_boot_device', spec_set=True,
diff --git a/ironic/tests/unit/drivers/modules/drac/test_periodic_task.py b/ironic/tests/unit/drivers/modules/drac/test_periodic_task.py
index a4392f269..ba5b7ae5e 100644
--- a/ironic/tests/unit/drivers/modules/drac/test_periodic_task.py
+++ b/ironic/tests/unit/drivers/modules/drac/test_periodic_task.py
@@ -15,7 +15,7 @@
Test class for DRAC periodic tasks
"""
-import mock
+from unittest import mock
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
@@ -187,7 +187,8 @@ class DracPeriodicTaskTestCase(db_base.DbTestCase):
self.node.raid_config['logical_disks'])
mock_notify_conductor_resume.assert_called_once_with(task)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_clean')
+ @mock.patch.object(manager_utils, 'notify_conductor_resume_clean',
+ autospec=True)
def test__check_node_raid_jobs_with_completed_job_in_clean(
self, mock_notify_conductor_resume):
self.node.clean_step = {'foo': 'bar'}
@@ -195,7 +196,8 @@ class DracPeriodicTaskTestCase(db_base.DbTestCase):
self._test__check_node_raid_jobs_with_completed_job(
mock_notify_conductor_resume)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy')
+ @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
+ autospec=True)
def test__check_node_raid_jobs_with_completed_job_in_deploy(
self, mock_notify_conductor_resume):
self._test__check_node_raid_jobs_with_completed_job(
@@ -272,7 +274,8 @@ class DracPeriodicTaskTestCase(db_base.DbTestCase):
task.process_event.assert_called_once_with('fail')
self.assertFalse(mock_notify_conductor_resume.called)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_clean')
+ @mock.patch.object(manager_utils, 'notify_conductor_resume_clean',
+ autospec=True)
def test__check_node_raid_jobs_with_completed_job_already_failed_in_clean(
self, mock_notify_conductor_resume):
self.node.clean_step = {'foo': 'bar'}
@@ -280,7 +283,8 @@ class DracPeriodicTaskTestCase(db_base.DbTestCase):
self._test__check_node_raid_jobs_with_completed_job_already_failed(
mock_notify_conductor_resume)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy')
+ @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
+ autospec=True)
def test__check_node_raid_jobs_with_completed_job_already_failed_in_deploy(
self, mock_notify_conductor_resume):
self._test__check_node_raid_jobs_with_completed_job_already_failed(
@@ -326,7 +330,8 @@ class DracPeriodicTaskTestCase(db_base.DbTestCase):
self.node.raid_config['logical_disks'])
mock_notify_conductor_resume.assert_called_once_with(task)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_clean')
+ @mock.patch.object(manager_utils, 'notify_conductor_resume_clean',
+ autospec=True)
def test__check_node_raid_jobs_with_multiple_jobs_completed_in_clean(
self, mock_notify_conductor_resume):
self.node.clean_step = {'foo': 'bar'}
@@ -334,7 +339,8 @@ class DracPeriodicTaskTestCase(db_base.DbTestCase):
self._test__check_node_raid_jobs_with_multiple_jobs_completed(
mock_notify_conductor_resume)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy')
+ @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
+ autospec=True)
def test__check_node_raid_jobs_with_multiple_jobs_completed_in_deploy(
self, mock_notify_conductor_resume):
self._test__check_node_raid_jobs_with_multiple_jobs_completed(
@@ -384,7 +390,8 @@ class DracPeriodicTaskTestCase(db_base.DbTestCase):
task.process_event.assert_called_once_with('fail')
self.assertFalse(mock_notify_conductor_resume.called)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_clean')
+ @mock.patch.object(manager_utils, 'notify_conductor_resume_clean',
+ autospec=True)
def test__check_node_raid_jobs_with_multiple_jobs_failed_in_clean(
self, mock_notify_conductor_resume):
self.node.clean_step = {'foo': 'bar'}
@@ -392,7 +399,8 @@ class DracPeriodicTaskTestCase(db_base.DbTestCase):
self._test__check_node_raid_jobs_with_multiple_jobs_failed(
mock_notify_conductor_resume)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy')
+ @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
+ autospec=True)
def test__check_node_raid_jobs_with_multiple_jobs_failed_in_deploy(
self, mock_notify_conductor_resume):
self._test__check_node_raid_jobs_with_multiple_jobs_failed(
diff --git a/ironic/tests/unit/drivers/modules/drac/test_power.py b/ironic/tests/unit/drivers/modules/drac/test_power.py
index 4c442ba6f..aeb3c038e 100644
--- a/ironic/tests/unit/drivers/modules/drac/test_power.py
+++ b/ironic/tests/unit/drivers/modules/drac/test_power.py
@@ -15,9 +15,10 @@
Test class for DRAC power interface
"""
+from unittest import mock
+
from dracclient import constants as drac_constants
from dracclient import exceptions as drac_exceptions
-import mock
from ironic.common import exception
from ironic.common import states
@@ -68,7 +69,7 @@ class DracPowerTestCase(test_utils.BaseDracTest):
mock_client.get_power_state.assert_called_once_with()
- @mock.patch.object(drac_power.LOG, 'warning')
+ @mock.patch.object(drac_power.LOG, 'warning', autospec=True)
def test_set_power_state(self, mock_log, mock_get_drac_client):
mock_client = mock_get_drac_client.return_value
@@ -94,7 +95,7 @@ class DracPowerTestCase(test_utils.BaseDracTest):
drac_power_state = drac_power.REVERSE_POWER_STATES[states.POWER_OFF]
mock_client.set_power_state.assert_called_once_with(drac_power_state)
- @mock.patch.object(drac_power.LOG, 'warning')
+ @mock.patch.object(drac_power.LOG, 'warning', autospec=True)
def test_set_power_state_timeout(self, mock_log, mock_get_drac_client):
mock_client = mock_get_drac_client.return_value
@@ -107,7 +108,7 @@ class DracPowerTestCase(test_utils.BaseDracTest):
mock_client.set_power_state.assert_called_once_with(drac_power_state)
self.assertTrue(mock_log.called)
- @mock.patch.object(drac_power.LOG, 'warning')
+ @mock.patch.object(drac_power.LOG, 'warning', autospec=True)
def test_reboot_while_powered_on(self, mock_log, mock_get_drac_client):
mock_client = mock_get_drac_client.return_value
mock_client.get_power_state.return_value = drac_constants.POWER_ON
@@ -120,7 +121,7 @@ class DracPowerTestCase(test_utils.BaseDracTest):
mock_client.set_power_state.assert_called_once_with(drac_power_state)
self.assertFalse(mock_log.called)
- @mock.patch.object(drac_power.LOG, 'warning')
+ @mock.patch.object(drac_power.LOG, 'warning', autospec=True)
def test_reboot_while_powered_on_timeout(self, mock_log,
mock_get_drac_client):
mock_client = mock_get_drac_client.return_value
@@ -145,7 +146,7 @@ class DracPowerTestCase(test_utils.BaseDracTest):
drac_power_state = drac_power.REVERSE_POWER_STATES[states.POWER_ON]
mock_client.set_power_state.assert_called_once_with(drac_power_state)
- @mock.patch('time.sleep')
+ @mock.patch('time.sleep', autospec=True)
def test_reboot_retries_success(self, mock_sleep, mock_get_drac_client):
mock_client = mock_get_drac_client.return_value
mock_client.get_power_state.return_value = drac_constants.POWER_OFF
@@ -163,7 +164,7 @@ class DracPowerTestCase(test_utils.BaseDracTest):
[mock.call(drac_power_state),
mock.call(drac_power_state)])
- @mock.patch('time.sleep')
+ @mock.patch('time.sleep', autospec=True)
def test_reboot_retries_fail(self, mock_sleep, mock_get_drac_client):
mock_client = mock_get_drac_client.return_value
mock_client.get_power_state.return_value = drac_constants.POWER_OFF
@@ -179,7 +180,7 @@ class DracPowerTestCase(test_utils.BaseDracTest):
self.assertEqual(drac_power.POWER_STATE_TRIES,
mock_client.set_power_state.call_count)
- @mock.patch('time.sleep')
+ @mock.patch('time.sleep', autospec=True)
def test_reboot_retries_power_change_success(self, mock_sleep,
mock_get_drac_client):
mock_client = mock_get_drac_client.return_value
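
Patching time.sleep and time.time with autospec=True keeps these retry tests instantaneous while still counting attempts, and the autospec guards against calling the stand-in with arguments the real function would reject. A minimal stdlib-only sketch of the pattern:

    import time
    from unittest import mock

    @mock.patch('time.sleep', autospec=True)
    def exercise_retries(mock_sleep):
        for _ in range(3):
            time.sleep(1)  # returns instantly under the patch
        assert mock_sleep.call_count == 3

    exercise_retries()
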
diff --git a/ironic/tests/unit/drivers/modules/drac/test_raid.py b/ironic/tests/unit/drivers/modules/drac/test_raid.py
index 1cb50f129..48f38a09b 100644
--- a/ironic/tests/unit/drivers/modules/drac/test_raid.py
+++ b/ironic/tests/unit/drivers/modules/drac/test_raid.py
@@ -15,13 +15,16 @@
Test class for DRAC RAID interface
"""
+from collections import defaultdict
+from unittest import mock
+
from dracclient import constants
from dracclient import exceptions as drac_exceptions
-import mock
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
+from ironic.drivers import base
from ironic.drivers.modules.drac import common as drac_common
from ironic.drivers.modules.drac import job as drac_job
from ironic.drivers.modules.drac import raid as drac_raid
@@ -285,6 +288,33 @@ class DracManageVirtualDisksTestCase(test_utils.BaseDracTest):
@mock.patch.object(drac_job, 'validate_job_queue', spec_set=True,
autospec=True)
+ def test_set_raid_settings(self, mock_validate_job_queue,
+ mock_get_drac_client):
+ mock_client = mock.Mock()
+ mock_get_drac_client.return_value = mock_client
+ controller_fqdd = "RAID.Integrated.1-1"
+ raid_cntrl_attr = "RAID.Integrated.1-1:RAIDRequestedControllerMode"
+ raid_settings = {raid_cntrl_attr: 'RAID'}
+ drac_raid.set_raid_settings(self.node, controller_fqdd, raid_settings)
+
+ mock_validate_job_queue.assert_called_once_with(
+ self.node)
+ mock_client.set_raid_settings.assert_called_once_with(
+ controller_fqdd, raid_settings)
+
+ @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True,
+ autospec=True)
+ def test_list_raid_settings(self, mock_validate_job_queue,
+ mock_get_drac_client):
+ mock_client = mock.Mock()
+ mock_get_drac_client.return_value = mock_client
+ drac_raid.list_raid_settings(self.node)
+ mock_validate_job_queue.assert_called_once_with(
+ self.node)
+ mock_client.list_raid_settings.assert_called_once_with()
+
+ @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True,
+ autospec=True)
def test_change_physical_disk_state(self,
mock_validate_job_queue,
mock_get_drac_client):
@@ -388,6 +418,7 @@ class DracManageVirtualDisksTestCase(test_utils.BaseDracTest):
mock_get_drac_client):
controllers = [{'is_reboot_required': 'true',
'is_commit_required': True,
+ 'is_ehba_mode': False,
'raid_controller': 'AHCI.Slot.3-1'}]
substep = "delete_foreign_config"
@@ -1064,6 +1095,7 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
autospec=True)
@mock.patch.object(drac_raid, '_reset_raid_config', autospec=True)
@mock.patch.object(drac_raid, 'list_virtual_disks', autospec=True)
+ @mock.patch.object(drac_raid, 'list_raid_settings', autospec=True)
@mock.patch.object(drac_raid, 'list_physical_disks', autospec=True)
@mock.patch.object(drac_raid, 'change_physical_disk_state', spec_set=True,
autospec=True)
@@ -1076,6 +1108,7 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
mock_validate_job_queue,
mock_change_physical_disk_state,
mock_list_physical_disks,
+ mock_list_raid_settings,
mock_list_virtual_disks,
mock__reset_raid_config,
mock_get_drac_client):
@@ -1096,6 +1129,18 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
'supports_realtime': True}
raid_controller = test_utils.make_raid_controller(
raid_controller_dict)
+
+ raid_attr = "RAID.Integrated.1-1:RAIDCurrentControllerMode"
+ raid_controller_config = {
+ 'id': 'RAID.Integrated.1-1:RAIDCurrentControllerMode',
+ 'current_value': ['RAID'],
+ 'read_only': True,
+ 'name': 'RAIDCurrentControllerMode',
+ 'possible_values': ['RAID', 'Enhanced HBA']}
+ raid_cntrl_settings = {
+ raid_attr: test_utils.create_raid_setting(raid_controller_config)}
+
+ mock_list_raid_settings.return_value = raid_cntrl_settings
mock_list_physical_disks.return_value = physical_disks
mock_commit_config.side_effect = ['12']
mock_client.list_raid_controllers.return_value = [raid_controller]
@@ -1805,6 +1850,7 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
autospec=True)
@mock.patch.object(drac_raid, '_reset_raid_config', autospec=True)
@mock.patch.object(drac_raid, 'list_raid_controllers', autospec=True)
+ @mock.patch.object(drac_raid, 'list_raid_settings', autospec=True)
@mock.patch.object(drac_job, 'validate_job_queue', spec_set=True,
autospec=True)
@mock.patch.object(drac_raid, 'commit_config', spec_set=True,
@@ -1812,11 +1858,23 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
def _test_delete_configuration(self, expected_state,
mock_commit_config,
mock_validate_job_queue,
+ mock_list_raid_settings,
mock_list_raid_controllers,
mock__reset_raid_config,
mock_get_drac_client):
mock_client = mock.Mock()
mock_get_drac_client.return_value = mock_client
+ raid_attr = "RAID.Integrated.1-1:RAIDCurrentControllerMode"
+ raid_controller_config = {
+ 'id': 'RAID.Integrated.1-1:RAIDCurrentControllerMode',
+ 'current_value': ['RAID'],
+ 'read_only': True,
+ 'name': 'RAIDCurrentControllerMode',
+ 'possible_values': ['RAID', 'Enhanced HBA']}
+
+ raid_cntrl_settings = {
+ raid_attr: test_utils.create_raid_setting(raid_controller_config)}
+
raid_controller_dict = {
'id': 'RAID.Integrated.1-1',
'description': 'Integrated RAID Controller 1',
@@ -1829,6 +1887,7 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
mock_list_raid_controllers.return_value = [
test_utils.make_raid_controller(raid_controller_dict)]
+ mock_list_raid_settings.return_value = raid_cntrl_settings
mock_commit_config.return_value = '42'
mock__reset_raid_config.return_value = {
'is_reboot_required': constants.RebootRequired.optional,
@@ -1858,16 +1917,17 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
@mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
autospec=True)
@mock.patch.object(drac_raid, 'list_raid_controllers', autospec=True)
+ @mock.patch.object(drac_raid, 'list_raid_settings', autospec=True)
@mock.patch.object(drac_job, 'validate_job_queue', spec_set=True,
autospec=True)
@mock.patch.object(drac_raid, 'commit_config', spec_set=True,
autospec=True)
@mock.patch.object(drac_raid, '_reset_raid_config', spec_set=True,
autospec=True)
- def test_delete_configuration_with_non_realtime_controller(
+ def test_delete_configuration_with_mix_realtime_controller_in_raid_mode(
self, mock__reset_raid_config, mock_commit_config,
- mock_validate_job_queue, mock_list_raid_controllers,
- mock_get_drac_client):
+ mock_validate_job_queue, mock_list_raid_settings,
+ mock_list_raid_controllers, mock_get_drac_client):
mock_client = mock.Mock()
mock_get_drac_client.return_value = mock_client
expected_raid_config_params = ['AHCI.Slot.3-1', 'RAID.Integrated.1-1']
@@ -1892,6 +1952,25 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
test_utils.make_raid_controller(controller) for
controller in mix_controllers]
+ raid_controller_config = [
+ {'id': 'AHCI.Slot.3-1:RAIDCurrentControllerMode',
+ 'current_value': ['RAID'],
+ 'read_only': True,
+ 'name': 'RAIDCurrentControllerMode',
+ 'possible_values': ['RAID', 'Enhanced HBA']},
+ {'id': 'RAID.Integrated.1-1:RAIDCurrentControllerMode',
+ 'current_value': ['RAID'],
+ 'read_only': True,
+ 'name': 'RAIDCurrentControllerMode',
+ 'possible_values': ['RAID', 'Enhanced HBA']}]
+
+ raid_settings = defaultdict()
+ for sett in raid_controller_config:
+ raid_settings[sett.get('id')] = test_utils.create_raid_setting(
+ sett)
+
+ mock_list_raid_settings.return_value = raid_settings
+
mock_commit_config.side_effect = ['42', '12']
mock__reset_raid_config.side_effect = [{
'is_reboot_required': constants.RebootRequired.optional,
@@ -1923,6 +2002,97 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
@mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
autospec=True)
@mock.patch.object(drac_raid, 'list_raid_controllers', autospec=True)
+ @mock.patch.object(drac_raid, 'list_raid_settings', autospec=True)
+ @mock.patch.object(drac_job, 'list_unfinished_jobs', autospec=True)
+ @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True,
+ autospec=True)
+ @mock.patch.object(drac_raid, 'set_raid_settings', autospec=True)
+ @mock.patch.object(drac_raid, 'commit_config', spec_set=True,
+ autospec=True)
+ @mock.patch.object(drac_raid, '_reset_raid_config', spec_set=True,
+ autospec=True)
+ def test_delete_configuration_with_mix_realtime_controller_in_ehba_mode(
+ self, mock__reset_raid_config, mock_commit_config,
+ mock_set_raid_settings, mock_validate_job_queue,
+ mock_list_unfinished_jobs, mock_list_raid_settings,
+ mock_list_raid_controllers, mock_get_drac_client):
+ mock_client = mock.Mock()
+ mock_get_drac_client.return_value = mock_client
+ expected_raid_config_params = ['RAID.Integrated.1-1', 'AHCI.Slot.3-1']
+ mix_controllers = [{'id': 'RAID.Integrated.1-1',
+ 'description': 'Integrated RAID Controller 1',
+ 'manufacturer': 'DELL',
+ 'model': 'PERC H740 Mini',
+ 'primary_status': 'unknown',
+ 'firmware_version': '50.5.0-1750',
+ 'bus': '3C',
+ 'supports_realtime': True},
+ {'id': 'AHCI.Slot.3-1',
+ 'description': 'AHCI controller in slot 3',
+ 'manufacturer': 'DELL',
+ 'model': 'BOSS-S1',
+ 'primary_status': 'unknown',
+ 'firmware_version': '2.5.13.3016',
+ 'bus': '5E',
+ 'supports_realtime': False}]
+
+ mock_list_raid_controllers.return_value = [
+ test_utils.make_raid_controller(controller) for
+ controller in mix_controllers]
+ raid_controller_config = [
+ {'id': 'RAID.Integrated.1-1:RAIDCurrentControllerMode',
+ 'current_value': ['Enhanced HBA'],
+ 'read_only': True,
+ 'name': 'RAIDCurrentControllerMode',
+ 'possible_values': ['RAID', 'Enhanced HBA']},
+ {'id': 'AHCI.Slot.3-1:RAIDCurrentControllerMode',
+ 'current_value': ['RAID'],
+ 'read_only': True,
+ 'name': 'RAIDCurrentControllerMode',
+ 'possible_values': ['RAID', 'Enhanced HBA']}]
+
+ raid_settings = defaultdict()
+ for sett in raid_controller_config:
+ raid_settings[sett.get('id')] = test_utils.create_raid_setting(
+ sett)
+
+ mock_list_raid_settings.return_value = raid_settings
+ mock_list_unfinished_jobs.return_value = []
+ mock_commit_config.side_effect = ['42', '12', '13']
+ mock__reset_raid_config.side_effect = [{
+ 'is_reboot_required': constants.RebootRequired.optional,
+ 'is_commit_required': True
+ }, {
+ 'is_reboot_required': constants.RebootRequired.true,
+ 'is_commit_required': True
+ }]
+ mock_set_raid_settings.return_value = {
+ 'is_reboot_required': constants.RebootRequired.true,
+ 'is_commit_required': True}
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ return_value = task.driver.raid.delete_configuration(task)
+ mock_commit_config.assert_has_calls(
+ [mock.call(mock.ANY, raid_controller='RAID.Integrated.1-1',
+ reboot=False, realtime=True),
+ mock.call(mock.ANY, raid_controller='AHCI.Slot.3-1',
+ reboot=False, realtime=False),
+ mock.call(mock.ANY, raid_controller='RAID.Integrated.1-1',
+ reboot=True, realtime=False)],
+ any_order=True)
+
+ self.assertEqual(states.CLEANWAIT, return_value)
+ self.node.refresh()
+ self.assertEqual(expected_raid_config_params,
+ self.node.driver_internal_info[
+ 'raid_config_parameters'])
+ self.assertEqual(['42', '12', '13'],
+ self.node.driver_internal_info['raid_config_job_ids'])
+
+ @mock.patch.object(drac_common, 'get_drac_client', spec_set=True,
+ autospec=True)
+ @mock.patch.object(drac_raid, 'list_raid_controllers', autospec=True)
@mock.patch.object(drac_job, 'validate_job_queue', spec_set=True,
autospec=True)
@mock.patch.object(drac_raid, 'commit_config', spec_set=True,
@@ -2055,3 +2225,17 @@ class DracRaidInterfaceTestCase(test_utils.BaseDracTest):
mock_commit_config.assert_called_once_with(
self.node, raid_controller='RAID.Integrated.1-1', reboot=False,
realtime=True)
+
+ @mock.patch.object(base.RAIDInterface, 'apply_configuration',
+ autospec=True)
+ def test_apply_configuration(self, mock_apply_configuration):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.driver.raid.apply_configuration(
+ task, self.target_raid_configuration,
+ create_root_volume=False, create_nonroot_volumes=True,
+ delete_existing=False)
+
+ mock_apply_configuration.assert_called_once_with(
+ task.driver.raid, task,
+ self.target_raid_configuration, False, True, False)
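
The assertions in the tests above encode a scheduling rule for DRAC delete jobs: a controller that supports realtime and sits in RAID mode is committed without a reboot, while remaining jobs are staged and the last staged commit carries reboot=True (the Enhanced-HBA case adds an extra rebooting job for the mode switch itself). A rough, self-contained sketch of that rule for the simple mixed case -- illustrative names only, not drac_raid's actual implementation:

    from collections import namedtuple

    Controller = namedtuple('Controller', ['id', 'supports_realtime'])

    def plan_commit_params(controllers, raid_settings):
        """Yield (controller_id, reboot, realtime) for each delete job."""
        staged = []
        for ctrl in controllers:
            attr = '%s:RAIDCurrentControllerMode' % ctrl.id
            if ctrl.supports_realtime and raid_settings.get(attr) == ['RAID']:
                # Realtime-capable and in RAID mode: apply without reboot.
                yield ctrl.id, False, True
            else:
                staged.append(ctrl.id)
        # Staged jobs queue up; only the last one triggers the reboot.
        for ctrl_id in staged[:-1]:
            yield ctrl_id, False, False
        for ctrl_id in staged[-1:]:
            yield ctrl_id, True, False

    controllers = [Controller('RAID.Integrated.1-1', True),
                   Controller('AHCI.Slot.3-1', False)]
    settings = {'RAID.Integrated.1-1:RAIDCurrentControllerMode': ['RAID'],
                'AHCI.Slot.3-1:RAIDCurrentControllerMode': ['RAID']}
    print(list(plan_commit_params(controllers, settings)))
    # [('RAID.Integrated.1-1', False, True), ('AHCI.Slot.3-1', True, False)]
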
diff --git a/ironic/tests/unit/drivers/modules/drac/utils.py b/ironic/tests/unit/drivers/modules/drac/utils.py
index 44b92e77b..bc248b237 100644
--- a/ironic/tests/unit/drivers/modules/drac/utils.py
+++ b/ironic/tests/unit/drivers/modules/drac/utils.py
@@ -96,3 +96,8 @@ def make_physical_disk(physical_disk_dict):
tuple_class = dracclient_raid.PhysicalDisk if dracclient_raid else None
return dict_to_namedtuple(values=physical_disk_dict,
tuple_class=tuple_class)
+
+
+def create_raid_setting(raid_settings_dict):
+ """Returns the raid configuration tuple object"""
+ return dict_to_namedtuple(values=raid_settings_dict)
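
For reference, a standalone approximation of what the helper does when no tuple_class is supplied (the real dict_to_namedtuple lives earlier in this utils module; the fallback class name here is illustrative):

    import collections

    def dict_to_namedtuple(values, tuple_class=None):
        if tuple_class is None:
            tuple_class = collections.namedtuple('GenericNamedTuple',
                                                 list(values))
        return tuple_class(**values)

    setting = dict_to_namedtuple(values={
        'id': 'RAID.Integrated.1-1:RAIDCurrentControllerMode',
        'current_value': ['RAID'],
        'read_only': True,
        'name': 'RAIDCurrentControllerMode',
        'possible_values': ['RAID', 'Enhanced HBA']})
    print(setting.current_value)  # ['RAID']
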
diff --git a/ironic/tests/unit/drivers/modules/ibmc/base.py b/ironic/tests/unit/drivers/modules/ibmc/base.py
index cb337207e..9a282b1cb 100644
--- a/ironic/tests/unit/drivers/modules/ibmc/base.py
+++ b/ironic/tests/unit/drivers/modules/ibmc/base.py
@@ -12,7 +12,7 @@
# under the License.
"""Test base class for iBMC Driver."""
-import mock
+from unittest import mock
from ironic.drivers.modules.ibmc import utils
from ironic.tests.unit.db import base as db_base
@@ -28,7 +28,8 @@ class IBMCTestCase(db_base.DbTestCase):
self.config(enabled_hardware_types=['ibmc'],
enabled_power_interfaces=['ibmc'],
enabled_management_interfaces=['ibmc'],
- enabled_vendor_interfaces=['ibmc'])
+ enabled_vendor_interfaces=['ibmc'],
+ enabled_raid_interfaces=['ibmc'])
self.node = obj_utils.create_test_node(
self.context, driver='ibmc', driver_info=self.driver_info)
self.ibmc = utils.parse_driver_info(self.node)
diff --git a/ironic/tests/unit/drivers/modules/ibmc/test_management.py b/ironic/tests/unit/drivers/modules/ibmc/test_management.py
index d45a23304..b4fe14040 100644
--- a/ironic/tests/unit/drivers/modules/ibmc/test_management.py
+++ b/ironic/tests/unit/drivers/modules/ibmc/test_management.py
@@ -13,8 +13,8 @@
"""Test class for iBMC Management interface."""
import itertools
+from unittest import mock
-import mock
from oslo_utils import importutils
from ironic.common import boot_devices
diff --git a/ironic/tests/unit/drivers/modules/ibmc/test_power.py b/ironic/tests/unit/drivers/modules/ibmc/test_power.py
index d7d68a704..681853a94 100644
--- a/ironic/tests/unit/drivers/modules/ibmc/test_power.py
+++ b/ironic/tests/unit/drivers/modules/ibmc/test_power.py
@@ -12,7 +12,8 @@
# under the License.
"""Test class for iBMC Power interface."""
-import mock
+from unittest import mock
+
from oslo_utils import importutils
from ironic.common import exception
diff --git a/ironic/tests/unit/drivers/modules/ibmc/test_raid.py b/ironic/tests/unit/drivers/modules/ibmc/test_raid.py
new file mode 100644
index 000000000..fd66b8fe2
--- /dev/null
+++ b/ironic/tests/unit/drivers/modules/ibmc/test_raid.py
@@ -0,0 +1,167 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Test class for iBMC RAID interface."""
+
+from unittest import mock
+
+from oslo_utils import importutils
+
+from ironic.common import exception
+from ironic.conductor import task_manager
+from ironic.drivers.modules.ibmc import raid as ibmc_raid
+from ironic.tests.unit.db import utils as db_utils
+from ironic.tests.unit.drivers.modules.ibmc import base
+
+constants = importutils.try_import('ibmc_client.constants')
+ibmc_client = importutils.try_import('ibmc_client')
+ibmc_error = importutils.try_import('ibmc_client.exceptions')
+
+INFO_DICT = db_utils.get_test_ibmc_info()
+
+
+class IbmcRAIDTestCase(base.IBMCTestCase):
+
+ def setUp(self):
+ super(IbmcRAIDTestCase, self).setUp()
+        self.driver = mock.Mock(raid=ibmc_raid.IbmcRAID())
+ self.target_raid_config = {
+ "logical_disks": [
+ {
+ 'size_gb': 200,
+ 'raid_level': 0,
+ 'is_root_volume': True
+ },
+ {
+ 'size_gb': 'MAX',
+ 'raid_level': 5
+ }
+ ]
+ }
+ self.node.target_raid_config = self.target_raid_config
+ self.node.save()
+
+ @mock.patch.object(ibmc_client, 'connect', autospec=True)
+ def test_sync_create_configuration_without_delete(self, connect_ibmc):
+ conn = self.mock_ibmc_conn(connect_ibmc)
+ conn.system.storage.apply_raid_configuration.return_value = None
+
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ result = task.driver.raid.create_configuration(
+ task, create_root_volume=True, create_nonroot_volumes=True,
+ delete_existing=False)
+ self.assertIsNone(result, "synchronous create raid configuration "
+ "should return None")
+
+ conn.system.storage.apply_raid_configuration.assert_called_once_with(
+ self.node.target_raid_config.get('logical_disks')
+ )
+
+ @mock.patch.object(ibmc_client, 'connect', autospec=True)
+ def test_sync_create_configuration_with_delete(self, connect_ibmc):
+ conn = self.mock_ibmc_conn(connect_ibmc)
+ conn.system.storage.delete_all_raid_configuration.return_value = None
+ conn.system.storage.apply_raid_configuration.return_value = None
+
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ result = task.driver.raid.create_configuration(
+ task, create_root_volume=True, create_nonroot_volumes=True,
+ delete_existing=True)
+ self.assertIsNone(result, "synchronous create raid configuration "
+ "should return None")
+
+ conn.system.storage.delete_all_raid_configuration.assert_called_once()
+ conn.system.storage.apply_raid_configuration.assert_called_once_with(
+ self.node.target_raid_config.get('logical_disks')
+ )
+
+ @mock.patch.object(ibmc_client, 'connect', autospec=True)
+ def test_sync_create_configuration_without_nonroot(self, connect_ibmc):
+ conn = self.mock_ibmc_conn(connect_ibmc)
+ conn.system.storage.delete_all_raid_configuration.return_value = None
+ conn.system.storage.apply_raid_configuration.return_value = None
+
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ result = task.driver.raid.create_configuration(
+ task, create_root_volume=True, create_nonroot_volumes=False,
+ delete_existing=True)
+ self.assertIsNone(result, "synchronous create raid configuration "
+ "should return None")
+
+ conn.system.storage.delete_all_raid_configuration.assert_called_once()
+ conn.system.storage.apply_raid_configuration.assert_called_once_with(
+ [{'size_gb': 200, 'raid_level': 0, 'is_root_volume': True}]
+ )
+
+ @mock.patch.object(ibmc_client, 'connect', autospec=True)
+ def test_sync_create_configuration_without_root(self, connect_ibmc):
+ conn = self.mock_ibmc_conn(connect_ibmc)
+ conn.system.storage.delete_all_raid_configuration.return_value = None
+ conn.system.storage.apply_raid_configuration.return_value = None
+
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ result = task.driver.raid.create_configuration(
+ task, create_root_volume=False, create_nonroot_volumes=True,
+ delete_existing=True)
+ self.assertIsNone(result, "synchronous create raid configuration "
+ "should return None")
+
+ conn.system.storage.delete_all_raid_configuration.assert_called_once()
+ conn.system.storage.apply_raid_configuration.assert_called_once_with(
+ [{'size_gb': 'MAX', 'raid_level': 5}]
+ )
+
+ @mock.patch.object(ibmc_client, 'connect', autospec=True)
+ def test_sync_create_configuration_failed(self, connect_ibmc):
+ conn = self.mock_ibmc_conn(connect_ibmc)
+ conn.system.storage.delete_all_raid_configuration.return_value = None
+ conn.system.storage.apply_raid_configuration.side_effect = (
+ ibmc_error.IBMCClientError
+ )
+
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ self.assertRaisesRegex(
+ exception.IBMCError, 'create iBMC RAID configuration',
+ task.driver.raid.create_configuration, task,
+ create_root_volume=True, create_nonroot_volumes=True,
+ delete_existing=True)
+
+ conn.system.storage.delete_all_raid_configuration.assert_called_once()
+ conn.system.storage.apply_raid_configuration.assert_called_once_with(
+ self.node.target_raid_config.get('logical_disks')
+ )
+
+ @mock.patch.object(ibmc_client, 'connect', autospec=True)
+ def test_sync_delete_configuration_success(self, connect_ibmc):
+ conn = self.mock_ibmc_conn(connect_ibmc)
+ conn.system.storage.delete_all_raid_configuration.return_value = None
+
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ result = task.driver.raid.delete_configuration(task)
+ self.assertIsNone(result, "synchronous delete raid configuration "
+ "should return None")
+
+ conn.system.storage.delete_all_raid_configuration.assert_called_once()
+
+ @mock.patch.object(ibmc_client, 'connect', autospec=True)
+ def test_sync_delete_configuration_failed(self, connect_ibmc):
+ conn = self.mock_ibmc_conn(connect_ibmc)
+ conn.system.storage.delete_all_raid_configuration.side_effect = (
+ ibmc_error.IBMCClientError
+ )
+
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ self.assertRaisesRegex(
+ exception.IBMCError, 'delete iBMC RAID configuration',
+ task.driver.raid.delete_configuration, task)
+
+ conn.system.storage.delete_all_raid_configuration.assert_called_once()
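
The four create_configuration tests above pin down one behaviour: the logical disks actually applied are target_raid_config filtered by the create_root_volume / create_nonroot_volumes flags. A minimal sketch of that filter, inferred from the assertions rather than taken from the driver's code:

    def filter_logical_disks(logical_disks, create_root_volume,
                             create_nonroot_volumes):
        """Keep only the disks the caller asked to create."""
        disks = []
        for disk in logical_disks:
            is_root = disk.get('is_root_volume', False)
            if (is_root and create_root_volume) or (
                    not is_root and create_nonroot_volumes):
                disks.append(disk)
        return disks

    target = [{'size_gb': 200, 'raid_level': 0, 'is_root_volume': True},
              {'size_gb': 'MAX', 'raid_level': 5}]
    print(filter_logical_disks(target, True, False))
    # [{'size_gb': 200, 'raid_level': 0, 'is_root_volume': True}]
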
diff --git a/ironic/tests/unit/drivers/modules/ibmc/test_utils.py b/ironic/tests/unit/drivers/modules/ibmc/test_utils.py
index 87df0e6e0..f04d37e88 100644
--- a/ironic/tests/unit/drivers/modules/ibmc/test_utils.py
+++ b/ironic/tests/unit/drivers/modules/ibmc/test_utils.py
@@ -14,8 +14,8 @@
import copy
import os
+from unittest import mock
-import mock
from oslo_utils import importutils
from ironic.common import exception
@@ -144,8 +144,8 @@ class IBMCUtilsTestCase(base.IBMCTestCase):
conn = self.mock_ibmc_conn(connect_ibmc)
# Mocks
conn.system.get.side_effect = [
- ibmc_error.ConnectionError(url=self.ibmc['address'],
- error='Failed to connect to host'),
+ ibmc_error.IBMCConnectionError(url=self.ibmc['address'],
+ error='Failed to connect to host'),
mock.PropertyMock(
boot_source_override=mock.PropertyMock(
target=constants.BOOT_SOURCE_TARGET_PXE,
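
The hunk above leans on mock's side_effect sequencing: the renamed IBMCConnectionError is raised on the first call and a normal value is returned on the second, which is exactly how a single-retry code path gets exercised. A self-contained illustration of the pattern (no real iBMC API involved; the exception class is a stand-in):

    from unittest import mock

    class FakeConnectionError(Exception):
        pass

    system_get = mock.Mock(side_effect=[FakeConnectionError('boom'),
                                        'system'])

    def get_system_with_retry():
        try:
            return system_get()
        except FakeConnectionError:
            # One retry after a connection failure.
            return system_get()

    print(get_system_with_retry())  # 'system'
    print(system_get.call_count)    # 2
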
diff --git a/ironic/tests/unit/drivers/modules/ibmc/test_vendor.py b/ironic/tests/unit/drivers/modules/ibmc/test_vendor.py
index f4e6e99d3..71ba47a7d 100644
--- a/ironic/tests/unit/drivers/modules/ibmc/test_vendor.py
+++ b/ironic/tests/unit/drivers/modules/ibmc/test_vendor.py
@@ -12,7 +12,8 @@
# under the License.
"""Test class for iBMC vendor interface."""
-import mock
+from unittest import mock
+
from oslo_utils import importutils
from ironic.conductor import task_manager
@@ -56,6 +57,23 @@ class IBMCVendorTestCase(base.IBMCTestCase):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
seq = task.driver.vendor.boot_up_seq(task)
- conn.system.get.assert_called_once()
+ conn.system.get.assert_called_once_with()
connect_ibmc.assert_called_once_with(**self.ibmc)
self.assertEqual(expected, seq)
+
+ @mock.patch.object(ibmc_client, 'connect', autospec=True)
+ def test_list_raid_controller(self, connect_ibmc):
+ # Mocks
+ conn = self.mock_ibmc_conn(connect_ibmc)
+
+ ctrl = mock.Mock()
+ summary = ctrl.summary.return_value
+ conn.system.storage.list.return_value = [ctrl]
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+            summaries = task.driver.vendor.get_raid_controller_list(task)
+ ctrl.summary.assert_called_once_with()
+ conn.system.storage.list.assert_called_once_with()
+ connect_ibmc.assert_called_once_with(**self.ibmc)
+        self.assertEqual([summary], summaries)
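
In isolation, the new vendor test pins down one thing: the passthru returns one summary() result per controller reported by the storage collection. A sketch under that assumption (the names mirror the mocks above, not the driver module itself):

    from unittest import mock

    def get_raid_controller_list(storage_controllers):
        # One summary dict per controller, in listing order.
        return [ctrl.summary() for ctrl in storage_controllers]

    ctrl = mock.Mock()
    ctrl.summary.return_value = {'Id': 'RAIDStorage0', 'Name': 'RAID Card1'}
    print(get_raid_controller_list([ctrl]))
    # [{'Id': 'RAIDStorage0', 'Name': 'RAID Card1'}]
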
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_bios.py b/ironic/tests/unit/drivers/modules/ilo/test_bios.py
index f3c69a752..b65df2f88 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_bios.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_bios.py
@@ -15,7 +15,8 @@
"""Test class for IloPower module."""
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_utils import importutils
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_boot.py b/ironic/tests/unit/drivers/modules/ilo/test_boot.py
index 056467ad2..f348dd52a 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_boot.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_boot.py
@@ -17,9 +17,9 @@
import io
import tempfile
+from unittest import mock
from ironic_lib import utils as ironic_utils
-import mock
from oslo_config import cfg
from ironic.common import boot_devices
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_common.py b/ironic/tests/unit/drivers/modules/ilo/test_common.py
index 466f35a0c..d5f486aba 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_common.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_common.py
@@ -21,9 +21,9 @@ import io
import os
import shutil
import tempfile
+from unittest import mock
from ironic_lib import utils as ironic_utils
-import mock
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import uuidutils
@@ -436,6 +436,29 @@ class IloCommonMethodsTestCase(BaseIloTest):
get_ilo_object_mock.assert_called_once_with(self.node)
get_pending_boot_mode_mock.assert_called_once_with()
+ @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
+ autospec=True)
+ def test_get_current_boot_mode(self, get_ilo_object_mock):
+ ilo_object_mock = get_ilo_object_mock.return_value
+ get_current_boot_mode_mock = ilo_object_mock.get_current_boot_mode
+ get_current_boot_mode_mock.return_value = 'LEGACY'
+ ret = ilo_common.get_current_boot_mode(self.node)
+ self.assertEqual('bios', ret)
+ get_ilo_object_mock.assert_called_once_with(self.node)
+ get_current_boot_mode_mock.assert_called_once_with()
+
+ @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
+ autospec=True)
+ def test_get_current_boot_mode_fail(self, get_ilo_object_mock):
+ ilo_object_mock = get_ilo_object_mock.return_value
+ get_current_boot_mode_mock = ilo_object_mock.get_current_boot_mode
+ exc = ilo_error.IloError('error')
+ get_current_boot_mode_mock.side_effect = exc
+ self.assertRaises(exception.IloOperationError,
+ ilo_common.get_current_boot_mode, self.node)
+ get_ilo_object_mock.assert_called_once_with(self.node)
+ get_current_boot_mode_mock.assert_called_once_with()
+
@mock.patch.object(ilo_common, 'set_boot_mode', spec_set=True,
autospec=True)
def test_update_boot_mode_instance_info_exists(self,
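
The two new tests encode a small translation layer: proliantutils reports the current boot mode as 'LEGACY' or 'UEFI', while the value ironic exposes is 'bios' or 'uefi'. A hedged sketch of that mapping -- an illustrative helper, not ilo_common's actual implementation:

    ILO_TO_IRONIC_BOOT_MODE = {'LEGACY': 'bios', 'UEFI': 'uefi'}

    def to_ironic_boot_mode(ilo_mode):
        """Translate an iLO-reported boot mode to ironic's constant."""
        return ILO_TO_IRONIC_BOOT_MODE[ilo_mode.upper()]

    print(to_ironic_boot_mode('LEGACY'))  # bios
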
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_console.py b/ironic/tests/unit/drivers/modules/ilo/test_console.py
index 2db07c0b2..082a39e78 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_console.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_console.py
@@ -15,7 +15,7 @@
"""Test class for common methods used by iLO modules."""
-import mock
+from unittest import mock
from ironic.common import exception
from ironic.conductor import task_manager
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_firmware_processor.py b/ironic/tests/unit/drivers/modules/ilo/test_firmware_processor.py
index 89aa96f9f..2182e2b9a 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_firmware_processor.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_firmware_processor.py
@@ -16,9 +16,10 @@
import builtins
import io
+from unittest import mock
from urllib import parse as urlparse
-import mock
+
from oslo_utils import importutils
from ironic.common import exception
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_inspect.py b/ironic/tests/unit/drivers/modules/ilo/test_inspect.py
index deb3f2e2d..a10c34bd9 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_inspect.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_inspect.py
@@ -15,7 +15,7 @@
"""Test class for Management Interface used by iLO modules."""
-import mock
+from unittest import mock
from ironic.common import exception
from ironic.common import states
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_management.py b/ironic/tests/unit/drivers/modules/ilo/test_management.py
index fd59abbce..26b683058 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_management.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_management.py
@@ -14,11 +14,14 @@
"""Test class for Management Interface used by iLO modules."""
-import mock
+from unittest import mock
+
+import ddt
from oslo_utils import importutils
from oslo_utils import uuidutils
from ironic.common import boot_devices
+from ironic.common import boot_modes
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
@@ -40,6 +43,7 @@ ilo_error = importutils.try_import('proliantutils.exception')
INFO_DICT = db_utils.get_test_ilo_info()
+@ddt.ddt
class IloManagementTestCase(test_common.BaseIloTest):
def setUp(self):
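
The newly applied @ddt.ddt decorator enables the data-driven tests added further down: each @ddt.data row is unpacked into test arguments, generating one test method per row. A self-contained example of the pattern, using the same ddt library this file now imports:

    import unittest

    import ddt

    @ddt.ddt
    class BootModeMappingTestCase(unittest.TestCase):

        @ddt.data(('LEGACY', ['bios']),
                  ('UEFI', ['uefi']))
        @ddt.unpack
        def test_mapping(self, reported, expected):
            mapping = {'LEGACY': ['bios'], 'UEFI': ['uefi']}
            self.assertEqual(expected, mapping[reported])

    if __name__ == '__main__':
        unittest.main()
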
@@ -823,177 +827,388 @@ class IloManagementTestCase(test_common.BaseIloTest):
@mock.patch.object(ilo_common, 'attach_vmedia', spec_set=True,
autospec=True)
- @mock.patch.object(agent_base, 'execute_clean_step', autospec=True)
- def test_update_firmware_sum_mode_with_component(
- self, execute_mock, attach_vmedia_mock):
+ @mock.patch.object(agent_base, 'execute_step', autospec=True)
+ def _test_write_firmware_sum_mode_with_component(
+ self, execute_mock, attach_vmedia_mock, step_type='clean'):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- execute_mock.return_value = states.CLEANWAIT
# | GIVEN |
firmware_update_args = {
'url': 'http://any_url',
'checksum': 'xxxx',
'component': ['CP02345.scexe', 'CP02567.exe']}
- clean_step = {'step': 'update_firmware',
- 'interface': 'management',
- 'args': firmware_update_args}
- task.node.clean_step = clean_step
+ step = {'interface': 'management',
+ 'args': firmware_update_args}
+ if step_type == 'clean':
+ step['step'] = 'update_firmware_sum'
+ task.node.provision_state = states.CLEANING
+ execute_mock.return_value = states.CLEANWAIT
+ task.node.clean_step = step
+ func = task.driver.management.update_firmware_sum
+ exp_ret_state = states.CLEANWAIT
+ else:
+ step['step'] = 'flash_firmware_sum'
+ task.node.provision_state = states.DEPLOYING
+ execute_mock.return_value = states.DEPLOYWAIT
+ task.node.deploy_step = step
+ func = task.driver.management.flash_firmware_sum
+ exp_ret_state = states.DEPLOYWAIT
# | WHEN |
- return_value = task.driver.management.update_firmware_sum(
- task, **firmware_update_args)
+ return_value = func(task, **firmware_update_args)
# | THEN |
attach_vmedia_mock.assert_any_call(
task.node, 'CDROM', 'http://any_url')
- self.assertEqual(states.CLEANWAIT, return_value)
- execute_mock.assert_called_once_with(task, clean_step)
+ self.assertEqual(exp_ret_state, return_value)
+ execute_mock.assert_called_once_with(task, step, step_type)
+
+ def test_update_firmware_sum_mode_with_component(self):
+ self._test_write_firmware_sum_mode_with_component(step_type='clean')
+
+ def test_flash_firmware_sum_mode_with_component(self):
+ self._test_write_firmware_sum_mode_with_component(step_type='deploy')
@mock.patch.object(ilo_common, 'attach_vmedia', spec_set=True,
autospec=True)
@mock.patch.object(ilo_management.firmware_processor,
'get_swift_url', autospec=True)
- @mock.patch.object(agent_base, 'execute_clean_step', autospec=True)
- def test_update_firmware_sum_mode_swift_url(
- self, execute_mock, swift_url_mock, attach_vmedia_mock):
+ @mock.patch.object(agent_base, 'execute_step', autospec=True)
+ def _test_write_firmware_sum_mode_swift_url(
+ self, execute_mock, swift_url_mock, attach_vmedia_mock,
+ step_type='clean'):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- swift_url_mock.return_value = "http://path-to-file"
- execute_mock.return_value = states.CLEANWAIT
# | GIVEN |
+ swift_url_mock.return_value = "http://path-to-file"
firmware_update_args = {
'url': 'swift://container/object',
'checksum': 'xxxx',
'components': ['CP02345.scexe', 'CP02567.exe']}
- clean_step = {'step': 'update_firmware',
- 'interface': 'management',
- 'args': firmware_update_args}
- task.node.clean_step = clean_step
+ step = {'interface': 'management',
+ 'args': firmware_update_args}
+ if step_type == 'clean':
+ task.node.provision_state = states.CLEANING
+ execute_mock.return_value = states.CLEANWAIT
+                step['step'] = 'update_firmware_sum'
+ task.node.clean_step = step
+ func = task.driver.management.update_firmware_sum
+ exp_ret_state = states.CLEANWAIT
+ args_data = task.node.clean_step['args']
+ else:
+ task.node.provision_state = states.DEPLOYING
+ execute_mock.return_value = states.DEPLOYWAIT
+                step['step'] = 'flash_firmware_sum'
+ task.node.deploy_step = step
+ func = task.driver.management.flash_firmware_sum
+ exp_ret_state = states.DEPLOYWAIT
+ args_data = task.node.deploy_step['args']
# | WHEN |
- return_value = task.driver.management.update_firmware_sum(
- task, **firmware_update_args)
+ return_value = func(task, **firmware_update_args)
# | THEN |
attach_vmedia_mock.assert_any_call(
task.node, 'CDROM', 'http://path-to-file')
- self.assertEqual(states.CLEANWAIT, return_value)
- self.assertEqual(task.node.clean_step['args']['url'],
- "http://path-to-file")
+ self.assertEqual(exp_ret_state, return_value)
+ self.assertEqual(args_data['url'], "http://path-to-file")
+
+ def test_write_firmware_sum_mode_swift_url_clean(self):
+ self._test_write_firmware_sum_mode_swift_url(step_type='clean')
+
+ def test_write_firmware_sum_mode_swift_url_deploy(self):
+ self._test_write_firmware_sum_mode_swift_url(step_type='deploy')
@mock.patch.object(ilo_common, 'attach_vmedia', spec_set=True,
autospec=True)
- @mock.patch.object(agent_base, 'execute_clean_step', autospec=True)
- def test_update_firmware_sum_mode_without_component(
- self, execute_mock, attach_vmedia_mock):
+ @mock.patch.object(agent_base, 'execute_step', autospec=True)
+ def _test_write_firmware_sum_mode_without_component(
+ self, execute_mock, attach_vmedia_mock, step_type='clean'):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- execute_mock.return_value = states.CLEANWAIT
# | GIVEN |
firmware_update_args = {
'url': 'any_valid_url',
'checksum': 'xxxx'}
- clean_step = {'step': 'update_firmware',
- 'interface': 'management',
- 'args': firmware_update_args}
- task.node.clean_step = clean_step
+ step = {'interface': 'management',
+ 'args': firmware_update_args}
+ if step_type == 'clean':
+ task.node.provision_state = states.CLEANING
+ execute_mock.return_value = states.CLEANWAIT
+ step['step'] = 'update_firmware_sum'
+ task.node.clean_step = step
+ func = task.driver.management.update_firmware_sum
+ exp_ret_state = states.CLEANWAIT
+ else:
+ task.node.provision_state = states.DEPLOYING
+ execute_mock.return_value = states.DEPLOYWAIT
+ step['step'] = 'flash_firmware_sum'
+ task.node.deploy_step = step
+ func = task.driver.management.flash_firmware_sum
+ exp_ret_state = states.DEPLOYWAIT
# | WHEN |
- return_value = task.driver.management.update_firmware_sum(
- task, **firmware_update_args)
+ return_value = func(task, **firmware_update_args)
# | THEN |
attach_vmedia_mock.assert_any_call(
task.node, 'CDROM', 'any_valid_url')
- self.assertEqual(states.CLEANWAIT, return_value)
- execute_mock.assert_called_once_with(task, clean_step)
-
- def test_update_firmware_sum_mode_invalid_component(self):
+ self.assertEqual(exp_ret_state, return_value)
+ execute_mock.assert_called_once_with(task, step, step_type)
+
+ def test_write_firmware_sum_mode_without_component_clean(self):
+ self._test_write_firmware_sum_mode_without_component(
+ step_type='clean')
+
+ def test_write_firmware_sum_mode_without_component_deploy(self):
+ self._test_write_firmware_sum_mode_without_component(
+ step_type='deploy')
+
+ def _test_write_firmware_sum_mode_invalid_component(self,
+ step_type='clean'):
+ # | GIVEN |
+ firmware_update_args = {
+ 'url': 'any_valid_url',
+ 'checksum': 'xxxx',
+ 'components': ['CP02345']}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- # | GIVEN |
- firmware_update_args = {
- 'url': 'any_valid_url',
- 'checksum': 'xxxx',
- 'components': ['CP02345']}
# | WHEN & THEN |
+ if step_type == 'clean':
+ func = task.driver.management.update_firmware_sum
+ else:
+ func = task.driver.management.flash_firmware_sum
self.assertRaises(exception.InvalidParameterValue,
- task.driver.management.update_firmware_sum,
- task,
- **firmware_update_args)
+ func, task, **firmware_update_args)
+
+ def test_write_firmware_sum_mode_invalid_component_clean(self):
+ self._test_write_firmware_sum_mode_invalid_component(
+ step_type='clean')
+
+ def test_write_firmware_sum_mode_invalid_component_deploy(self):
+ self._test_write_firmware_sum_mode_invalid_component(
+ step_type='deploy')
@mock.patch.object(driver_utils, 'store_ramdisk_logs')
- def test__update_firmware_sum_final_with_logs(self, store_mock):
+ def _test__write_firmware_sum_final_with_logs(self, store_mock,
+ step_type='clean'):
self.config(deploy_logs_collect='always', group='agent')
- command = {'command_status': 'SUCCEEDED',
- 'command_result': {
- 'clean_result': {'Log Data': 'aaaabbbbcccdddd'}}
- }
+ firmware_update_args = {
+ 'url': 'any_valid_url',
+ 'checksum': 'xxxx'}
+ step = {'interface': 'management',
+ 'args': firmware_update_args}
+ if step_type == 'clean':
+ step['step'] = 'update_firmware_sum'
+ node_state = states.CLEANWAIT
+ command = {
+ 'command_status': 'SUCCEEDED',
+ 'command_result': {
+ 'clean_result': {'Log Data': 'aaaabbbbcccdddd'},
+ 'clean_step': step,
+ }
+ }
+ exp_label = 'update_firmware_sum'
+ else:
+ step['step'] = 'flash_firmware_sum'
+ node_state = states.DEPLOYWAIT
+ command = {
+ 'command_status': 'SUCCEEDED',
+ 'command_result': {
+ 'deploy_result': {'Log Data': 'aaaabbbbcccdddd'},
+ 'deploy_step': step,
+ }
+ }
+ exp_label = 'flash_firmware_sum'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
+ task.node.provision_state = node_state
task.driver.management._update_firmware_sum_final(
task, command)
store_mock.assert_called_once_with(task.node, 'aaaabbbbcccdddd',
- label='update_firmware_sum')
+ label=exp_label)
+
+ def test__write_firmware_sum_final_with_logs_clean(self):
+ self._test__write_firmware_sum_final_with_logs(step_type='clean')
+
+ def test__write_firmware_sum_final_with_logs_deploy(self):
+ self._test__write_firmware_sum_final_with_logs(step_type='deploy')
@mock.patch.object(driver_utils, 'store_ramdisk_logs')
- def test__update_firmware_sum_final_without_logs(self, store_mock):
+ def _test__write_firmware_sum_final_without_logs(self, store_mock,
+ step_type='clean'):
self.config(deploy_logs_collect='on_failure', group='agent')
- command = {'command_status': 'SUCCEEDED',
- 'command_result': {
- 'clean_result': {'Log Data': 'aaaabbbbcccdddd'}}
- }
+ firmware_update_args = {
+ 'url': 'any_valid_url',
+ 'checksum': 'xxxx'}
+ step = {'interface': 'management',
+ 'args': firmware_update_args}
+ if step_type == 'clean':
+ step['step'] = 'update_firmware_sum'
+ command = {
+ 'command_status': 'SUCCEEDED',
+ 'command_result': {
+ 'clean_result': {'Log Data': 'aaaabbbbcccdddd'},
+ 'clean_step': step,
+ }
+ }
+ else:
+ step['step'] = 'flash_firmware_sum'
+ command = {
+ 'command_status': 'SUCCEEDED',
+ 'command_result': {
+ 'deploy_result': {'Log Data': 'aaaabbbbcccdddd'},
+ 'deploy_step': step,
+ }
+ }
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management._update_firmware_sum_final(
task, command)
self.assertFalse(store_mock.called)
+ def test__write_firmware_sum_final_without_logs_clean(self):
+ self._test__write_firmware_sum_final_without_logs(step_type='clean')
+
+ def test__write_firmware_sum_final_without_logs_deploy(self):
+ self._test__write_firmware_sum_final_without_logs(step_type='deploy')
+
@mock.patch.object(ilo_management, 'LOG', spec_set=True, autospec=True)
@mock.patch.object(driver_utils, 'store_ramdisk_logs')
- def test__update_firmware_sum_final_swift_error(self, store_mock,
- log_mock):
+ def _test__write_firmware_sum_final_swift_error(self, store_mock,
+ log_mock,
+ step_type='clean'):
self.config(deploy_logs_collect='always', group='agent')
- command = {'command_status': 'SUCCEEDED',
- 'command_result': {
- 'clean_result': {'Log Data': 'aaaabbbbcccdddd'}}
- }
+ firmware_update_args = {
+ 'url': 'any_valid_url',
+ 'checksum': 'xxxx'}
+ step = {'interface': 'management',
+ 'args': firmware_update_args}
+ if step_type == 'clean':
+ step['step'] = 'update_firmware_sum'
+ node_state = states.CLEANWAIT
+ command = {
+ 'command_status': 'SUCCEEDED',
+ 'command_result': {
+ 'clean_result': {'Log Data': 'aaaabbbbcccdddd'},
+ 'clean_step': step,
+ }
+ }
+ else:
+ step['step'] = 'flash_firmware_sum'
+ node_state = states.DEPLOYWAIT
+ command = {
+ 'command_status': 'SUCCEEDED',
+ 'command_result': {
+ 'deploy_result': {'Log Data': 'aaaabbbbcccdddd'},
+ 'deploy_step': step,
+ }
+ }
store_mock.side_effect = exception.SwiftOperationError('Error')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
+ task.node.provision_state = node_state
task.driver.management._update_firmware_sum_final(
task, command)
self.assertTrue(log_mock.error.called)
+ def test__write_firmware_sum_final_swift_error_clean(self):
+ self._test__write_firmware_sum_final_swift_error(step_type='clean')
+
+ def test__write_firmware_sum_final_swift_error_deploy(self):
+ self._test__write_firmware_sum_final_swift_error(step_type='deploy')
+
@mock.patch.object(ilo_management, 'LOG', spec_set=True, autospec=True)
@mock.patch.object(driver_utils, 'store_ramdisk_logs')
- def test__update_firmware_sum_final_environment_error(self, store_mock,
- log_mock):
+ def _test__write_firmware_sum_final_environment_error(self, store_mock,
+ log_mock,
+ step_type='clean'):
self.config(deploy_logs_collect='always', group='agent')
- command = {'command_status': 'SUCCEEDED',
- 'command_result': {
- 'clean_result': {'Log Data': 'aaaabbbbcccdddd'}}
- }
+ firmware_update_args = {
+ 'url': 'any_valid_url',
+ 'checksum': 'xxxx'}
+ step = {'interface': 'management',
+ 'args': firmware_update_args}
+ if step_type == 'clean':
+ step['step'] = 'update_firmware_sum'
+ node_state = states.CLEANWAIT
+ command = {
+ 'command_status': 'SUCCEEDED',
+ 'command_result': {
+ 'clean_result': {'Log Data': 'aaaabbbbcccdddd'},
+ 'clean_step': step,
+ }
+ }
+ else:
+ step['step'] = 'flash_firmware_sum'
+ node_state = states.DEPLOYWAIT
+ command = {
+ 'command_status': 'SUCCEEDED',
+ 'command_result': {
+ 'deploy_result': {'Log Data': 'aaaabbbbcccdddd'},
+ 'deploy_step': step,
+ }
+ }
store_mock.side_effect = EnvironmentError('Error')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
+ task.node.provision_state = node_state
task.driver.management._update_firmware_sum_final(
task, command)
self.assertTrue(log_mock.exception.called)
+ def test__write_firmware_sum_final_environment_error_clean(self):
+ self._test__write_firmware_sum_final_environment_error(
+ step_type='clean')
+
+ def test__write_firmware_sum_final_environment_error_deploy(self):
+ self._test__write_firmware_sum_final_environment_error(
+ step_type='deploy')
+
@mock.patch.object(ilo_management, 'LOG', spec_set=True, autospec=True)
@mock.patch.object(driver_utils, 'store_ramdisk_logs')
- def test__update_firmware_sum_final_unknown_exception(self, store_mock,
- log_mock):
+ def _test__write_firmware_sum_final_unknown_exception(self, store_mock,
+ log_mock,
+ step_type='clean'):
self.config(deploy_logs_collect='always', group='agent')
- command = {'command_status': 'SUCCEEDED',
- 'command_result': {
- 'clean_result': {'Log Data': 'aaaabbbbcccdddd'}}
- }
+ firmware_update_args = {
+ 'url': 'any_valid_url',
+ 'checksum': 'xxxx'}
+ step = {'interface': 'management',
+ 'args': firmware_update_args}
+ if step_type == 'clean':
+ step['step'] = 'update_firmware_sum'
+ node_state = states.CLEANWAIT
+ command = {
+ 'command_status': 'SUCCEEDED',
+ 'command_result': {
+ 'clean_result': {'Log Data': 'aaaabbbbcccdddd'},
+ 'clean_step': step,
+ }
+ }
+ else:
+ step['step'] = 'flash_firmware_sum'
+ node_state = states.DEPLOYWAIT
+ command = {
+ 'command_status': 'SUCCEEDED',
+ 'command_result': {
+ 'deploy_result': {'Log Data': 'aaaabbbbcccdddd'},
+ 'deploy_step': step,
+ }
+ }
store_mock.side_effect = Exception('Error')
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
+ task.node.provision_state = node_state
task.driver.management._update_firmware_sum_final(
task, command)
self.assertTrue(log_mock.exception.called)
+ def test__write_firmware_sum_final_unknown_exception_clean(self):
+ self._test__write_firmware_sum_final_unknown_exception(
+ step_type='clean')
+
+ def test__write_firmware_sum_final_unknown_exception_deploy(self):
+ self._test__write_firmware_sum_final_unknown_exception(
+ step_type='deploy')
+
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
def test_set_iscsi_boot_target_with_auth(self, get_ilo_object_mock):
@@ -1180,6 +1395,88 @@ class IloManagementTestCase(test_common.BaseIloTest):
task.driver.management.inject_nmi,
task)
+ @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
+ autospec=True)
+ @ddt.data((ilo_common.SUPPORTED_BOOT_MODE_LEGACY_BIOS_ONLY,
+ ['bios']),
+ (ilo_common.SUPPORTED_BOOT_MODE_UEFI_ONLY,
+ ['uefi']),
+ (ilo_common.SUPPORTED_BOOT_MODE_LEGACY_BIOS_AND_UEFI,
+ ['uefi', 'bios']))
+ @ddt.unpack
+ def test_get_supported_boot_modes(self, boot_modes_val,
+ exp_boot_modes,
+ get_ilo_object_mock):
+ ilo_object_mock = get_ilo_object_mock.return_value
+ ilo_object_mock.get_supported_boot_mode.return_value = boot_modes_val
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ supported_boot_modes = (
+ task.driver.management.get_supported_boot_modes(task))
+ self.assertEqual(exp_boot_modes, supported_boot_modes)
+
+ @mock.patch.object(ilo_common, 'set_boot_mode', spec_set=True,
+ autospec=True)
+ @mock.patch.object(ilo_management.IloManagement,
+ 'get_supported_boot_modes',
+ spec_set=True, autospec=True)
+ def test_set_boot_mode(self, supp_boot_modes_mock,
+ set_boot_mode_mock):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ exp_boot_modes = [boot_modes.UEFI, boot_modes.LEGACY_BIOS]
+ supp_boot_modes_mock.return_value = exp_boot_modes
+
+ for mode in exp_boot_modes:
+ task.driver.management.set_boot_mode(task, mode=mode)
+ supp_boot_modes_mock.assert_called_once_with(mock.ANY, task)
+ set_boot_mode_mock.assert_called_once_with(task.node, mode)
+ set_boot_mode_mock.reset_mock()
+ supp_boot_modes_mock.reset_mock()
+
+ @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
+ autospec=True)
+ @mock.patch.object(ilo_management.IloManagement,
+ 'get_supported_boot_modes',
+ spec_set=True, autospec=True)
+ def test_set_boot_mode_fail(self, supp_boot_modes_mock,
+ get_ilo_object_mock):
+ ilo_mock_obj = get_ilo_object_mock.return_value
+ ilo_mock_obj.get_pending_boot_mode.return_value = 'legacy'
+ exc = ilo_error.IloError('error')
+ ilo_mock_obj.set_pending_boot_mode.side_effect = exc
+ exp_boot_modes = [boot_modes.UEFI, boot_modes.LEGACY_BIOS]
+ supp_boot_modes_mock.return_value = exp_boot_modes
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ self.assertRaisesRegex(
+ exception.IloOperationError, 'uefi as boot mode failed',
+ task.driver.management.set_boot_mode, task, boot_modes.UEFI)
+ supp_boot_modes_mock.assert_called_once_with(mock.ANY, task)
+
+ @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
+ autospec=True)
+ def test_get_boot_mode(self, get_ilo_object_mock):
+ expected = 'bios'
+ ilo_mock_obj = get_ilo_object_mock.return_value
+ ilo_mock_obj.get_current_boot_mode.return_value = 'LEGACY'
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ response = task.driver.management.get_boot_mode(task)
+ self.assertEqual(expected, response)
+
+ @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
+ autospec=True)
+ def test_get_boot_mode_fail(self, get_ilo_object_mock):
+ ilo_mock_obj = get_ilo_object_mock.return_value
+ exc = ilo_error.IloError('error')
+ ilo_mock_obj.get_current_boot_mode.side_effect = exc
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ self.assertRaisesRegex(
+ exception.IloOperationError, 'Get current boot mode',
+ task.driver.management.get_boot_mode, task)
+
class Ilo5ManagementTestCase(db_base.DbTestCase):
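
Across the firmware-SUM refactor above, every clean/deploy test pair exercises the same dual path: during cleaning the step is update_firmware_sum, execute_step is called with step_type='clean' and the method returns CLEANWAIT; during deployment the step is flash_firmware_sum with step_type='deploy' and DEPLOYWAIT. A compact sketch of that dispatch (local constants stand in for ironic.common.states; this is not the driver's code):

    CLEANING, DEPLOYING = 'cleaning', 'deploying'
    CLEANWAIT, DEPLOYWAIT = 'clean wait', 'deploy wait'

    def firmware_sum_dispatch(provision_state):
        """Return (step_name, step_type, wait_state) for the node state."""
        if provision_state == CLEANING:
            return 'update_firmware_sum', 'clean', CLEANWAIT
        return 'flash_firmware_sum', 'deploy', DEPLOYWAIT

    assert firmware_sum_dispatch(CLEANING) == (
        'update_firmware_sum', 'clean', CLEANWAIT)
    assert firmware_sum_dispatch(DEPLOYING) == (
        'flash_firmware_sum', 'deploy', DEPLOYWAIT)
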
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_power.py b/ironic/tests/unit/drivers/modules/ilo/test_power.py
index 3fe38da97..ded22bd6c 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_power.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_power.py
@@ -15,7 +15,8 @@
"""Test class for IloPower module."""
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import uuidutils
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_raid.py b/ironic/tests/unit/drivers/modules/ilo/test_raid.py
index 7e89c2160..6485b5d1e 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_raid.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_raid.py
@@ -14,7 +14,8 @@
"""Test class for Raid Interface used by iLO5."""
-import mock
+from unittest import mock
+
from oslo_utils import importutils
from ironic.common import exception
diff --git a/ironic/tests/unit/drivers/modules/ilo/test_vendor.py b/ironic/tests/unit/drivers/modules/ilo/test_vendor.py
index 2a9ebc61f..05a735d21 100644
--- a/ironic/tests/unit/drivers/modules/ilo/test_vendor.py
+++ b/ironic/tests/unit/drivers/modules/ilo/test_vendor.py
@@ -15,7 +15,7 @@
"""Test class for vendor methods used by iLO modules."""
-import mock
+from unittest import mock
from ironic.common import exception
from ironic.common import states
diff --git a/ironic/tests/unit/drivers/modules/intel_ipmi/test_management.py b/ironic/tests/unit/drivers/modules/intel_ipmi/test_management.py
index 589906777..c42bfa9ec 100644
--- a/ironic/tests/unit/drivers/modules/intel_ipmi/test_management.py
+++ b/ironic/tests/unit/drivers/modules/intel_ipmi/test_management.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import mock
+from unittest import mock
from ironic.common import exception
from ironic.conductor import task_manager
diff --git a/ironic/tests/unit/drivers/modules/irmc/test_bios.py b/ironic/tests/unit/drivers/modules/irmc/test_bios.py
index 9876317f8..093bcdea4 100644
--- a/ironic/tests/unit/drivers/modules/irmc/test_bios.py
+++ b/ironic/tests/unit/drivers/modules/irmc/test_bios.py
@@ -16,7 +16,7 @@
Test class for IRMC BIOS configuration
"""
-import mock
+from unittest import mock
from ironic.common import exception
from ironic.conductor import task_manager
diff --git a/ironic/tests/unit/drivers/modules/irmc/test_boot.py b/ironic/tests/unit/drivers/modules/irmc/test_boot.py
index fd83b8cc8..d8fdc2c52 100644
--- a/ironic/tests/unit/drivers/modules/irmc/test_boot.py
+++ b/ironic/tests/unit/drivers/modules/irmc/test_boot.py
@@ -20,9 +20,9 @@ import io
import os
import shutil
import tempfile
+from unittest import mock
from ironic_lib import utils as ironic_utils
-import mock
from oslo_config import cfg
from oslo_utils import uuidutils
@@ -66,13 +66,12 @@ PARSED_IFNO = {
}
+@mock.patch.object(irmc_boot, 'check_share_fs_mounted', spec_set=True,
+ autospec=True)
class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
-
boot_interface = 'irmc-virtual-media'
def setUp(self):
- irmc_boot.check_share_fs_mounted_patcher.start()
- self.addCleanup(irmc_boot.check_share_fs_mounted_patcher.stop)
super(IRMCDeployPrivateMethodsTestCase, self).setUp()
CONF.irmc.remote_image_share_root = '/remote_image_share_root'
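
The change above swaps a patcher started and stopped in setUp() for a class-level mock.patch.object decorator. Applied to a TestCase class, the patch wraps every test method and injects the mock as an extra trailing argument, which is why each test signature in the rest of this file grows a check_share_fs_mounted_mock parameter. A minimal self-contained demonstration of the mechanism (the Boot class here is a stand-in, not irmc_boot):

    import unittest
    from unittest import mock

    class Boot:
        def check_share_fs_mounted(self):
            raise RuntimeError('would touch the real NFS/CIFS share')

    @mock.patch.object(Boot, 'check_share_fs_mounted', autospec=True)
    class BootTestCase(unittest.TestCase):

        def test_mounted(self, check_share_fs_mounted_mock):
            check_share_fs_mounted_mock.return_value = None
            boot = Boot()
            self.assertIsNone(boot.check_share_fs_mounted())
            check_share_fs_mounted_mock.assert_called_once_with(boot)

    if __name__ == '__main__':
        unittest.main()
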
@@ -84,7 +83,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
CONF.irmc.remote_image_user_domain = 'local'
@mock.patch.object(os.path, 'isdir', spec_set=True, autospec=True)
- def test__parse_config_option(self, isdir_mock):
+ def test__parse_config_option(self, isdir_mock,
+ check_share_fs_mounted_mock):
isdir_mock.return_value = True
result = irmc_boot._parse_config_option()
@@ -93,7 +93,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
self.assertIsNone(result)
@mock.patch.object(os.path, 'isdir', spec_set=True, autospec=True)
- def test__parse_config_option_non_existed_root(self, isdir_mock):
+ def test__parse_config_option_non_existed_root(
+ self, isdir_mock, check_share_fs_mounted_mock):
CONF.irmc.remote_image_share_root = '/non_existed_root'
isdir_mock.return_value = False
@@ -102,7 +103,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
isdir_mock.assert_called_once_with('/non_existed_root')
@mock.patch.object(os.path, 'isfile', spec_set=True, autospec=True)
- def test__parse_driver_info_in_share(self, isfile_mock):
+ def test__parse_driver_info_in_share(self, isfile_mock,
+ check_share_fs_mounted_mock):
"""With required 'irmc_deploy_iso' in share."""
isfile_mock.return_value = True
self.node.driver_info['irmc_deploy_iso'] = 'deploy.iso'
@@ -118,10 +120,11 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_boot, '_is_image_href_ordinary_file_name',
spec_set=True, autospec=True)
def test__parse_driver_info_not_in_share(
- self, is_image_href_ordinary_file_name_mock):
+ self, is_image_href_ordinary_file_name_mock,
+ check_share_fs_mounted_mock):
"""With required 'irmc_deploy_iso' not in share."""
- self.node.driver_info[
- 'irmc_rescue_iso'] = 'bc784057-a140-4130-add3-ef890457e6b3'
+ self.node.driver_info['irmc_rescue_iso'] = (
+ 'bc784057-a140-4130-add3-ef890457e6b3')
driver_info_expected = {'irmc_rescue_iso':
'bc784057-a140-4130-add3-ef890457e6b3'}
is_image_href_ordinary_file_name_mock.return_value = False
@@ -132,7 +135,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
self.assertEqual(driver_info_expected, driver_info_actual)
@mock.patch.object(os.path, 'isfile', spec_set=True, autospec=True)
- def test__parse_driver_info_with_iso_invalid(self, isfile_mock):
+ def test__parse_driver_info_with_iso_invalid(self, isfile_mock,
+ check_share_fs_mounted_mock):
"""With required 'irmc_deploy_iso' non existed."""
isfile_mock.return_value = False
@@ -148,7 +152,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
task.node, mode='deploy')
self.assertEqual(error_msg, str(e))
- def test__parse_driver_info_with_iso_missing(self):
+ def test__parse_driver_info_with_iso_missing(self,
+ check_share_fs_mounted_mock):
"""With required 'irmc_rescue_iso' empty."""
self.node.driver_info['irmc_rescue_iso'] = None
@@ -160,7 +165,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
self.node, mode='rescue')
self.assertEqual(error_msg, str(e))
- def test__parse_instance_info_with_boot_iso_file_name_ok(self):
+ def test__parse_instance_info_with_boot_iso_file_name_ok(
+ self, check_share_fs_mounted_mock):
"""With optional 'irmc_boot_iso' file name."""
CONF.irmc.remote_image_share_root = '/etc'
self.node.instance_info['irmc_boot_iso'] = 'hosts'
@@ -169,7 +175,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
self.assertEqual(instance_info_expected, instance_info_actual)
- def test__parse_instance_info_without_boot_iso_ok(self):
+ def test__parse_instance_info_without_boot_iso_ok(
+ self, check_share_fs_mounted_mock):
"""With optional no 'irmc_boot_iso' file name."""
CONF.irmc.remote_image_share_root = '/etc'
@@ -179,7 +186,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
self.assertEqual(instance_info_expected, instance_info_actual)
- def test__parse_instance_info_with_boot_iso_uuid_ok(self):
+ def test__parse_instance_info_with_boot_iso_uuid_ok(
+ self, check_share_fs_mounted_mock):
"""With optional 'irmc_boot_iso' glance uuid."""
self.node.instance_info[
'irmc_boot_iso'] = 'bc784057-a140-4130-add3-ef890457e6b3'
@@ -189,7 +197,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
self.assertEqual(instance_info_expected, instance_info_actual)
- def test__parse_instance_info_with_boot_iso_glance_ok(self):
+ def test__parse_instance_info_with_boot_iso_glance_ok(
+ self, check_share_fs_mounted_mock):
"""With optional 'irmc_boot_iso' glance url."""
self.node.instance_info['irmc_boot_iso'] = (
'glance://bc784057-a140-4130-add3-ef890457e6b3')
@@ -200,7 +209,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
self.assertEqual(instance_info_expected, instance_info_actual)
- def test__parse_instance_info_with_boot_iso_http_ok(self):
+ def test__parse_instance_info_with_boot_iso_http_ok(
+ self, check_share_fs_mounted_mock):
"""With optional 'irmc_boot_iso' http url."""
self.node.driver_info[
'irmc_deploy_iso'] = 'http://irmc_boot_iso'
@@ -209,7 +219,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
self.assertEqual(driver_info_expected, driver_info_actual)
- def test__parse_instance_info_with_boot_iso_https_ok(self):
+ def test__parse_instance_info_with_boot_iso_https_ok(
+ self, check_share_fs_mounted_mock):
"""With optional 'irmc_boot_iso' https url."""
self.node.instance_info[
'irmc_boot_iso'] = 'https://irmc_boot_iso'
@@ -218,7 +229,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
self.assertEqual(instance_info_expected, instance_info_actual)
- def test__parse_instance_info_with_boot_iso_file_url_ok(self):
+ def test__parse_instance_info_with_boot_iso_file_url_ok(
+ self, check_share_fs_mounted_mock):
"""With optional 'irmc_boot_iso' file url."""
self.node.instance_info[
'irmc_boot_iso'] = 'file://irmc_boot_iso'
@@ -228,7 +240,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
self.assertEqual(instance_info_expected, instance_info_actual)
@mock.patch.object(os.path, 'isfile', spec_set=True, autospec=True)
- def test__parse_instance_info_with_boot_iso_invalid(self, isfile_mock):
+ def test__parse_instance_info_with_boot_iso_invalid(
+ self, isfile_mock, check_share_fs_mounted_mock):
CONF.irmc.remote_image_share_root = '/etc'
isfile_mock.return_value = False
@@ -249,7 +262,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
spec_set=True, autospec=True)
@mock.patch('os.path.isfile', autospec=True)
def test_parse_deploy_info_ok(self, mock_isfile,
- get_image_instance_info_mock):
+ get_image_instance_info_mock,
+ check_share_fs_mounted_mock):
CONF.irmc.remote_image_share_root = '/etc'
get_image_instance_info_mock.return_value = {'a': 'b'}
driver_info_expected = {'a': 'b',
@@ -276,7 +290,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
def test__setup_vmedia_with_file_deploy(self,
fetch_mock,
setup_vmedia_mock,
- set_boot_device_mock):
+ set_boot_device_mock,
+ check_share_fs_mounted_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_info['irmc_deploy_iso'] = 'deploy_iso_filename'
@@ -302,7 +317,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
def test__setup_vmedia_with_file_rescue(self,
fetch_mock,
setup_vmedia_mock,
- set_boot_device_mock):
+ set_boot_device_mock,
+ check_share_fs_mounted_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_info['irmc_rescue_iso'] = 'rescue_iso_filename'
@@ -329,7 +345,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
self,
fetch_mock,
setup_vmedia_mock,
- set_boot_device_mock):
+ set_boot_device_mock,
+ check_share_fs_mounted_mock):
CONF.irmc.remote_image_share_root = '/'
with task_manager.acquire(self.context, self.node.uuid,
@@ -361,7 +378,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
self,
fetch_mock,
setup_vmedia_mock,
- set_boot_device_mock):
+ set_boot_device_mock,
+ check_share_fs_mounted_mock):
CONF.irmc.remote_image_share_root = '/'
with task_manager.acquire(self.context, self.node.uuid,
@@ -383,7 +401,7 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
set_boot_device_mock.assert_called_once_with(
task, boot_devices.CDROM)
- def test__get_iso_name(self):
+ def test__get_iso_name(self, check_share_fs_mounted_mock):
actual = irmc_boot._get_iso_name(self.node, label='deploy')
expected = "deploy-%s.iso" % self.node.uuid
self.assertEqual(expected, actual)
@@ -402,7 +420,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
fetch_mock,
image_props_mock,
boot_mode_mock,
- create_boot_iso_mock):
+ create_boot_iso_mock,
+ check_share_fs_mounted_mock):
deploy_info_mock.return_value = {'irmc_boot_iso': 'irmc_boot.iso'}
with task_manager.acquire(self.context, self.node.uuid) as task:
irmc_boot._prepare_boot_iso(task, 'root-uuid')
@@ -433,8 +452,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
fetch_mock,
image_props_mock,
boot_mode_mock,
- create_boot_iso_mock):
-
+ create_boot_iso_mock,
+ check_share_fs_mounted_mock):
CONF.irmc.remote_image_share_root = '/'
image = '733d1c44-a2ea-414b-aca7-69decf20d810'
is_image_href_ordinary_file_name_mock.return_value = False
@@ -471,7 +490,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
fetch_mock,
image_props_mock,
boot_mode_mock,
- create_boot_iso_mock):
+ create_boot_iso_mock,
+ check_share_fs_mounted_mock):
CONF.pxe.pxe_append_params = 'kernel-params'
deploy_info_mock.return_value = \
@@ -502,7 +522,7 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
self.assertEqual("boot-%s.iso" % self.node.uuid,
task.node.driver_internal_info['irmc_boot_iso'])
- def test__get_floppy_image_name(self):
+ def test__get_floppy_image_name(self, check_share_fs_mounted_mock):
actual = irmc_boot._get_floppy_image_name(self.node)
expected = "image-%s.img" % self.node.uuid
self.assertEqual(expected, actual)
@@ -515,7 +535,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
def test__prepare_floppy_image(self,
tempfile_mock,
create_vfat_image_mock,
- copyfile_mock):
+ copyfile_mock,
+ check_share_fs_mounted_mock):
mock_image_file_handle = mock.MagicMock(spec=io.BytesIO)
mock_image_file_obj = mock.MagicMock()
mock_image_file_obj.name = 'image-tmp-file'
@@ -542,7 +563,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
def test__prepare_floppy_image_exception(self,
tempfile_mock,
create_vfat_image_mock,
- copyfile_mock):
+ copyfile_mock,
+ check_share_fs_mounted_mock):
mock_image_file_handle = mock.MagicMock(spec=io.BytesIO)
mock_image_file_obj = mock.MagicMock()
mock_image_file_obj.name = 'image-tmp-file'
@@ -572,7 +594,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
def test_attach_boot_iso_if_needed(
self,
setup_vmedia_mock,
- set_boot_device_mock):
+ set_boot_device_mock,
+ check_share_fs_mounted_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.provision_state = states.ACTIVE
@@ -589,7 +612,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
def test_attach_boot_iso_if_needed_on_rebuild(
self,
setup_vmedia_mock,
- set_boot_device_mock):
+ set_boot_device_mock,
+ check_share_fs_mounted_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.provision_state = states.DEPLOYING
@@ -608,12 +632,14 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
autospec=True)
@mock.patch.object(irmc_boot, '_detach_virtual_cd', spec_set=True,
autospec=True)
- def test__setup_vmedia_for_boot_with_parameters(self,
- _detach_virtual_cd_mock,
- _detach_virtual_fd_mock,
- _prepare_floppy_image_mock,
- _attach_virtual_fd_mock,
- _attach_virtual_cd_mock):
+ def test__setup_vmedia_for_boot_with_parameters(
+ self,
+ _detach_virtual_cd_mock,
+ _detach_virtual_fd_mock,
+ _prepare_floppy_image_mock,
+ _attach_virtual_fd_mock,
+ _attach_virtual_cd_mock,
+ check_share_fs_mounted_mock):
parameters = {'a': 'b'}
iso_filename = 'deploy_iso_or_boot_iso'
_prepare_floppy_image_mock.return_value = 'floppy_file_name'
@@ -640,8 +666,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
self,
_detach_virtual_cd_mock,
_detach_virtual_fd_mock,
- _attach_virtual_cd_mock):
-
+ _attach_virtual_cd_mock,
+ check_share_fs_mounted_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
irmc_boot._setup_vmedia_for_boot(task, 'bootable_iso_filename')
@@ -667,7 +693,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
_detach_virtual_fd_mock,
_remove_share_file_mock,
_get_floppy_image_name_mock,
- _get_iso_name_mock):
+ _get_iso_name_mock,
+ check_share_fs_mounted_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
irmc_boot._cleanup_vmedia_boot(task)
@@ -686,7 +713,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
@mock.patch.object(ironic_utils, 'unlink_without_raise', spec_set=True,
autospec=True)
- def test__remove_share_file(self, unlink_without_raise_mock):
+ def test__remove_share_file(self, unlink_without_raise_mock,
+ check_share_fs_mounted_mock):
CONF.irmc.remote_image_share_root = '/share'
irmc_boot._remove_share_file("boot.iso")
@@ -695,7 +723,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
- def test__attach_virtual_cd_ok(self, get_irmc_client_mock):
+ def test__attach_virtual_cd_ok(self, get_irmc_client_mock,
+ check_share_fs_mounted_mock):
irmc_client = get_irmc_client_mock.return_value
irmc_boot.scci.get_virtual_cd_set_params_cmd = (
mock.MagicMock(sepc_set=[]))
@@ -730,7 +759,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
- def test__attach_virtual_cd_fail(self, get_irmc_client_mock):
+ def test__attach_virtual_cd_fail(self, get_irmc_client_mock,
+ check_share_fs_mounted_mock):
irmc_client = get_irmc_client_mock.return_value
irmc_client.side_effect = Exception("fake error")
irmc_boot.scci.SCCIClientError = Exception
@@ -747,7 +777,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
- def test__detach_virtual_cd_ok(self, get_irmc_client_mock):
+ def test__detach_virtual_cd_ok(self, get_irmc_client_mock,
+ check_share_fs_mounted_mock):
irmc_client = get_irmc_client_mock.return_value
with task_manager.acquire(self.context, self.node.uuid,
@@ -758,7 +789,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
- def test__detach_virtual_cd_fail(self, get_irmc_client_mock):
+ def test__detach_virtual_cd_fail(self, get_irmc_client_mock,
+ check_share_fs_mounted_mock):
irmc_client = get_irmc_client_mock.return_value
irmc_client.side_effect = Exception("fake error")
irmc_boot.scci.SCCIClientError = Exception
@@ -773,7 +805,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
- def test__attach_virtual_fd_ok(self, get_irmc_client_mock):
+ def test__attach_virtual_fd_ok(self, get_irmc_client_mock,
+ check_share_fs_mounted_mock):
irmc_client = get_irmc_client_mock.return_value
irmc_boot.scci.get_virtual_fd_set_params_cmd = (
mock.MagicMock(sepc_set=[]))
@@ -809,7 +842,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
- def test__attach_virtual_fd_fail(self, get_irmc_client_mock):
+ def test__attach_virtual_fd_fail(self, get_irmc_client_mock,
+ check_share_fs_mounted_mock):
irmc_client = get_irmc_client_mock.return_value
irmc_client.side_effect = Exception("fake error")
irmc_boot.scci.SCCIClientError = Exception
@@ -826,7 +860,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
- def test__detach_virtual_fd_ok(self, get_irmc_client_mock):
+ def test__detach_virtual_fd_ok(self, get_irmc_client_mock,
+ check_share_fs_mounted_mock):
irmc_client = get_irmc_client_mock.return_value
with task_manager.acquire(self.context, self.node.uuid,
@@ -837,7 +872,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_common, 'get_irmc_client', spec_set=True,
autospec=True)
- def test__detach_virtual_fd_fail(self, get_irmc_client_mock):
+ def test__detach_virtual_fd_fail(self, get_irmc_client_mock,
+ check_share_fs_mounted_mock):
irmc_client = get_irmc_client_mock.return_value
irmc_client.side_effect = Exception("fake error")
irmc_boot.scci.SCCIClientError = Exception
@@ -852,7 +888,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_boot, '_parse_config_option', spec_set=True,
autospec=True)
- def test_check_share_fs_mounted_ok(self, parse_conf_mock):
+ def test_check_share_fs_mounted_ok(self, parse_conf_mock,
+ check_share_fs_mounted_mock):
# Note(naohirot): mock.patch.stop() and mock.patch.start() don't work,
#                 therefore monkey patching is applied to
#                 irmc_boot.check_share_fs_mounted.
@@ -868,7 +905,8 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_boot, '_parse_config_option', spec_set=True,
autospec=True)
- def test_check_share_fs_mounted_exception(self, parse_conf_mock):
+ def test_check_share_fs_mounted_exception(self, parse_conf_mock,
+ check_share_fs_mounted_mock):
# Note(naohirot): mock.patch.stop() and mock.patch.start() don't work,
#                 therefore monkey patching is applied to
#                 irmc_boot.check_share_fs_mounted.
@@ -883,13 +921,12 @@ class IRMCDeployPrivateMethodsTestCase(test_common.BaseIRMCTest):
parse_conf_mock.assert_called_once_with()
+@mock.patch.object(irmc_boot, 'check_share_fs_mounted', spec_set=True,
+ autospec=True)
class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
-
boot_interface = 'irmc-virtual-media'
def setUp(self):
- irmc_boot.check_share_fs_mounted_patcher.start()
- self.addCleanup(irmc_boot.check_share_fs_mounted_patcher.stop)
super(IRMCVirtualMediaBootTestCase, self).setUp()
@mock.patch.object(deploy_utils, 'validate_image_properties',
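The hunk above replaces the per-test patcher started in setUp() with a single
class-level @mock.patch.object decorator. A decorator applied to a test class
patches the target for every test method in that class and appends the mock as
the last positional argument of each one, which is why every signature in this
file grows a trailing check_share_fs_mounted_mock. A minimal, self-contained
sketch of the mechanism (the boot namespace and names here are illustrative
stand-ins, not the iRMC driver itself):

    import unittest
    from unittest import mock

    class boot:  # hypothetical stand-in for a driver module namespace
        @staticmethod
        def check_share_fs_mounted():
            raise RuntimeError("would touch a real NFS/CIFS share")

    @mock.patch.object(boot, 'check_share_fs_mounted', autospec=True)
    class BootTestCase(unittest.TestCase):
        # The class-level decorator wraps every test_* method, so each
        # one receives the patched mock as its last positional argument.
        def test_mounted_check_is_stubbed(self, check_share_fs_mounted_mock):
            boot.check_share_fs_mounted()
            check_share_fs_mounted_mock.assert_called_once_with()

    if __name__ == '__main__':
        unittest.main()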
@@ -898,13 +935,11 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
autospec=True)
@mock.patch.object(irmc_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
- @mock.patch.object(irmc_boot, 'check_share_fs_mounted', spec_set=True,
- autospec=True)
def test_validate_whole_disk_image(self,
- check_share_fs_mounted_mock,
deploy_info_mock,
is_glance_image_mock,
- validate_prop_mock):
+ validate_prop_mock,
+ check_share_fs_mounted_mock):
d_info = {'image_source': '733d1c44-a2ea-414b-aca7-69decf20d810'}
deploy_info_mock.return_value = d_info
with task_manager.acquire(self.context, self.node.uuid,
@@ -912,7 +947,7 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
task.node.driver_internal_info = {'is_whole_disk_image': True}
task.driver.boot.validate(task)
- check_share_fs_mounted_mock.assert_called_once_with()
+        self.assertEqual(2, check_share_fs_mounted_mock.call_count)
deploy_info_mock.assert_called_once_with(task.node)
self.assertFalse(is_glance_image_mock.called)
validate_prop_mock.assert_called_once_with(task.context,
@@ -924,13 +959,11 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
autospec=True)
@mock.patch.object(irmc_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
- @mock.patch.object(irmc_boot, 'check_share_fs_mounted', spec_set=True,
- autospec=True)
def test_validate_glance_image(self,
- check_share_fs_mounted_mock,
deploy_info_mock,
is_glance_image_mock,
- validate_prop_mock):
+ validate_prop_mock,
+ check_share_fs_mounted_mock):
d_info = {'image_source': '733d1c44-a2ea-414b-aca7-69decf20d810'}
deploy_info_mock.return_value = d_info
is_glance_image_mock.return_value = True
@@ -938,7 +971,7 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
shared=False) as task:
task.driver.boot.validate(task)
- check_share_fs_mounted_mock.assert_called_once_with()
+        self.assertEqual(2, check_share_fs_mounted_mock.call_count)
deploy_info_mock.assert_called_once_with(task.node)
validate_prop_mock.assert_called_once_with(
task.context, d_info, ['kernel_id', 'ramdisk_id'])
@@ -949,13 +982,11 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
autospec=True)
@mock.patch.object(irmc_boot, '_parse_deploy_info', spec_set=True,
autospec=True)
- @mock.patch.object(irmc_boot, 'check_share_fs_mounted', spec_set=True,
- autospec=True)
def test_validate_non_glance_image(self,
- check_share_fs_mounted_mock,
deploy_info_mock,
is_glance_image_mock,
- validate_prop_mock):
+ validate_prop_mock,
+ check_share_fs_mounted_mock):
d_info = {'image_source': '733d1c44-a2ea-414b-aca7-69decf20d810'}
deploy_info_mock.return_value = d_info
is_glance_image_mock.return_value = False
@@ -963,7 +994,7 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
shared=False) as task:
task.driver.boot.validate(task)
- check_share_fs_mounted_mock.assert_called_once_with()
+        self.assertEqual(2, check_share_fs_mounted_mock.call_count)
deploy_info_mock.assert_called_once_with(task.node)
validate_prop_mock.assert_called_once_with(
task.context, d_info, ['kernel', 'ramdisk'])
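The switch from assert_called_once_with() to a call_count check in the hunks
above reflects that the class-wide mock now sees check_share_fs_mounted
invoked twice during a single validate() run. A small sketch of why the
stricter assertion no longer fits (names illustrative):

    from unittest import mock

    fn = mock.Mock()
    fn()
    fn()

    # fn.assert_called_once_with() would raise AssertionError here;
    # a call_count comparison tolerates repeated invocations.
    assert fn.call_count == 2
    fn.assert_called_with()  # still verifies the arguments of the last call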
@@ -1004,24 +1035,28 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
self.assertEqual(1 if provision_state == states.DEPLOYING else 0,
mock_backup_bios.call_count)
- def test_prepare_ramdisk_glance_image_deploying(self):
+ def test_prepare_ramdisk_glance_image_deploying(
+ self, check_share_fs_mounted_mock):
self.node.provision_state = states.DEPLOYING
self.node.save()
self._test_prepare_ramdisk()
- def test_prepare_ramdisk_glance_image_rescuing(self):
+ def test_prepare_ramdisk_glance_image_rescuing(
+ self, check_share_fs_mounted_mock):
self.node.provision_state = states.RESCUING
self.node.save()
self._test_prepare_ramdisk(mode='rescue')
- def test_prepare_ramdisk_glance_image_cleaning(self):
+ def test_prepare_ramdisk_glance_image_cleaning(
+ self, check_share_fs_mounted_mock):
self.node.provision_state = states.CLEANING
self.node.save()
self._test_prepare_ramdisk()
@mock.patch.object(irmc_boot, '_setup_vmedia', spec_set=True,
autospec=True)
- def test_prepare_ramdisk_not_deploying_not_cleaning(self, mock_is_image):
+ def test_prepare_ramdisk_not_deploying_not_cleaning(
+ self, mock_is_image, check_share_fs_mounted_mock):
"""Ensure deploy ops are blocked when not deploying and not cleaning"""
for state in states.STABLE_STATES:
@@ -1036,7 +1071,8 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_boot, '_cleanup_vmedia_boot', spec_set=True,
autospec=True)
- def test_clean_up_ramdisk(self, _cleanup_vmedia_boot_mock):
+ def test_clean_up_ramdisk(self, _cleanup_vmedia_boot_mock,
+ check_share_fs_mounted_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.boot.clean_up_ramdisk(task)
@@ -1059,12 +1095,14 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
boot_devices.DISK,
persistent=True)
- def test_prepare_instance_whole_disk_image_local(self):
+ def test_prepare_instance_whole_disk_image_local(
+ self, check_share_fs_mounted_mock):
self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
self.node.save()
self._test_prepare_instance_whole_disk_image()
- def test_prepare_instance_whole_disk_image(self):
+ def test_prepare_instance_whole_disk_image(self,
+ check_share_fs_mounted_mock):
self._test_prepare_instance_whole_disk_image()
@mock.patch.object(irmc_boot.IRMCVirtualMediaBoot,
@@ -1073,7 +1111,8 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_boot, '_cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_prepare_instance_partition_image(
- self, _cleanup_vmedia_boot_mock, _configure_vmedia_mock):
+ self, _cleanup_vmedia_boot_mock, _configure_vmedia_mock,
+ check_share_fs_mounted_mock):
self.node.instance_info = {
'capabilities': {'boot_option': 'netboot'}}
self.node.driver_internal_info = {'root_uuid_or_disk_id': "some_uuid"}
@@ -1091,7 +1130,8 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_boot, '_remove_share_file', spec_set=True,
autospec=True)
def test_clean_up_instance(self, _remove_share_file_mock,
- _cleanup_vmedia_boot_mock):
+ _cleanup_vmedia_boot_mock,
+ check_share_fs_mounted_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.instance_info['irmc_boot_iso'] = 'glance://deploy_iso'
@@ -1114,7 +1154,8 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
def test__configure_vmedia_boot(self,
_prepare_boot_iso_mock,
_setup_vmedia_for_boot_mock,
- node_set_boot_device):
+ node_set_boot_device,
+ check_share_fs_mounted_mock):
root_uuid_or_disk_id = {'root uuid': 'root_uuid'}
with task_manager.acquire(self.context, self.node.uuid,
@@ -1130,7 +1171,8 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
node_set_boot_device.assert_called_once_with(
task, boot_devices.CDROM, persistent=True)
- def test_remote_image_share_type_values(self):
+ def test_remote_image_share_type_values(
+ self, check_share_fs_mounted_mock):
cfg.CONF.set_override('remote_image_share_type', 'cifs', 'irmc')
cfg.CONF.set_override('remote_image_share_type', 'nfs', 'irmc')
self.assertRaises(ValueError, cfg.CONF.set_override,
@@ -1145,7 +1187,8 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
autospec=True)
def test_prepare_instance_with_secure_boot(self, mock_cleanup_vmedia_boot,
mock_configure_vmedia_boot,
- mock_set_secure_boot_mode):
+ mock_set_secure_boot_mode,
+ check_share_fs_mounted_mock):
self.node.driver_internal_info = {'root_uuid_or_disk_id': "12312642"}
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
@@ -1173,7 +1216,7 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
autospec=True)
def test_prepare_instance_with_secure_boot_false(
self, mock_cleanup_vmedia_boot, mock_configure_vmedia_boot,
- mock_set_secure_boot_mode):
+ mock_set_secure_boot_mode, check_share_fs_mounted_mock):
self.node.driver_internal_info = {'root_uuid_or_disk_id': "12312642"}
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
@@ -1200,7 +1243,7 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
autospec=True)
def test_prepare_instance_without_secure_boot(
self, mock_cleanup_vmedia_boot, mock_configure_vmedia_boot,
- mock_set_secure_boot_mode):
+ mock_set_secure_boot_mode, check_share_fs_mounted_mock):
self.node.driver_internal_info = {'root_uuid_or_disk_id': "12312642"}
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
@@ -1223,7 +1266,8 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_boot, '_cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_clean_up_instance_with_secure_boot(self, mock_cleanup_vmedia_boot,
- mock_set_secure_boot_mode):
+ mock_set_secure_boot_mode,
+ check_share_fs_mounted_mock):
self.node.provision_state = states.DELETING
self.node.target_provision_state = states.AVAILABLE
self.node.instance_info = {
@@ -1244,7 +1288,8 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_boot, '_cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_clean_up_instance_with_secure_boot_false(
- self, mock_cleanup_vmedia_boot, mock_set_secure_boot_mode):
+ self, mock_cleanup_vmedia_boot, mock_set_secure_boot_mode,
+ check_share_fs_mounted_mock):
self.node.provision_state = states.DELETING
self.node.target_provision_state = states.AVAILABLE
self.node.instance_info = {
@@ -1264,7 +1309,8 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_boot, '_cleanup_vmedia_boot', spec_set=True,
autospec=True)
def test_clean_up_instance_without_secure_boot(
- self, mock_cleanup_vmedia_boot, mock_set_secure_boot_mode):
+ self, mock_cleanup_vmedia_boot, mock_set_secure_boot_mode,
+ check_share_fs_mounted_mock):
self.node.provision_state = states.DELETING
self.node.target_provision_state = states.AVAILABLE
self.node.save()
@@ -1276,7 +1322,7 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
@mock.patch.object(os.path, 'isfile', return_value=True,
autospec=True)
- def test_validate_rescue(self, mock_isfile):
+ def test_validate_rescue(self, mock_isfile, check_share_fs_mounted_mock):
driver_info = self.node.driver_info
driver_info['irmc_rescue_iso'] = 'rescue.iso'
self.node.driver_info = driver_info
@@ -1284,7 +1330,8 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.boot.validate_rescue(task)
- def test_validate_rescue_no_rescue_ramdisk(self):
+ def test_validate_rescue_no_rescue_ramdisk(
+ self, check_share_fs_mounted_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaisesRegex(exception.MissingParameterValue,
'Missing.*irmc_rescue_iso',
@@ -1292,7 +1339,8 @@ class IRMCVirtualMediaBootTestCase(test_common.BaseIRMCTest):
@mock.patch.object(os.path, 'isfile', return_value=False,
autospec=True)
- def test_validate_rescue_ramdisk_not_exist(self, mock_isfile):
+ def test_validate_rescue_ramdisk_not_exist(
+ self, mock_isfile, check_share_fs_mounted_mock):
driver_info = self.node.driver_info
driver_info['irmc_rescue_iso'] = 'rescue.iso'
self.node.driver_info = driver_info
@@ -1455,16 +1503,15 @@ class IRMCPXEBootTestCase(test_common.BaseIRMCTest):
task.driver.boot, task)
+@mock.patch.object(irmc_boot, 'check_share_fs_mounted', spec_set=True,
+ autospec=True)
@mock.patch.object(irmc_boot, 'viom',
spec_set=mock_specs.SCCICLIENT_VIOM_SPEC)
class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
-
boot_interface = 'irmc-virtual-media'
def setUp(self):
super(IRMCVirtualMediaBootWithVolumeTestCase, self).setUp()
- irmc_boot.check_share_fs_mounted_patcher.start()
- self.addCleanup(irmc_boot.check_share_fs_mounted_patcher.stop)
driver_info = INFO_DICT
d_in_info = dict(boot_from_volume='volume-uuid')
self.config(enabled_storage_interfaces=['cinder'])
@@ -1580,14 +1627,15 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.boot.validate(task)
- def test_validate_iscsi(self, mock_viom):
+ def test_validate_iscsi(self, mock_viom, check_share_fs_mounted_mock):
self._create_port()
self._create_iscsi_resources()
self._call_validate()
self.assertEqual([mock.call('LAN0-1'), mock.call('CNA1-1')],
mock_viom.validate_physical_port_id.call_args_list)
- def test_validate_no_physical_id_in_lan_port(self, mock_viom):
+ def test_validate_no_physical_id_in_lan_port(self, mock_viom,
+ check_share_fs_mounted_mock):
self._create_port(physical_id=None)
self._create_iscsi_resources()
self.assertRaises(exception.MissingParameterValue,
@@ -1595,8 +1643,8 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_boot, 'scci',
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
- def test_validate_invalid_physical_id_in_lan_port(self, mock_scci,
- mock_viom):
+ def test_validate_invalid_physical_id_in_lan_port(
+ self, mock_scci, mock_viom, check_share_fs_mounted_mock):
self._create_port(physical_id='wrong-id')
self._create_iscsi_resources()
@@ -1606,7 +1654,8 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
self.assertRaises(exception.InvalidParameterValue,
self._call_validate)
- def test_validate_iscsi_connector_no_ip(self, mock_viom):
+ def test_validate_iscsi_connector_no_ip(self, mock_viom,
+ check_share_fs_mounted_mock):
self._create_port()
self._create_iscsi_iqn_connector()
self._create_iscsi_target()
@@ -1614,7 +1663,8 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
self.assertRaises(exception.MissingParameterValue,
self._call_validate)
- def test_validate_iscsi_connector_no_iqn(self, mock_viom):
+ def test_validate_iscsi_connector_no_iqn(self, mock_viom,
+ check_share_fs_mounted_mock):
self._create_port()
self._create_iscsi_ip_connector(physical_id='CNA1-1')
self._create_iscsi_target()
@@ -1622,7 +1672,8 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
self.assertRaises(exception.MissingParameterValue,
self._call_validate)
- def test_validate_iscsi_connector_no_netmask(self, mock_viom):
+ def test_validate_iscsi_connector_no_netmask(self, mock_viom,
+ check_share_fs_mounted_mock):
self._create_port()
self._create_iscsi_iqn_connector()
self._create_iscsi_ip_connector(network_size=None)
@@ -1631,7 +1682,8 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
self.assertRaises(exception.MissingParameterValue,
self._call_validate)
- def test_validate_iscsi_connector_invalid_netmask(self, mock_viom):
+ def test_validate_iscsi_connector_invalid_netmask(
+ self, mock_viom, check_share_fs_mounted_mock):
self._create_port()
self._create_iscsi_iqn_connector()
self._create_iscsi_ip_connector(network_size='wrong-netmask')
@@ -1640,7 +1692,8 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
self.assertRaises(exception.InvalidParameterValue,
self._call_validate)
- def test_validate_iscsi_connector_too_small_netmask(self, mock_viom):
+ def test_validate_iscsi_connector_too_small_netmask(
+ self, mock_viom, check_share_fs_mounted_mock):
self._create_port()
self._create_iscsi_iqn_connector()
self._create_iscsi_ip_connector(network_size='0')
@@ -1649,7 +1702,8 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
self.assertRaises(exception.InvalidParameterValue,
self._call_validate)
- def test_validate_iscsi_connector_too_large_netmask(self, mock_viom):
+ def test_validate_iscsi_connector_too_large_netmask(
+ self, mock_viom, check_share_fs_mounted_mock):
self._create_port()
self._create_iscsi_iqn_connector()
self._create_iscsi_ip_connector(network_size='32')
@@ -1658,7 +1712,8 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
self.assertRaises(exception.InvalidParameterValue,
self._call_validate)
- def test_validate_iscsi_connector_no_physical_id(self, mock_viom):
+ def test_validate_iscsi_connector_no_physical_id(
+ self, mock_viom, check_share_fs_mounted_mock):
self._create_port()
self._create_iscsi_iqn_connector(physical_id=None)
self._create_iscsi_ip_connector()
@@ -1668,7 +1723,8 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
self._call_validate)
@mock.patch.object(deploy_utils, 'get_single_nic_with_vif_port_id')
- def test_prepare_ramdisk_skip(self, mock_nic, mock_viom):
+ def test_prepare_ramdisk_skip(self, mock_nic, mock_viom,
+ check_share_fs_mounted_mock):
self._create_iscsi_resources()
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.provision_state = states.DEPLOYING
@@ -1676,7 +1732,8 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
mock_nic.assert_not_called()
@mock.patch.object(irmc_boot, '_cleanup_vmedia_boot')
- def test_prepare_instance(self, mock_clean, mock_viom):
+ def test_prepare_instance(self, mock_clean, mock_viom,
+ check_share_fs_mounted_mock):
mock_conf = self._create_mock_conf(mock_viom)
self._create_port()
self._create_iscsi_resources()
@@ -1711,7 +1768,8 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
mock_viom.VIOMConfiguration.assert_called_once_with(
PARSED_IFNO, identification=self.node.uuid)
- def test__configure_boot_from_volume_iscsi(self, mock_viom):
+ def test__configure_boot_from_volume_iscsi(self, mock_viom,
+ check_share_fs_mounted_mock):
mock_conf = self._create_mock_conf(mock_viom)
self._create_port()
self._create_iscsi_resources()
@@ -1734,7 +1792,8 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
mock_viom.validate_physical_port_id.assert_called_once_with('CNA1-1')
self._assert_viom_apply(mock_viom, mock_conf)
- def test__configure_boot_from_volume_multi_lan_ports(self, mock_viom):
+ def test__configure_boot_from_volume_multi_lan_ports(
+ self, mock_viom, check_share_fs_mounted_mock):
mock_conf = self._create_mock_conf(mock_viom)
self._create_port()
self._create_port(physical_id='LAN0-2',
@@ -1760,7 +1819,8 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
mock_viom.validate_physical_port_id.assert_called_once_with('CNA1-1')
self._assert_viom_apply(mock_viom, mock_conf)
- def test__configure_boot_from_volume_iscsi_no_portal_port(self, mock_viom):
+ def test__configure_boot_from_volume_iscsi_no_portal_port(
+ self, mock_viom, check_share_fs_mounted_mock):
mock_conf = self._create_mock_conf(mock_viom)
self._create_port()
self._create_iscsi_iqn_connector()
@@ -1786,7 +1846,8 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
mock_viom.validate_physical_port_id.assert_called_once_with('CNA1-1')
self._assert_viom_apply(mock_viom, mock_conf)
- def test__configure_boot_from_volume_iscsi_chap(self, mock_viom):
+ def test__configure_boot_from_volume_iscsi_chap(
+ self, mock_viom, check_share_fs_mounted_mock):
mock_conf = self._create_mock_conf(mock_viom)
self._create_port()
self._create_iscsi_iqn_connector()
@@ -1814,7 +1875,8 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
mock_viom.validate_physical_port_id.assert_called_once_with('CNA1-1')
self._assert_viom_apply(mock_viom, mock_conf)
- def test__configure_boot_from_volume_fc(self, mock_viom):
+ def test__configure_boot_from_volume_fc(self, mock_viom,
+ check_share_fs_mounted_mock):
mock_conf = self._create_mock_conf(mock_viom)
self._create_port()
self._create_fc_connector()
@@ -1833,8 +1895,8 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
@mock.patch.object(irmc_boot, 'scci',
spec_set=mock_specs.SCCICLIENT_IRMC_SCCI_SPEC)
- def test__configure_boot_from_volume_apply_error(self, mock_scci,
- mock_viom):
+ def test__configure_boot_from_volume_apply_error(
+ self, mock_scci, mock_viom, check_share_fs_mounted_mock):
mock_conf = self._create_mock_conf(mock_viom)
self._create_port()
self._create_fc_connector()
@@ -1856,7 +1918,7 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
mock_viom.validate_physical_port_id.assert_called_once_with('FC2-1')
self._assert_viom_apply(mock_viom, mock_conf)
- def test_clean_up_instance(self, mock_viom):
+ def test_clean_up_instance(self, mock_viom, check_share_fs_mounted_mock):
mock_conf = self._create_mock_conf(mock_viom)
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.boot.clean_up_instance(task)
@@ -1865,7 +1927,8 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
self.node.uuid)
mock_conf.terminate.assert_called_once_with(reboot=False)
- def test_clean_up_instance_error(self, mock_viom):
+ def test_clean_up_instance_error(self, mock_viom,
+ check_share_fs_mounted_mock):
mock_conf = self._create_mock_conf(mock_viom)
mock_conf.terminate.side_effect = Exception('fake error')
irmc_boot.scci.SCCIError = Exception
@@ -1878,7 +1941,8 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
self.node.uuid)
mock_conf.terminate.assert_called_once_with(reboot=False)
- def test__cleanup_boot_from_volume(self, mock_viom):
+ def test__cleanup_boot_from_volume(self, mock_viom,
+ check_share_fs_mounted_mock):
mock_conf = self._create_mock_conf(mock_viom)
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.boot._cleanup_boot_from_volume(task)
@@ -1889,7 +1953,6 @@ class IRMCVirtualMediaBootWithVolumeTestCase(test_common.BaseIRMCTest):
class IRMCPXEBootBasicTestCase(test_pxe.PXEBootTestCase):
-
boot_interface = 'irmc-pxe'
# NOTE(etingof): add driver-specific configuration
driver_info = dict(test_pxe.PXEBootTestCase.driver_info)
diff --git a/ironic/tests/unit/drivers/modules/irmc/test_common.py b/ironic/tests/unit/drivers/modules/irmc/test_common.py
index 11c0da446..3f0c3d94a 100644
--- a/ironic/tests/unit/drivers/modules/irmc/test_common.py
+++ b/ironic/tests/unit/drivers/modules/irmc/test_common.py
@@ -16,7 +16,8 @@
Test class for common methods used by iRMC modules.
"""
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_utils import uuidutils
diff --git a/ironic/tests/unit/drivers/modules/irmc/test_inspect.py b/ironic/tests/unit/drivers/modules/irmc/test_inspect.py
index b0bd206a3..0a6eeb399 100644
--- a/ironic/tests/unit/drivers/modules/irmc/test_inspect.py
+++ b/ironic/tests/unit/drivers/modules/irmc/test_inspect.py
@@ -16,7 +16,7 @@
Test class for iRMC Inspection Driver
"""
-import mock
+from unittest import mock
from ironic.common import exception
from ironic.common import states
diff --git a/ironic/tests/unit/drivers/modules/irmc/test_management.py b/ironic/tests/unit/drivers/modules/irmc/test_management.py
index e1c3fcc42..c4b152ae9 100644
--- a/ironic/tests/unit/drivers/modules/irmc/test_management.py
+++ b/ironic/tests/unit/drivers/modules/irmc/test_management.py
@@ -17,10 +17,9 @@ Test class for iRMC Management Driver
"""
import os
+from unittest import mock
import xml.etree.ElementTree as ET
-import mock
-
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common import states
diff --git a/ironic/tests/unit/drivers/modules/irmc/test_periodic_task.py b/ironic/tests/unit/drivers/modules/irmc/test_periodic_task.py
index 15575523c..52230259d 100644
--- a/ironic/tests/unit/drivers/modules/irmc/test_periodic_task.py
+++ b/ironic/tests/unit/drivers/modules/irmc/test_periodic_task.py
@@ -16,7 +16,8 @@
Test class for iRMC periodic tasks
"""
-import mock
+from unittest import mock
+
from oslo_utils import uuidutils
from ironic.conductor import task_manager
diff --git a/ironic/tests/unit/drivers/modules/irmc/test_power.py b/ironic/tests/unit/drivers/modules/irmc/test_power.py
index dfc112c8c..db335e941 100644
--- a/ironic/tests/unit/drivers/modules/irmc/test_power.py
+++ b/ironic/tests/unit/drivers/modules/irmc/test_power.py
@@ -16,7 +16,7 @@
Test class for iRMC Power Driver
"""
-import mock
+from unittest import mock
from ironic.common import exception
from ironic.common import states
diff --git a/ironic/tests/unit/drivers/modules/irmc/test_raid.py b/ironic/tests/unit/drivers/modules/irmc/test_raid.py
index 8dc2421ed..7698c256a 100644
--- a/ironic/tests/unit/drivers/modules/irmc/test_raid.py
+++ b/ironic/tests/unit/drivers/modules/irmc/test_raid.py
@@ -16,7 +16,7 @@
Test class for IRMC RAID configuration
"""
-import mock
+from unittest import mock
from ironic.common import exception
from ironic.conductor import task_manager
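The import churn in the hunks above drops the third-party mock distribution in
favour of the standard library's unittest.mock, available since Python 3.3;
the API is unchanged, only the import moves:

    # Before (requires the external "mock" package):
    #     import mock
    # After (standard library, Python 3.3+):
    from unittest import mock

    m = mock.MagicMock(return_value=42)
    assert m() == 42
    m.assert_called_once_with()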
diff --git a/ironic/tests/unit/drivers/modules/network/json_samples/network_data.json b/ironic/tests/unit/drivers/modules/network/json_samples/network_data.json
new file mode 100644
index 000000000..efce35ddd
--- /dev/null
+++ b/ironic/tests/unit/drivers/modules/network/json_samples/network_data.json
@@ -0,0 +1,113 @@
+{
+ "links": [
+ {
+ "id": "interface2",
+ "type": "vif",
+ "ethernet_mac_address": "a0:36:9f:2c:e8:70",
+ "vif_id": "e1c90e9f-eafc-4e2d-8ec9-58b91cebb53d",
+ "mtu": 1500
+ },
+ {
+ "id": "interface0",
+ "type": "phy",
+ "ethernet_mac_address": "a0:36:9f:2c:e8:80",
+ "mtu": 9000
+ },
+ {
+ "id": "interface1",
+ "type": "phy",
+ "ethernet_mac_address": "a0:36:9f:2c:e8:81",
+ "mtu": 9000
+ },
+ {
+ "id": "bond0",
+ "type": "bond",
+ "bond_links": [
+ "interface0",
+ "interface1"
+ ],
+ "ethernet_mac_address": "a0:36:9f:2c:e8:82",
+ "bond_mode": "802.1ad",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bond_miimon": 100
+ },
+ {
+ "id": "vlan0",
+ "type": "vlan",
+ "vlan_link": "bond0",
+ "vlan_id": 101,
+ "vlan_mac_address": "a0:36:9f:2c:e8:80",
+ "vif_id": "e1c90e9f-eafc-4e2d-8ec9-58b91cebb53f"
+ }
+ ],
+ "networks": [
+ {
+ "id": "private-ipv4",
+ "type": "ipv4",
+ "link": "interface0",
+ "ip_address": "10.184.0.244",
+ "netmask": "255.255.240.0",
+ "routes": [
+ {
+ "network": "10.0.0.0",
+ "netmask": "255.0.0.0",
+ "gateway": "11.0.0.1"
+ },
+ {
+ "network": "0.0.0.0",
+ "netmask": "0.0.0.0",
+ "gateway": "23.253.157.1"
+ }
+ ],
+ "network_id": "da5bb487-5193-4a65-a3df-4a0055a8c0d7"
+ },
+ {
+ "id": "private-ipv4",
+ "type": "ipv6",
+ "link": "interface0",
+ "ip_address": "2001:cdba::3257:9652/24",
+ "routes": [
+ {
+ "network": "::",
+ "netmask": "::",
+ "gateway": "fd00::1"
+ },
+ {
+ "network": "::",
+ "netmask": "ffff:ffff:ffff::",
+ "gateway": "fd00::1:1"
+ }
+ ],
+ "network_id": "da5bb487-5193-4a65-a3df-4a0055a8c0d8"
+ },
+ {
+ "id": "publicnet-ipv4",
+ "type": "ipv4",
+ "link": "vlan0",
+ "ip_address": "23.253.157.244",
+ "netmask": "255.255.255.0",
+ "dns_nameservers": [
+ "69.20.0.164",
+ "69.20.0.196"
+ ],
+ "routes": [
+ {
+ "network": "0.0.0.0",
+ "netmask": "0.0.0.0",
+ "gateway": "23.253.157.1"
+ }
+ ],
+ "network_id": "62611d6f-66cb-4270-8b1f-503ef0dd4736"
+ }
+ ],
+ "services": [
+ {
+ "type": "dns",
+ "address": "8.8.8.8"
+ },
+ {
+ "type": "dns",
+ "address": "8.8.4.4"
+ }
+ ]
+}
\ No newline at end of file
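The new sample follows the standard OpenStack network_data.json layout: links
describe L2 devices, networks bind L3 configuration to a link, and services
hold global settings such as DNS. A self-contained sketch of consuming such a
payload (the inline JSON below is a trimmed, illustrative subset of the
sample, not the full file):

    import json

    network_data = json.loads("""
    {
      "links": [
        {"id": "interface0", "type": "phy",
         "ethernet_mac_address": "a0:36:9f:2c:e8:80", "mtu": 9000},
        {"id": "vlan0", "type": "vlan", "vlan_link": "bond0", "vlan_id": 101}
      ],
      "networks": [
        {"id": "private-ipv4", "type": "ipv4", "link": "interface0",
         "ip_address": "10.184.0.244", "netmask": "255.255.240.0"}
      ],
      "services": [{"type": "dns", "address": "8.8.8.8"}]
    }
    """)

    # Links describe L2 devices; bonds and VLANs reference other links by id.
    for link in network_data['links']:
        print(link['id'], link['type'], link.get('mtu'))

    # Networks bind L3 configuration to a link by its id.
    for net in network_data['networks']:
        print(net['id'], net['type'], '->', net['link'])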
diff --git a/ironic/tests/unit/drivers/modules/network/test_common.py b/ironic/tests/unit/drivers/modules/network/test_common.py
index eedd907f6..eefbd8a9d 100644
--- a/ironic/tests/unit/drivers/modules/network/test_common.py
+++ b/ironic/tests/unit/drivers/modules/network/test_common.py
@@ -10,7 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+import json
+import os
+from unittest import mock
+
from oslo_config import cfg
from oslo_utils import uuidutils
@@ -93,7 +96,7 @@ class TestCommonFunctions(db_base.DbTestCase):
free_port_like_objs = (
common._get_free_portgroups_and_ports(task, self.vif_id,
{'anyphysnet'}))
- self.assertItemsEqual(
+ self.assertCountEqual(
[pg1.uuid, self.port.uuid] + [p.uuid for p in pg2_ports[:2]],
[p.uuid for p in free_port_like_objs])
@@ -106,7 +109,7 @@ class TestCommonFunctions(db_base.DbTestCase):
free_port_like_objs = (
common._get_free_portgroups_and_ports(task, self.vif_id,
set()))
- self.assertItemsEqual(
+ self.assertCountEqual(
[pg1.uuid, self.port.uuid] + [p.uuid for p in pg2_ports[:2]],
[p.uuid for p in free_port_like_objs])
@@ -119,7 +122,7 @@ class TestCommonFunctions(db_base.DbTestCase):
free_port_like_objs = (
common._get_free_portgroups_and_ports(task, self.vif_id,
{'notaphysnet'}))
- self.assertItemsEqual(
+ self.assertCountEqual(
[self.port.uuid],
[p.uuid for p in free_port_like_objs])
@@ -132,7 +135,7 @@ class TestCommonFunctions(db_base.DbTestCase):
free_port_like_objs = (
common._get_free_portgroups_and_ports(task, self.vif_id,
{'physnet1'}))
- self.assertItemsEqual(
+ self.assertCountEqual(
[pg1.uuid, self.port.uuid],
[p.uuid for p in free_port_like_objs])
@@ -145,7 +148,7 @@ class TestCommonFunctions(db_base.DbTestCase):
free_port_like_objs = (
common._get_free_portgroups_and_ports(task, self.vif_id,
{'physnet2'}))
- self.assertItemsEqual(
+ self.assertCountEqual(
[self.port.uuid] + [p.uuid for p in pg2_ports[:2]],
[p.uuid for p in free_port_like_objs])
@@ -158,7 +161,7 @@ class TestCommonFunctions(db_base.DbTestCase):
free_port_like_objs = (
common._get_free_portgroups_and_ports(task, self.vif_id,
{'physnet3'}))
- self.assertItemsEqual(
+ self.assertCountEqual(
[self.port.uuid], [p.uuid for p in free_port_like_objs])
def test__get_free_portgroups_and_ports_all_physnets(self):
@@ -171,7 +174,7 @@ class TestCommonFunctions(db_base.DbTestCase):
free_port_like_objs = (
common._get_free_portgroups_and_ports(task, self.vif_id,
physnets))
- self.assertItemsEqual(
+ self.assertCountEqual(
[pg1.uuid, self.port.uuid] + [p.uuid for p in pg2_ports[:2]],
[p.uuid for p in free_port_like_objs])
@@ -182,7 +185,7 @@ class TestCommonFunctions(db_base.DbTestCase):
free_port_like_objs = (
common._get_free_portgroups_and_ports(task, self.vif_id,
{'anyphysnet'}))
- self.assertItemsEqual([], free_port_like_objs)
+ self.assertCountEqual([], free_port_like_objs)
@mock.patch.object(neutron_common, 'validate_port_info', autospec=True)
def test__get_free_portgroups_and_ports_neutron(self, vpi_mock):
@@ -191,7 +194,7 @@ class TestCommonFunctions(db_base.DbTestCase):
free_port_like_objs = (
common._get_free_portgroups_and_ports(task, self.vif_id,
{'anyphysnet'}))
- self.assertItemsEqual(
+ self.assertCountEqual(
[self.port.uuid], [p.uuid for p in free_port_like_objs])
@mock.patch.object(neutron_common, 'validate_port_info', autospec=True)
@@ -203,9 +206,62 @@ class TestCommonFunctions(db_base.DbTestCase):
free_port_like_objs = (
common._get_free_portgroups_and_ports(task, self.vif_id,
{'anyphysnet'}))
- self.assertItemsEqual(
+ self.assertCountEqual(
[self.port.uuid], [p.uuid for p in free_port_like_objs])
+ def test__get_free_portgroups_and_ports_port_uuid(self):
+ self.node.network_interface = 'flat'
+ self.node.save()
+ pg1, pg1_ports, pg2, pg2_ports, pg3, pg3_ports = self._objects_setup(
+ set_physnets=False)
+ with task_manager.acquire(self.context, self.node.id) as task:
+ free_port_like_objs = (
+ common._get_free_portgroups_and_ports(
+ task, self.vif_id, {}, {'port_uuid': self.port.uuid}))
+ self.assertCountEqual(
+ [self.port.uuid],
+ [p.uuid for p in free_port_like_objs])
+
+ def test__get_free_portgroups_and_ports_portgroup_uuid(self):
+ self.node.network_interface = 'flat'
+ self.node.save()
+ pg1, pg1_ports, pg2, pg2_ports, pg3, pg3_ports = self._objects_setup(
+ set_physnets=False)
+ with task_manager.acquire(self.context, self.node.id) as task:
+ free_port_like_objs = (
+ common._get_free_portgroups_and_ports(
+ task, self.vif_id, {}, {'portgroup_uuid': pg1.uuid}))
+ self.assertCountEqual(
+ [pg1.uuid],
+ [p.uuid for p in free_port_like_objs])
+
+ def test__get_free_portgroups_and_ports_portgroup_uuid_attached_vifs(self):
+ self.node.network_interface = 'flat'
+ self.node.save()
+ pg1, pg1_ports, pg2, pg2_ports, pg3, pg3_ports = self._objects_setup(
+ set_physnets=False)
+ with task_manager.acquire(self.context, self.node.id) as task:
+ free_port_like_objs = (
+ common._get_free_portgroups_and_ports(
+ task, self.vif_id, {}, {'portgroup_uuid': pg2.uuid}))
+ self.assertCountEqual(
+ [],
+ [p.uuid for p in free_port_like_objs])
+
+ def test__get_free_portgroups_and_ports_no_matching_uuid(self):
+ self.node.network_interface = 'flat'
+ self.node.save()
+ pg1, pg1_ports, pg2, pg2_ports, pg3, pg3_ports = self._objects_setup(
+ set_physnets=False)
+ with task_manager.acquire(self.context, self.node.id) as task:
+ free_port_like_objs = (
+ common._get_free_portgroups_and_ports(
+ task, self.vif_id, {},
+ {'port_uuid': uuidutils.generate_uuid()}))
+ self.assertCountEqual(
+ [],
+ [p.uuid for p in free_port_like_objs])
+
@mock.patch.object(neutron_common, 'validate_port_info', autospec=True,
return_value=True)
def test_get_free_port_like_object_ports(self, vpi_mock):
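The assertItemsEqual to assertCountEqual substitutions above are another
Python 3 cleanup: the method was renamed in Python 3.2 and compares sequences
regardless of order while still honouring duplicates. A minimal illustration:

    import unittest

    class CountEqualExample(unittest.TestCase):
        def test_order_insensitive(self):
            # Same elements, different order: passes.
            self.assertCountEqual([1, 2, 2, 3], [3, 2, 1, 2])
            # Duplicates still matter: [1, 2] vs [1, 2, 2] would fail.

    if __name__ == '__main__':
        unittest.main()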
@@ -469,6 +525,10 @@ class TestVifPortIDMixin(db_base.DbTestCase):
address='52:54:00:cf:2d:32',
extra={'vif_port_id': uuidutils.generate_uuid(),
'client-id': 'fake1'})
+ network_data_file = os.path.join(
+ os.path.dirname(__file__), 'json_samples', 'network_data.json')
+ with open(network_data_file, 'rb') as fl:
+ self.network_data = json.load(fl)
def test__save_vif_to_port_like_obj_port(self):
self.port.extra = {}
@@ -599,7 +659,7 @@ class TestVifPortIDMixin(db_base.DbTestCase):
address='52:54:00:cf:2d:01', uuid=uuidutils.generate_uuid())
with task_manager.acquire(self.context, self.node.id) as task:
vifs = self.interface.vif_list(task)
- self.assertItemsEqual([{'id': pg_vif_id}, {'id': vif_id}], vifs)
+ self.assertCountEqual([{'id': pg_vif_id}, {'id': vif_id}], vifs)
def test_vif_list_internal(self):
vif_id = uuidutils.generate_uuid()
@@ -615,7 +675,7 @@ class TestVifPortIDMixin(db_base.DbTestCase):
address='52:54:00:cf:2d:01', uuid=uuidutils.generate_uuid())
with task_manager.acquire(self.context, self.node.id) as task:
vifs = self.interface.vif_list(task)
- self.assertItemsEqual([{'id': pg_vif_id}, {'id': vif_id}], vifs)
+ self.assertCountEqual([{'id': pg_vif_id}, {'id': vif_id}], vifs)
def test_vif_list_extra_and_internal_priority(self):
vif_id = uuidutils.generate_uuid()
@@ -679,6 +739,14 @@ class TestVifPortIDMixin(db_base.DbTestCase):
vif = self.interface.get_current_vif(task, self.port)
self.assertIsNone(vif)
+ def test_get_node_network_data_complete(self):
+ self.node.network_data = self.network_data
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.id) as task:
+ network_data = self.interface.get_node_network_data(task)
+
+ self.assertEqual(self.network_data, network_data)
+
class TestNeutronVifPortIDMixin(db_base.DbTestCase):
@@ -695,7 +763,8 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
self.neutron_port = {'id': '132f871f-eaec-4fed-9475-0d54465e0f00',
'mac_address': '52:54:00:cf:2d:32'}
- @mock.patch.object(common.VIFPortIDMixin, '_save_vif_to_port_like_obj')
+ @mock.patch.object(common.VIFPortIDMixin, '_save_vif_to_port_like_obj',
+ autospec=True)
@mock.patch.object(common, 'get_free_port_like_object', autospec=True)
@mock.patch.object(neutron_common, 'get_client', autospec=True)
@mock.patch.object(neutron_common, 'update_port_address', autospec=True)
@@ -711,10 +780,12 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
mock_upa.assert_called_once_with(
"fake_vif_id", self.port.address, context=task.context)
self.assertFalse(mock_gpbpi.called)
- mock_gfp.assert_called_once_with(task, 'fake_vif_id', set())
+ mock_gfp.assert_called_once_with(task, 'fake_vif_id', set(),
+ {'id': 'fake_vif_id'})
mock_save.assert_called_once_with(self.port, "fake_vif_id")
- @mock.patch.object(common.VIFPortIDMixin, '_save_vif_to_port_like_obj')
+ @mock.patch.object(common.VIFPortIDMixin, '_save_vif_to_port_like_obj',
+ autospec=True)
@mock.patch.object(common, 'get_free_port_like_object', autospec=True)
@mock.patch.object(neutron_common, 'get_client', autospec=True)
@mock.patch.object(neutron_common, 'update_port_address', autospec=True)
@@ -727,10 +798,12 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.id) as task:
self.assertRaises(exception.NoFreePhysicalPorts,
self.interface.vif_attach, task, vif)
- mock_gfp.assert_called_once_with(task, 'fake_vif_id', set())
+ mock_gfp.assert_called_once_with(task, 'fake_vif_id', set(),
+ {'id': 'fake_vif_id'})
self.assertFalse(mock_save.called)
- @mock.patch.object(common.VIFPortIDMixin, '_save_vif_to_port_like_obj')
+ @mock.patch.object(common.VIFPortIDMixin, '_save_vif_to_port_like_obj',
+ autospec=True)
@mock.patch.object(common, 'get_free_port_like_object', autospec=True)
@mock.patch.object(neutron_common, 'get_client', autospec=True)
@mock.patch.object(neutron_common, 'update_port_address', autospec=True)
@@ -750,10 +823,12 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
"fake_vif_id", self.port.address, context=task.context)
mock_gpbpi.assert_called_once_with(mock_client.return_value,
'fake_vif_id')
- mock_gfp.assert_called_once_with(task, 'fake_vif_id', {'physnet1'})
+ mock_gfp.assert_called_once_with(task, 'fake_vif_id', {'physnet1'},
+ {'id': 'fake_vif_id'})
mock_save.assert_called_once_with(self.port, "fake_vif_id")
- @mock.patch.object(common.VIFPortIDMixin, '_save_vif_to_port_like_obj')
+ @mock.patch.object(common.VIFPortIDMixin, '_save_vif_to_port_like_obj',
+ autospec=True)
@mock.patch.object(common, 'plug_port_to_tenant_network', autospec=True)
@mock.patch.object(common, 'get_free_port_like_object', autospec=True)
@mock.patch.object(neutron_common, 'get_client', autospec=True)
@@ -772,11 +847,13 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
mock_upa.assert_called_once_with(
"fake_vif_id", self.port.address, context=task.context)
self.assertFalse(mock_gpbpi.called)
- mock_gfp.assert_called_once_with(task, 'fake_vif_id', set())
+ mock_gfp.assert_called_once_with(task, 'fake_vif_id', set(),
+ {'id': 'fake_vif_id'})
mock_save.assert_called_once_with(self.port, "fake_vif_id")
mock_plug.assert_called_once_with(task, self.port, mock.ANY)
- @mock.patch.object(common.VIFPortIDMixin, '_save_vif_to_port_like_obj')
+ @mock.patch.object(common.VIFPortIDMixin, '_save_vif_to_port_like_obj',
+ autospec=True)
@mock.patch.object(common, 'plug_port_to_tenant_network', autospec=True)
@mock.patch.object(common, 'get_free_port_like_object', autospec=True)
@mock.patch.object(neutron_common, 'get_client', autospec=True)
@@ -798,14 +875,16 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
mock_upa.assert_called_once_with(
"fake_vif_id", self.port.address, context=task.context)
self.assertFalse(mock_gpbpi.called)
- mock_gfp.assert_called_once_with(task, 'fake_vif_id', set())
+ mock_gfp.assert_called_once_with(task, 'fake_vif_id', set(),
+ {'id': 'fake_vif_id'})
mock_save.assert_called_once_with(self.port, "fake_vif_id")
mock_plug.assert_called_once_with(task, self.port, mock.ANY)
- @mock.patch.object(common.VIFPortIDMixin, '_save_vif_to_port_like_obj')
+ @mock.patch.object(common.VIFPortIDMixin, '_save_vif_to_port_like_obj',
+ autospec=True)
@mock.patch.object(common, 'get_free_port_like_object', autospec=True)
@mock.patch.object(neutron_common, 'get_client', autospec=True)
- @mock.patch.object(neutron_common, 'update_port_address')
+ @mock.patch.object(neutron_common, 'update_port_address', autospec=True)
@mock.patch.object(neutron_common, 'get_physnets_by_port_uuid',
autospec=True)
def test_vif_attach_portgroup_no_address(self, mock_gpbpi, mock_upa,
@@ -818,14 +897,16 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
self.interface.vif_attach(task, vif)
mock_client.assert_called_once_with(context=task.context)
self.assertFalse(mock_gpbpi.called)
- mock_gfp.assert_called_once_with(task, 'fake_vif_id', set())
+ mock_gfp.assert_called_once_with(task, 'fake_vif_id', set(),
+ {'id': 'fake_vif_id'})
self.assertFalse(mock_client.return_value.show_port.called)
self.assertFalse(mock_upa.called)
mock_save.assert_called_once_with(pg, "fake_vif_id")
- @mock.patch.object(common.VIFPortIDMixin, '_save_vif_to_port_like_obj')
+ @mock.patch.object(common.VIFPortIDMixin, '_save_vif_to_port_like_obj',
+ autospec=True)
@mock.patch.object(neutron_common, 'get_client', autospec=True)
- @mock.patch.object(neutron_common, 'update_port_address')
+ @mock.patch.object(neutron_common, 'update_port_address', autospec=True)
@mock.patch.object(neutron_common, 'get_physnets_by_port_uuid',
autospec=True)
def test_vif_attach_update_port_exception(self, mock_gpbpi, mock_upa,
@@ -846,10 +927,11 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
'fake_vif_id')
self.assertFalse(mock_save.called)
- @mock.patch.object(common.VIFPortIDMixin, '_save_vif_to_port_like_obj')
+ @mock.patch.object(common.VIFPortIDMixin, '_save_vif_to_port_like_obj',
+ autospec=True)
@mock.patch.object(common, 'get_free_port_like_object', autospec=True)
- @mock.patch.object(neutron_common, 'get_client')
- @mock.patch.object(neutron_common, 'update_port_address')
+ @mock.patch.object(neutron_common, 'get_client', autospec=True)
+ @mock.patch.object(neutron_common, 'update_port_address', autospec=True)
@mock.patch.object(neutron_common, 'get_physnets_by_port_uuid',
autospec=True)
def test_vif_attach_portgroup_physnet_inconsistent(self, mock_gpbpi,
@@ -871,10 +953,11 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
self.assertFalse(mock_upa.called)
self.assertFalse(mock_save.called)
- @mock.patch.object(common.VIFPortIDMixin, '_save_vif_to_port_like_obj')
+ @mock.patch.object(common.VIFPortIDMixin, '_save_vif_to_port_like_obj',
+ autospec=True)
@mock.patch.object(common, 'get_free_port_like_object', autospec=True)
- @mock.patch.object(neutron_common, 'get_client')
- @mock.patch.object(neutron_common, 'update_port_address')
+ @mock.patch.object(neutron_common, 'get_client', autospec=True)
+ @mock.patch.object(neutron_common, 'update_port_address', autospec=True)
@mock.patch.object(neutron_common, 'get_physnets_by_port_uuid',
autospec=True)
def test_vif_attach_multiple_segment_mappings(self, mock_gpbpi, mock_upa,
@@ -898,33 +981,39 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
self.assertFalse(mock_upa.called)
self.assertFalse(mock_save.called)
- @mock.patch.object(common.VIFPortIDMixin, '_clear_vif_from_port_like_obj')
+ @mock.patch.object(common.VIFPortIDMixin, '_clear_vif_from_port_like_obj',
+ autospec=True)
@mock.patch.object(neutron_common, 'unbind_neutron_port', autospec=True)
- @mock.patch.object(common.VIFPortIDMixin, '_get_port_like_obj_by_vif_id')
+ @mock.patch.object(common.VIFPortIDMixin, '_get_port_like_obj_by_vif_id',
+ autospec=True)
def test_vif_detach(self, mock_get, mock_unp, mock_clear):
mock_get.return_value = self.port
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.vif_detach(task, 'fake_vif_id')
- mock_get.assert_called_once_with(task, 'fake_vif_id')
+ mock_get.assert_called_once_with(self.interface, task, 'fake_vif_id')
self.assertFalse(mock_unp.called)
mock_clear.assert_called_once_with(self.port)
- @mock.patch.object(common.VIFPortIDMixin, '_clear_vif_from_port_like_obj')
+ @mock.patch.object(common.VIFPortIDMixin, '_clear_vif_from_port_like_obj',
+ autospec=True)
@mock.patch.object(neutron_common, 'unbind_neutron_port', autospec=True)
- @mock.patch.object(common.VIFPortIDMixin, '_get_port_like_obj_by_vif_id')
+ @mock.patch.object(common.VIFPortIDMixin, '_get_port_like_obj_by_vif_id',
+ autospec=True)
def test_vif_detach_portgroup(self, mock_get, mock_unp, mock_clear):
pg = obj_utils.create_test_portgroup(
self.context, node_id=self.node.id)
mock_get.return_value = pg
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.vif_detach(task, 'fake_vif_id')
- mock_get.assert_called_once_with(task, 'fake_vif_id')
+ mock_get.assert_called_once_with(self.interface, task, 'fake_vif_id')
self.assertFalse(mock_unp.called)
mock_clear.assert_called_once_with(pg)
- @mock.patch.object(common.VIFPortIDMixin, '_clear_vif_from_port_like_obj')
+ @mock.patch.object(common.VIFPortIDMixin, '_clear_vif_from_port_like_obj',
+ autospec=True)
@mock.patch.object(neutron_common, 'unbind_neutron_port', autospec=True)
- @mock.patch.object(common.VIFPortIDMixin, '_get_port_like_obj_by_vif_id')
+ @mock.patch.object(common.VIFPortIDMixin, '_get_port_like_obj_by_vif_id',
+ autospec=True)
def test_vif_detach_not_attached(self, mock_get, mock_unp, mock_clear):
mock_get.side_effect = exception.VifNotAttached(vif='fake-vif',
node='fake-node')
@@ -932,13 +1021,15 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
self.assertRaisesRegex(
exception.VifNotAttached, "it is not attached to it.",
self.interface.vif_detach, task, 'fake_vif_id')
- mock_get.assert_called_once_with(task, 'fake_vif_id')
+ mock_get.assert_called_once_with(self.interface, task, 'fake_vif_id')
self.assertFalse(mock_unp.called)
self.assertFalse(mock_clear.called)
- @mock.patch.object(common.VIFPortIDMixin, '_clear_vif_from_port_like_obj')
+ @mock.patch.object(common.VIFPortIDMixin, '_clear_vif_from_port_like_obj',
+ autospec=True)
@mock.patch.object(neutron_common, 'unbind_neutron_port', autospec=True)
- @mock.patch.object(common.VIFPortIDMixin, '_get_port_like_obj_by_vif_id')
+ @mock.patch.object(common.VIFPortIDMixin, '_get_port_like_obj_by_vif_id',
+ autospec=True)
def test_vif_detach_active_node(self, mock_get, mock_unp, mock_clear):
self.node.provision_state = states.ACTIVE
self.node.save()
@@ -947,12 +1038,14 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
self.interface.vif_detach(task, 'fake_vif_id')
mock_unp.assert_called_once_with('fake_vif_id',
context=task.context)
- mock_get.assert_called_once_with(task, 'fake_vif_id')
+ mock_get.assert_called_once_with(self.interface, task, 'fake_vif_id')
mock_clear.assert_called_once_with(self.port)
- @mock.patch.object(common.VIFPortIDMixin, '_clear_vif_from_port_like_obj')
+ @mock.patch.object(common.VIFPortIDMixin, '_clear_vif_from_port_like_obj',
+ autospec=True)
@mock.patch.object(neutron_common, 'unbind_neutron_port', autospec=True)
- @mock.patch.object(common.VIFPortIDMixin, '_get_port_like_obj_by_vif_id')
+ @mock.patch.object(common.VIFPortIDMixin, '_get_port_like_obj_by_vif_id',
+ autospec=True)
def test_vif_detach_deleting_node(self, mock_get, mock_unp, mock_clear):
self.node.provision_state = states.DELETING
self.node.save()
@@ -961,12 +1054,14 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
self.interface.vif_detach(task, 'fake_vif_id')
mock_unp.assert_called_once_with('fake_vif_id',
context=task.context)
- mock_get.assert_called_once_with(task, 'fake_vif_id')
+ mock_get.assert_called_once_with(self.interface, task, 'fake_vif_id')
mock_clear.assert_called_once_with(self.port)
- @mock.patch.object(common.VIFPortIDMixin, '_clear_vif_from_port_like_obj')
+ @mock.patch.object(common.VIFPortIDMixin, '_clear_vif_from_port_like_obj',
+ autospec=True)
@mock.patch.object(neutron_common, 'unbind_neutron_port', autospec=True)
- @mock.patch.object(common.VIFPortIDMixin, '_get_port_like_obj_by_vif_id')
+ @mock.patch.object(common.VIFPortIDMixin, '_get_port_like_obj_by_vif_id',
+ autospec=True)
def test_vif_detach_active_node_failure(self, mock_get, mock_unp,
mock_clear):
self.node.provision_state = states.ACTIVE
@@ -978,7 +1073,7 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
self.interface.vif_detach, task, 'fake_vif_id')
mock_unp.assert_called_once_with('fake_vif_id',
context=task.context)
- mock_get.assert_called_once_with(task, 'fake_vif_id')
+ mock_get.assert_called_once_with(self.interface, task, 'fake_vif_id')
mock_clear.assert_called_once_with(self.port)
@mock.patch.object(neutron_common, 'update_port_address', autospec=True)
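The recurring assertion change in this file, where calls are now checked as
assert_called_once_with(self.interface, task, ...), follows from adding
autospec=True when patching a method on a class: an autospecced method mock
records self as the first positional argument of every call. A short sketch
with illustrative names:

    from unittest import mock

    class Interface:
        def vif_detach(self, task, vif_id):
            pass

    iface = Interface()
    with mock.patch.object(Interface, 'vif_detach', autospec=True) as m:
        iface.vif_detach('task', 'fake_vif_id')
        # Without autospec the instance would be invisible to the mock;
        # with it, the call is recorded as (iface, 'task', 'fake_vif_id').
        m.assert_called_once_with(iface, 'task', 'fake_vif_id')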
@@ -988,7 +1083,8 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.port_changed(task, self.port)
mac_update_mock.assert_called_once_with(
- self.port.extra['vif_port_id'], new_address,
+ self.port.extra['vif_port_id'],
+ new_address,
context=task.context)
@mock.patch.object(neutron_common, 'update_port_address', autospec=True)
@@ -1014,7 +1110,8 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
self.interface.port_changed(task, self.port)
self.assertFalse(mac_update_mock.called)
- @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts')
+ @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts',
+ autospec=True)
def test_port_changed_client_id(self, dhcp_update_mock):
expected_extra = {'vif_port_id': 'fake-id', 'client-id': 'fake2'}
expected_dhcp_opts = [{'opt_name': '61', 'opt_value': 'fake2'}]
@@ -1022,9 +1119,10 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.port_changed(task, self.port)
dhcp_update_mock.assert_called_once_with(
- 'fake-id', expected_dhcp_opts, context=task.context)
+ mock.ANY, 'fake-id', expected_dhcp_opts, context=task.context)
- @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts')
+ @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts',
+ autospec=True)
def test_port_changed_extra_add_new_key(self, dhcp_update_mock):
self.port.extra = {'vif_port_id': 'fake-id'}
self.port.save()
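Where the patched method belongs to an object the test never holds a reference
to (here the NeutronDHCPApi instance created inside the interface), the
autospecced self argument is matched with mock.ANY, as the hunk above does. A
sketch of the same idea under hypothetical names:

    from unittest import mock

    class Api:
        def update_port_dhcp_opts(self, port_id, opts, context=None):
            pass

    def production_code():
        # The instance is created internally, so a test cannot name it.
        Api().update_port_dhcp_opts('fake-id', [{'opt_name': '61'}],
                                    context=None)

    with mock.patch.object(Api, 'update_port_dhcp_opts', autospec=True) as m:
        production_code()
        # mock.ANY stands in for the internally-created instance.
        m.assert_called_once_with(mock.ANY, 'fake-id',
                                  [{'opt_name': '61'}], context=None)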
@@ -1035,7 +1133,8 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
self.interface.port_changed(task, self.port)
self.assertFalse(dhcp_update_mock.called)
- @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts')
+ @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts',
+ autospec=True)
def test_port_changed_client_id_fail(self, dhcp_update_mock):
self.port.extra = {'vif_port_id': 'fake-id', 'client-id': 'fake2'}
dhcp_update_mock.side_effect = (
@@ -1045,7 +1144,8 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
self.interface.port_changed,
task, self.port)
- @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts')
+ @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts',
+ autospec=True)
def test_port_changed_client_id_no_vif_id(self, dhcp_update_mock):
self.port.extra = {'client-id': 'fake1'}
self.port.save()
@@ -1054,7 +1154,8 @@ class TestNeutronVifPortIDMixin(db_base.DbTestCase):
self.interface.port_changed(task, self.port)
self.assertFalse(dhcp_update_mock.called)
- @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts')
+ @mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.update_port_dhcp_opts',
+ autospec=True)
def test_port_changed_message_format_failure(self, dhcp_update_mock):
pg = obj_utils.create_test_portgroup(
self.context, node_id=self.node.id,
diff --git a/ironic/tests/unit/drivers/modules/network/test_flat.py b/ironic/tests/unit/drivers/modules/network/test_flat.py
index 186827166..66c75441c 100644
--- a/ironic/tests/unit/drivers/modules/network/test_flat.py
+++ b/ironic/tests/unit/drivers/modules/network/test_flat.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from neutronclient.common import exceptions as neutron_exceptions
from oslo_config import cfg
from oslo_utils import uuidutils
@@ -37,34 +38,35 @@ class TestFlatInterface(db_base.DbTestCase):
internal_info={
'cleaning_vif_port_id': uuidutils.generate_uuid()})
- @mock.patch('%s.vif_list' % VIFMIXINPATH)
+ @mock.patch('%s.vif_list' % VIFMIXINPATH, autospec=True)
def test_vif_list(self, mock_vif_list):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.vif_list(task)
- mock_vif_list.assert_called_once_with(task)
+ mock_vif_list.assert_called_once_with(self.interface, task)
- @mock.patch('%s.vif_attach' % VIFMIXINPATH)
+ @mock.patch('%s.vif_attach' % VIFMIXINPATH, autospec=True)
def test_vif_attach(self, mock_vif_attach):
vif = mock.MagicMock()
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.vif_attach(task, vif)
- mock_vif_attach.assert_called_once_with(task, vif)
+ mock_vif_attach.assert_called_once_with(self.interface, task, vif)
- @mock.patch('%s.vif_detach' % VIFMIXINPATH)
+ @mock.patch('%s.vif_detach' % VIFMIXINPATH, autospec=True)
def test_vif_detach(self, mock_vif_detach):
vif_id = "vif"
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.vif_detach(task, vif_id)
- mock_vif_detach.assert_called_once_with(task, vif_id)
+ mock_vif_detach.assert_called_once_with(
+ self.interface, task, vif_id)
- @mock.patch('%s.port_changed' % VIFMIXINPATH)
+ @mock.patch('%s.port_changed' % VIFMIXINPATH, autospec=True)
def test_vif_port_changed(self, mock_p_changed):
port = mock.MagicMock()
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.port_changed(task, port)
- mock_p_changed.assert_called_once_with(task, port)
+ mock_p_changed.assert_called_once_with(self.interface, task, port)
- @mock.patch.object(flat_interface, 'LOG')
+ @mock.patch.object(flat_interface, 'LOG', autospec=True)
def test_init_no_cleaning_network(self, mock_log):
self.config(cleaning_network=None, group='neutron')
flat_interface.FlatNetwork()
@@ -92,9 +94,9 @@ class TestFlatInterface(db_base.DbTestCase):
'cleaning network', context=task.context)
@mock.patch.object(neutron, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron, 'add_ports_to_network')
- @mock.patch.object(neutron, 'rollback_ports')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron, 'add_ports_to_network', autospec=True)
+ @mock.patch.object(neutron, 'rollback_ports', autospec=True)
def test_add_cleaning_network(self, rollback_mock, add_mock,
validate_mock):
add_mock.return_value = {self.port.uuid: 'vif-port-id'}
@@ -112,9 +114,9 @@ class TestFlatInterface(db_base.DbTestCase):
self.port.internal_info['cleaning_vif_port_id'])
@mock.patch.object(neutron, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron, 'add_ports_to_network')
- @mock.patch.object(neutron, 'rollback_ports')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron, 'add_ports_to_network', autospec=True)
+ @mock.patch.object(neutron, 'rollback_ports', autospec=True)
def test_add_cleaning_network_from_node(self, rollback_mock, add_mock,
validate_mock):
add_mock.return_value = {self.port.uuid: 'vif-port-id'}
@@ -138,8 +140,8 @@ class TestFlatInterface(db_base.DbTestCase):
self.port.internal_info['cleaning_vif_port_id'])
@mock.patch.object(neutron, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron, 'remove_ports_from_network')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron, 'remove_ports_from_network', autospec=True)
def test_remove_cleaning_network(self, remove_mock, validate_mock):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.remove_cleaning_network(task)
@@ -152,8 +154,8 @@ class TestFlatInterface(db_base.DbTestCase):
self.assertNotIn('cleaning_vif_port_id', self.port.internal_info)
@mock.patch.object(neutron, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron, 'remove_ports_from_network')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron, 'remove_ports_from_network', autospec=True)
def test_remove_cleaning_network_from_node(self, remove_mock,
validate_mock):
cleaning_network_uuid = '3aea0de6-4b92-44da-9aa0-52d134c83fdf'
@@ -170,7 +172,7 @@ class TestFlatInterface(db_base.DbTestCase):
self.port.refresh()
self.assertNotIn('cleaning_vif_port_id', self.port.internal_info)
- @mock.patch.object(neutron, 'update_neutron_port')
+ @mock.patch.object(neutron, 'update_neutron_port', autospec=True)
def test__bind_flat_ports_set_binding_host_id(self, update_mock):
extra = {'vif_port_id': 'foo'}
utils.create_test_port(self.context, node_id=self.node.id,
@@ -183,7 +185,7 @@ class TestFlatInterface(db_base.DbTestCase):
self.interface._bind_flat_ports(task)
update_mock.assert_called_once_with(self.context, 'foo', exp_body)
- @mock.patch.object(neutron, 'update_neutron_port')
+ @mock.patch.object(neutron, 'update_neutron_port', autospec=True)
def test__bind_flat_ports_set_binding_host_id_portgroup(self, update_mock):
internal_info = {'tenant_vif_port_id': 'foo'}
utils.create_test_portgroup(
@@ -204,7 +206,7 @@ class TestFlatInterface(db_base.DbTestCase):
mock.call(self.context, 'bar', exp_body1),
mock.call(self.context, 'foo', exp_body2)])
- @mock.patch.object(neutron, 'unbind_neutron_port')
+ @mock.patch.object(neutron, 'unbind_neutron_port', autospec=True)
def test__unbind_flat_ports(self, unbind_neutron_port_mock):
extra = {'vif_port_id': 'foo'}
utils.create_test_port(self.context, node_id=self.node.id,
@@ -215,7 +217,7 @@ class TestFlatInterface(db_base.DbTestCase):
unbind_neutron_port_mock.assert_called_once_with('foo',
context=self.context)
- @mock.patch.object(neutron, 'unbind_neutron_port')
+ @mock.patch.object(neutron, 'unbind_neutron_port', autospec=True)
def test__unbind_flat_ports_portgroup(self, unbind_neutron_port_mock):
internal_info = {'tenant_vif_port_id': 'foo'}
utils.create_test_portgroup(self.context, node_id=self.node.id,
@@ -227,11 +229,11 @@ class TestFlatInterface(db_base.DbTestCase):
uuid=uuidutils.generate_uuid())
with task_manager.acquire(self.context, self.node.id) as task:
self.interface._unbind_flat_ports(task)
- unbind_neutron_port_mock.has_calls(
+ unbind_neutron_port_mock.assert_has_calls(
[mock.call('foo', context=self.context),
- mock.call('bar', context=self.context)])
+ mock.call('bar', context=self.context)], any_order=True)
- @mock.patch.object(neutron, 'update_neutron_port')
+ @mock.patch.object(neutron, 'update_neutron_port', autospec=True)
def test__bind_flat_ports_set_binding_host_id_raise(self, update_mock):
update_mock.side_effect = (neutron_exceptions.ConnectionFailed())
extra = {'vif_port_id': 'foo'}
@@ -242,34 +244,38 @@ class TestFlatInterface(db_base.DbTestCase):
self.assertRaises(exception.NetworkError,
self.interface._bind_flat_ports, task)
- @mock.patch.object(flat_interface.FlatNetwork, '_bind_flat_ports')
+ @mock.patch.object(flat_interface.FlatNetwork, '_bind_flat_ports',
+ autospec=True)
def test_add_rescuing_network(self, bind_mock):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.add_rescuing_network(task)
- bind_mock.assert_called_once_with(task)
+ bind_mock.assert_called_once_with(self.interface, task)
- @mock.patch.object(flat_interface.FlatNetwork, '_unbind_flat_ports')
+ @mock.patch.object(flat_interface.FlatNetwork, '_unbind_flat_ports',
+ autospec=True)
def test_remove_rescuing_network(self, unbind_mock):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.remove_rescuing_network(task)
- unbind_mock.assert_called_once_with(task)
+ unbind_mock.assert_called_once_with(self.interface, task)
- @mock.patch.object(flat_interface.FlatNetwork, '_bind_flat_ports')
+ @mock.patch.object(flat_interface.FlatNetwork, '_bind_flat_ports',
+ autospec=True)
def test_add_provisioning_network(self, bind_mock):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.add_provisioning_network(task)
- bind_mock.assert_called_once_with(task)
+ bind_mock.assert_called_once_with(self.interface, task)
- @mock.patch.object(flat_interface.FlatNetwork, '_unbind_flat_ports')
+ @mock.patch.object(flat_interface.FlatNetwork, '_unbind_flat_ports',
+ autospec=True)
def test_remove_provisioning_network(self, unbind_mock):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.remove_provisioning_network(task)
- unbind_mock.assert_called_once_with(task)
+ unbind_mock.assert_called_once_with(self.interface, task)
@mock.patch.object(neutron, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron, 'add_ports_to_network')
- @mock.patch.object(neutron, 'rollback_ports')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron, 'add_ports_to_network', autospec=True)
+ @mock.patch.object(neutron, 'rollback_ports', autospec=True)
def test_add_inspection_network(self, rollback_mock, add_mock,
validate_mock):
add_mock.return_value = {self.port.uuid: 'vif-port-id'}
@@ -287,9 +293,9 @@ class TestFlatInterface(db_base.DbTestCase):
self.port.internal_info['inspection_vif_port_id'])
@mock.patch.object(neutron, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron, 'add_ports_to_network')
- @mock.patch.object(neutron, 'rollback_ports')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron, 'add_ports_to_network', autospec=True)
+ @mock.patch.object(neutron, 'rollback_ports', autospec=True)
def test_add_inspection_network_from_node(self, rollback_mock, add_mock,
validate_mock):
add_mock.return_value = {self.port.uuid: 'vif-port-id'}
@@ -314,7 +320,7 @@ class TestFlatInterface(db_base.DbTestCase):
self.port.internal_info['inspection_vif_port_id'])
@mock.patch.object(neutron, 'validate_network',
- side_effect=lambda n, t, context=None: n)
+ side_effect=lambda n, t, context=None: n, autospec=True)
def test_validate_inspection(self, validate_mock):
inspection_network_uuid = '3aea0de6-4b92-44da-9aa0-52d134c83fdf'
driver_info = self.node.driver_info
@@ -332,3 +338,12 @@ class TestFlatInterface(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.id) as task:
self.assertRaises(exception.UnsupportedDriverExtension,
self.interface.validate_inspection, task)
+
+ @mock.patch.object(neutron, 'get_neutron_port_data', autospec=True)
+ def test_get_node_network_data(self, mock_gnpd):
+ mock_gnpd.return_value = {}
+
+ with task_manager.acquire(self.context, self.node.id) as task:
+ network_data = self.interface.get_node_network_data(task)
+
+ self.assertEqual({}, network_data)
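
The validate_network patches in this file pair side_effect with autospec: the lambda echoes the network identifier back so validation is a pass-through, while autospec still rejects calls that do not match the real signature. A self-contained sketch; the stand-in function below only mirrors the signature the lambda assumes:

    from unittest import mock

    def validate_network(uuid_or_name, net_type, context=None):
        """Stand-in mirroring the signature assumed by the tests."""

    validate_mock = mock.create_autospec(
        validate_network, side_effect=lambda n, t, context=None: n)

    assert validate_mock('net-uuid', 'cleaning network') == 'net-uuid'
    validate_mock.assert_called_once_with('net-uuid', 'cleaning network')
    # A mismatched call such as validate_mock() raises TypeError instead of
    # silently recording garbage.
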
diff --git a/ironic/tests/unit/drivers/modules/network/test_neutron.py b/ironic/tests/unit/drivers/modules/network/test_neutron.py
index 2d083a740..4d8c5e7be 100644
--- a/ironic/tests/unit/drivers/modules/network/test_neutron.py
+++ b/ironic/tests/unit/drivers/modules/network/test_neutron.py
@@ -11,8 +11,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from neutronclient.common import exceptions as neutron_exceptions
from oslo_config import cfg
from oslo_utils import uuidutils
@@ -56,32 +56,33 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.neutron_port = {'id': '132f871f-eaec-4fed-9475-0d54465e0f00',
'mac_address': '52:54:00:cf:2d:32'}
- @mock.patch('%s.vif_list' % VIFMIXINPATH)
+ @mock.patch('%s.vif_list' % VIFMIXINPATH, autospec=True)
def test_vif_list(self, mock_vif_list):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.vif_list(task)
- mock_vif_list.assert_called_once_with(task)
+ mock_vif_list.assert_called_once_with(self.interface, task)
- @mock.patch('%s.vif_attach' % VIFMIXINPATH)
+ @mock.patch('%s.vif_attach' % VIFMIXINPATH, autospec=True)
def test_vif_attach(self, mock_vif_attach):
vif = mock.MagicMock()
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.vif_attach(task, vif)
- mock_vif_attach.assert_called_once_with(task, vif)
+ mock_vif_attach.assert_called_once_with(self.interface, task, vif)
- @mock.patch('%s.vif_detach' % VIFMIXINPATH)
+ @mock.patch('%s.vif_detach' % VIFMIXINPATH, autospec=True)
def test_vif_detach(self, mock_vif_detach):
vif_id = "vif"
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.vif_detach(task, vif_id)
- mock_vif_detach.assert_called_once_with(task, vif_id)
+ mock_vif_detach.assert_called_once_with(
+ self.interface, task, vif_id)
- @mock.patch('%s.port_changed' % VIFMIXINPATH)
+ @mock.patch('%s.port_changed' % VIFMIXINPATH, autospec=True)
def test_vif_port_changed(self, mock_p_changed):
port = mock.MagicMock()
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.port_changed(task, port)
- mock_p_changed.assert_called_once_with(task, port)
+ mock_p_changed.assert_called_once_with(self.interface, task, port)
def test_init_incorrect_provisioning_net(self):
self.config(provisioning_network=None, group='neutron')
@@ -163,9 +164,9 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
validate_mock.call_args_list)
@mock.patch.object(neutron_common, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron_common, 'rollback_ports')
- @mock.patch.object(neutron_common, 'add_ports_to_network')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron_common, 'rollback_ports', autospec=True)
+ @mock.patch.object(neutron_common, 'add_ports_to_network', autospec=True)
def test_add_provisioning_network(self, add_ports_mock, rollback_mock,
validate_mock):
self.port.internal_info = {'provisioning_vif_port_id': 'vif-port-id'}
@@ -186,9 +187,9 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.port.internal_info['provisioning_vif_port_id'])
@mock.patch.object(neutron_common, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron_common, 'rollback_ports')
- @mock.patch.object(neutron_common, 'add_ports_to_network')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron_common, 'rollback_ports', autospec=True)
+ @mock.patch.object(neutron_common, 'add_ports_to_network', autospec=True)
def test_add_provisioning_network_from_node(self, add_ports_mock,
rollback_mock, validate_mock):
self.port.internal_info = {'provisioning_vif_port_id': 'vif-port-id'}
@@ -219,8 +220,8 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
@mock.patch.object(neutron_common, 'validate_network',
lambda n, t, context=None: n)
- @mock.patch.object(neutron_common, 'rollback_ports')
- @mock.patch.object(neutron_common, 'add_ports_to_network')
+ @mock.patch.object(neutron_common, 'rollback_ports', autospec=True)
+ @mock.patch.object(neutron_common, 'add_ports_to_network', autospec=True)
def test_add_provisioning_network_with_sg(self, add_ports_mock,
rollback_mock):
sg_ids = []
@@ -243,8 +244,9 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.port.internal_info['provisioning_vif_port_id'])
@mock.patch.object(neutron_common, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron_common, 'remove_ports_from_network')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron_common, 'remove_ports_from_network',
+ autospec=True)
def test_remove_provisioning_network(self, remove_ports_mock,
validate_mock):
self.port.internal_info = {'provisioning_vif_port_id': 'vif-port-id'}
@@ -260,8 +262,9 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.assertNotIn('provisioning_vif_port_id', self.port.internal_info)
@mock.patch.object(neutron_common, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron_common, 'remove_ports_from_network')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron_common, 'remove_ports_from_network',
+ autospec=True)
def test_remove_provisioning_network_from_node(self, remove_ports_mock,
validate_mock):
self.port.internal_info = {'provisioning_vif_port_id': 'vif-port-id'}
@@ -282,9 +285,9 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.assertNotIn('provisioning_vif_port_id', self.port.internal_info)
@mock.patch.object(neutron_common, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron_common, 'rollback_ports')
- @mock.patch.object(neutron_common, 'add_ports_to_network')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron_common, 'rollback_ports', autospec=True)
+ @mock.patch.object(neutron_common, 'add_ports_to_network', autospec=True)
def test_add_cleaning_network(self, add_ports_mock, rollback_mock,
validate_mock):
add_ports_mock.return_value = {self.port.uuid: self.neutron_port['id']}
@@ -301,9 +304,9 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.port.internal_info['cleaning_vif_port_id'])
@mock.patch.object(neutron_common, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron_common, 'rollback_ports')
- @mock.patch.object(neutron_common, 'add_ports_to_network')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron_common, 'rollback_ports', autospec=True)
+ @mock.patch.object(neutron_common, 'add_ports_to_network', autospec=True)
def test_add_cleaning_network_from_node(self, add_ports_mock,
rollback_mock, validate_mock):
add_ports_mock.return_value = {self.port.uuid: self.neutron_port['id']}
@@ -328,8 +331,8 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
@mock.patch.object(neutron_common, 'validate_network',
lambda n, t, context=None: n)
- @mock.patch.object(neutron_common, 'rollback_ports')
- @mock.patch.object(neutron_common, 'add_ports_to_network')
+ @mock.patch.object(neutron_common, 'rollback_ports', autospec=True)
+ @mock.patch.object(neutron_common, 'add_ports_to_network', autospec=True)
def test_add_cleaning_network_with_sg(self, add_ports_mock, rollback_mock):
add_ports_mock.return_value = {self.port.uuid: self.neutron_port['id']}
sg_ids = []
@@ -349,8 +352,9 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.port.internal_info['cleaning_vif_port_id'])
@mock.patch.object(neutron_common, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron_common, 'remove_ports_from_network')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron_common, 'remove_ports_from_network',
+ autospec=True)
def test_remove_cleaning_network(self, remove_ports_mock,
validate_mock):
self.port.internal_info = {'cleaning_vif_port_id': 'vif-port-id'}
@@ -366,8 +370,9 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.assertNotIn('cleaning_vif_port_id', self.port.internal_info)
@mock.patch.object(neutron_common, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron_common, 'remove_ports_from_network')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron_common, 'remove_ports_from_network',
+ autospec=True)
def test_remove_cleaning_network_from_node(self, remove_ports_mock,
validate_mock):
self.port.internal_info = {'cleaning_vif_port_id': 'vif-port-id'}
@@ -388,7 +393,7 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.assertNotIn('cleaning_vif_port_id', self.port.internal_info)
@mock.patch.object(neutron_common, 'validate_network',
- side_effect=lambda n, t, context=None: n)
+ side_effect=lambda n, t, context=None: n, autospec=True)
def test_validate_rescue(self, validate_mock):
rescuing_network_uuid = '3aea0de6-4b92-44da-9aa0-52d134c83fdf'
driver_info = self.node.driver_info
@@ -409,9 +414,9 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.interface.validate_rescue, task)
@mock.patch.object(neutron_common, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron_common, 'rollback_ports')
- @mock.patch.object(neutron_common, 'add_ports_to_network')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron_common, 'rollback_ports', autospec=True)
+ @mock.patch.object(neutron_common, 'add_ports_to_network', autospec=True)
def test_add_rescuing_network(self, add_ports_mock, rollback_mock,
validate_mock):
other_port = utils.create_test_port(
@@ -440,9 +445,9 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.assertNotIn('rescuing_vif_port_id', self.port.internal_info)
@mock.patch.object(neutron_common, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron_common, 'rollback_ports')
- @mock.patch.object(neutron_common, 'add_ports_to_network')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron_common, 'rollback_ports', autospec=True)
+ @mock.patch.object(neutron_common, 'add_ports_to_network', autospec=True)
def test_add_rescuing_network_from_node(self, add_ports_mock,
rollback_mock, validate_mock):
other_port = utils.create_test_port(
@@ -477,8 +482,8 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
@mock.patch.object(neutron_common, 'validate_network',
lambda n, t, context=None: n)
- @mock.patch.object(neutron_common, 'rollback_ports')
- @mock.patch.object(neutron_common, 'add_ports_to_network')
+ @mock.patch.object(neutron_common, 'rollback_ports', autospec=True)
+ @mock.patch.object(neutron_common, 'add_ports_to_network', autospec=True)
def test_add_rescuing_network_with_sg(self, add_ports_mock, rollback_mock):
add_ports_mock.return_value = {self.port.uuid: self.neutron_port['id']}
sg_ids = []
@@ -498,8 +503,9 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.port.internal_info['rescuing_vif_port_id'])
@mock.patch.object(neutron_common, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron_common, 'remove_ports_from_network')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron_common, 'remove_ports_from_network',
+ autospec=True)
def test_remove_rescuing_network(self, remove_ports_mock,
validate_mock):
other_port = utils.create_test_port(
@@ -520,16 +526,16 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.assertNotIn('rescuing_vif_port_id', self.port.internal_info)
self.assertNotIn('rescuing_vif_port_id', other_port.internal_info)
- @mock.patch.object(neutron_common, 'unbind_neutron_port')
+ @mock.patch.object(neutron_common, 'unbind_neutron_port', autospec=True)
def test_unconfigure_tenant_networks(self, mock_unbind_port):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.unconfigure_tenant_networks(task)
mock_unbind_port.assert_called_once_with(
self.port.extra['vif_port_id'], context=task.context)
- @mock.patch.object(neutron_common, 'get_client')
- @mock.patch.object(neutron_common, 'wait_for_host_agent')
- @mock.patch.object(neutron_common, 'unbind_neutron_port')
+ @mock.patch.object(neutron_common, 'get_client', autospec=True)
+ @mock.patch.object(neutron_common, 'wait_for_host_agent', autospec=True)
+ @mock.patch.object(neutron_common, 'unbind_neutron_port', autospec=True)
def test_unconfigure_tenant_networks_smartnic(
self, mock_unbind_port, wait_agent_mock, client_mock):
nclient = mock.MagicMock()
@@ -553,8 +559,8 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
exception.NetworkError, 'No ports are associated',
self.interface.configure_tenant_networks, task)
- @mock.patch.object(neutron_common, 'get_client')
- @mock.patch.object(neutron, 'LOG')
+ @mock.patch.object(neutron_common, 'get_client', autospec=True)
+ @mock.patch.object(neutron, 'LOG', autospec=True)
def test_configure_tenant_networks_no_vif_id(self, log_mock, client_mock):
self.port.extra = {}
self.port.save()
@@ -572,9 +578,9 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
log_mock.error.call_args[0][0])
@mock.patch.object(neutron_common, 'wait_for_host_agent', autospec=True)
- @mock.patch.object(neutron_common, 'update_neutron_port')
- @mock.patch.object(neutron_common, 'get_client')
- @mock.patch.object(neutron, 'LOG')
+ @mock.patch.object(neutron_common, 'update_neutron_port', autospec=True)
+ @mock.patch.object(neutron_common, 'get_client', autospec=True)
+ @mock.patch.object(neutron, 'LOG', autospec=True)
def test_configure_tenant_networks_multiple_ports_one_vif_id(
self, log_mock, client_mock, update_mock, wait_agent_mock):
expected_body = {
@@ -594,8 +600,8 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
expected_body)
@mock.patch.object(neutron_common, 'wait_for_host_agent', autospec=True)
- @mock.patch.object(neutron_common, 'update_neutron_port')
- @mock.patch.object(neutron_common, 'get_client')
+ @mock.patch.object(neutron_common, 'update_neutron_port', autospec=True)
+ @mock.patch.object(neutron_common, 'get_client', autospec=True)
def test_configure_tenant_networks_update_fail(self, client_mock,
update_mock,
wait_agent_mock):
@@ -608,8 +614,8 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
client_mock.assert_called_once_with(context=task.context)
@mock.patch.object(neutron_common, 'wait_for_host_agent', autospec=True)
- @mock.patch.object(neutron_common, 'update_neutron_port')
- @mock.patch.object(neutron_common, 'get_client')
+ @mock.patch.object(neutron_common, 'update_neutron_port', autospec=True)
+ @mock.patch.object(neutron_common, 'get_client', autospec=True)
def _test_configure_tenant_networks(self, client_mock, update_mock,
wait_agent_mock,
is_client_id=False,
@@ -693,13 +699,15 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.node.save()
self._test_configure_tenant_networks(is_client_id=True)
+ @mock.patch.object(neutron_common, 'get_neutron_port_data', autospec=True)
@mock.patch.object(neutron_common, 'wait_for_host_agent', autospec=True)
@mock.patch.object(neutron_common, 'update_neutron_port', autospec=True)
@mock.patch.object(neutron_common, 'get_client', autospec=True)
@mock.patch.object(neutron_common, 'get_local_group_information',
autospec=True)
def test_configure_tenant_networks_with_portgroups(
- self, glgi_mock, client_mock, update_mock, wait_agent_mock):
+ self, glgi_mock, client_mock, update_mock, wait_agent_mock,
+ port_data_mock):
pg = utils.create_test_portgroup(
self.context, node_id=self.node.id, address='ff:54:00:cf:2d:32',
extra={'vif_port_id': uuidutils.generate_uuid()})
@@ -765,9 +773,9 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.assertFalse(self.interface.need_power_on(task))
@mock.patch.object(neutron_common, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron_common, 'rollback_ports')
- @mock.patch.object(neutron_common, 'add_ports_to_network')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron_common, 'rollback_ports', autospec=True)
+ @mock.patch.object(neutron_common, 'add_ports_to_network', autospec=True)
def test_add_inspection_network(self, add_ports_mock, rollback_mock,
validate_mock):
add_ports_mock.return_value = {self.port.uuid: self.neutron_port['id']}
@@ -784,9 +792,9 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.port.internal_info['inspection_vif_port_id'])
@mock.patch.object(neutron_common, 'validate_network',
- side_effect=lambda n, t, context=None: n)
- @mock.patch.object(neutron_common, 'rollback_ports')
- @mock.patch.object(neutron_common, 'add_ports_to_network')
+ side_effect=lambda n, t, context=None: n, autospec=True)
+ @mock.patch.object(neutron_common, 'rollback_ports', autospec=True)
+ @mock.patch.object(neutron_common, 'add_ports_to_network', autospec=True)
def test_add_inspection_network_from_node(self, add_ports_mock,
rollback_mock, validate_mock):
add_ports_mock.return_value = {self.port.uuid: self.neutron_port['id']}
@@ -812,8 +820,8 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
@mock.patch.object(neutron_common, 'validate_network',
lambda n, t, context=None: n)
- @mock.patch.object(neutron_common, 'rollback_ports')
- @mock.patch.object(neutron_common, 'add_ports_to_network')
+ @mock.patch.object(neutron_common, 'rollback_ports', autospec=True)
+ @mock.patch.object(neutron_common, 'add_ports_to_network', autospec=True)
def test_add_inspection_network_with_sg(self, add_ports_mock,
rollback_mock):
add_ports_mock.return_value = {self.port.uuid: self.neutron_port['id']}
@@ -835,7 +843,7 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
self.port.internal_info['inspection_vif_port_id'])
@mock.patch.object(neutron_common, 'validate_network',
- side_effect=lambda n, t, context=None: n)
+ side_effect=lambda n, t, context=None: n, autospec=True)
def test_validate_inspection(self, validate_mock):
inspection_network_uuid = '3aea0de6-4b92-44da-9aa0-52d134c83fdf'
driver_info = self.node.driver_info
@@ -853,3 +861,12 @@ class NeutronInterfaceTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.id) as task:
self.assertRaises(exception.UnsupportedDriverExtension,
self.interface.validate_inspection, task)
+
+ @mock.patch.object(neutron_common, 'get_neutron_port_data', autospec=True)
+ def test_get_node_network_data(self, mock_gnpd):
+ mock_gnpd.return_value = {}
+
+ with task_manager.acquire(self.context, self.node.id) as task:
+ network_data = self.interface.get_node_network_data(task)
+
+ self.assertEqual({}, network_data)
diff --git a/ironic/tests/unit/drivers/modules/network/test_noop.py b/ironic/tests/unit/drivers/modules/network/test_noop.py
index be432988e..6de7812c2 100644
--- a/ironic/tests/unit/drivers/modules/network/test_noop.py
+++ b/ironic/tests/unit/drivers/modules/network/test_noop.py
@@ -94,3 +94,9 @@ class NoopInterfaceTestCase(db_base.DbTestCase):
def test_remove_inspection_network(self):
with task_manager.acquire(self.context, self.node.id) as task:
self.interface.remove_inspection_network(task)
+
+ def test_get_node_network_data(self):
+ with task_manager.acquire(self.context, self.node.id) as task:
+ network_data = self.interface.get_node_network_data(task)
+
+ self.assertEqual({}, network_data)
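
Each network interface test file above now ends with the same check: get_node_network_data hands back a mapping, empty in the no-op case and equal to the (mocked) neutron port data otherwise. A hedged sketch of the contract the tests pin down, not the ironic implementation:

    class NoopNetwork:
        def get_node_network_data(self, task):
            # The hook always yields a dict; the no-op interface reports none.
            return {}

    assert NoopNetwork().get_node_network_data(task=None) == {}
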
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_bios.py b/ironic/tests/unit/drivers/modules/redfish/test_bios.py
index 0dc770c7c..96269f640 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_bios.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_bios.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import importutils
from ironic.common import exception
@@ -92,14 +93,13 @@ class RedfishBiosTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
- attributes = mock_get_system(task.node).bios.attributes
- settings = [{'name': k, 'value': v} for k, v in attributes.items()]
- mock_get_system.reset_mock()
+ settings = {'foo': 'bar'}
+ mock_get_system.return_value.bios.attributes = settings
task.driver.bios.cache_bios_settings(task)
mock_get_system.assert_called_once_with(task.node)
mock_setting_list.sync_node_setting.assert_called_once_with(
- task.context, task.node.id, settings)
+ task.context, task.node.id, [{'name': 'foo', 'value': 'bar'}])
mock_setting_list.create.assert_not_called()
mock_setting_list.save.assert_not_called()
mock_setting_list.delete.assert_not_called()
@@ -147,14 +147,13 @@ class RedfishBiosTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
- attributes = mock_get_system(task.node).bios.attributes
- settings = [{'name': k, 'value': v} for k, v in attributes.items()]
- mock_get_system.reset_mock()
+ settings = {'foo': 'bar'}
+ mock_get_system.return_value.bios.attributes = settings
task.driver.bios.cache_bios_settings(task)
mock_get_system.assert_called_once_with(task.node)
mock_setting_list.sync_node_setting.assert_called_once_with(
- task.context, task.node.id, settings)
+ task.context, task.node.id, [{'name': 'foo', 'value': 'bar'}])
mock_setting_list.create.assert_called_once_with(
task.context, task.node.id, create_list)
mock_setting_list.save.assert_called_once_with(
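
The rewrite above feeds a known attributes mapping into the mocked system instead of reading values back off the mock, so the expected sync payload can be written literally. The transformation is the same comprehension the removed lines spelled out:

    attributes = {'foo': 'bar'}
    settings = [{'name': k, 'value': v} for k, v in attributes.items()]
    assert settings == [{'name': 'foo', 'value': 'bar'}]
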
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_boot.py b/ironic/tests/unit/drivers/modules/redfish/test_boot.py
index c1d86486a..b365eb830 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_boot.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_boot.py
@@ -14,8 +14,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from oslo_utils import importutils
from ironic.common import boot_devices
@@ -67,7 +67,7 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
'bootloader': 'bootloader'}
)
- actual_driver_info = task.driver.boot._parse_driver_info(task.node)
+ actual_driver_info = redfish_boot._parse_driver_info(task.node)
self.assertIn('kernel', actual_driver_info['deploy_kernel'])
self.assertIn('ramdisk', actual_driver_info['deploy_ramdisk'])
@@ -83,7 +83,7 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
'bootloader': 'bootloader'}
)
- actual_driver_info = task.driver.boot._parse_driver_info(task.node)
+ actual_driver_info = redfish_boot._parse_driver_info(task.node)
self.assertIn('kernel', actual_driver_info['rescue_kernel'])
self.assertIn('ramdisk', actual_driver_info['rescue_ramdisk'])
@@ -93,7 +93,7 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.MissingParameterValue,
- task.driver.boot._parse_driver_info,
+ redfish_boot._parse_driver_info,
task.node)
def _test_parse_driver_info_from_conf(self, mode='deploy'):
@@ -109,7 +109,7 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
self.config(group='conductor', **expected)
- image_info = task.driver.boot._parse_driver_info(task.node)
+ image_info = redfish_boot._parse_driver_info(task.node)
for key, value in expected.items():
self.assertEqual(value, image_info[key])
@@ -139,7 +139,7 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
task.node.driver_info.update(ramdisk_config)
self.assertRaises(exception.MissingParameterValue,
- task.driver.boot._parse_driver_info, task.node)
+ redfish_boot._parse_driver_info, task.node)
def test_parse_driver_info_mixed_source_deploy(self):
self._test_parse_driver_info_mixed_source()
@@ -161,8 +161,7 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
'kernel': 'http://kernel/img',
'ramdisk': 'http://ramdisk/img'})
- actual_instance_info = task.driver.boot._parse_deploy_info(
- task.node)
+ actual_instance_info = redfish_boot._parse_deploy_info(task.node)
self.assertEqual(
'http://boot/iso', actual_instance_info['image_source'])
@@ -175,52 +174,44 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.MissingParameterValue,
- task.driver.boot._parse_deploy_info,
+ redfish_boot._parse_deploy_info,
task.node)
def test__append_filename_param_without_qs(self):
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- res = task.driver.boot._append_filename_param(
- 'http://a.b/c', 'b.img')
- expected = 'http://a.b/c?filename=b.img'
- self.assertEqual(expected, res)
+ res = redfish_boot._append_filename_param(
+ 'http://a.b/c', 'b.img')
+ expected = 'http://a.b/c?filename=b.img'
+ self.assertEqual(expected, res)
def test__append_filename_param_with_qs(self):
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- res = task.driver.boot._append_filename_param(
- 'http://a.b/c?d=e&f=g', 'b.img')
- expected = 'http://a.b/c?d=e&f=g&filename=b.img'
- self.assertEqual(expected, res)
+ res = redfish_boot._append_filename_param(
+ 'http://a.b/c?d=e&f=g', 'b.img')
+ expected = 'http://a.b/c?d=e&f=g&filename=b.img'
+ self.assertEqual(expected, res)
def test__append_filename_param_with_filename(self):
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- res = task.driver.boot._append_filename_param(
- 'http://a.b/c?filename=bootme.img', 'b.img')
- expected = 'http://a.b/c?filename=bootme.img'
- self.assertEqual(expected, res)
+ res = redfish_boot._append_filename_param(
+ 'http://a.b/c?filename=bootme.img', 'b.img')
+ expected = 'http://a.b/c?filename=bootme.img'
+ self.assertEqual(expected, res)
@mock.patch.object(redfish_boot, 'swift', autospec=True)
def test__publish_image_swift(self, mock_swift):
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- mock_swift_api = mock_swift.SwiftAPI.return_value
- mock_swift_api.get_temp_url.return_value = 'https://a.b/c.f?e=f'
+ mock_swift_api = mock_swift.SwiftAPI.return_value
+ mock_swift_api.get_temp_url.return_value = 'https://a.b/c.f?e=f'
- url = task.driver.boot._publish_image('file.iso', 'boot.iso')
+ url = redfish_boot._publish_image('file.iso', 'boot.iso')
- self.assertEqual(
- 'https://a.b/c.f?e=f&filename=file.iso', url)
+ self.assertEqual(
+ 'https://a.b/c.f?e=f&filename=file.iso', url)
- mock_swift.SwiftAPI.assert_called_once_with()
+ mock_swift.SwiftAPI.assert_called_once_with()
- mock_swift_api.create_object.assert_called_once_with(
- mock.ANY, mock.ANY, mock.ANY, mock.ANY)
+ mock_swift_api.create_object.assert_called_once_with(
+ mock.ANY, mock.ANY, mock.ANY, mock.ANY)
- mock_swift_api.get_temp_url.assert_called_once_with(
- mock.ANY, mock.ANY, mock.ANY)
+ mock_swift_api.get_temp_url.assert_called_once_with(
+ mock.ANY, mock.ANY, mock.ANY)
@mock.patch.object(redfish_boot, 'swift', autospec=True)
def test__unpublish_image_swift(self, mock_swift):
@@ -228,7 +219,7 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
shared=True) as task:
object_name = 'image-%s' % task.node.uuid
- task.driver.boot._unpublish_image(object_name)
+ redfish_boot._unpublish_image(object_name)
mock_swift.SwiftAPI.assert_called_once_with()
mock_swift_api = mock_swift.SwiftAPI.return_value
@@ -236,48 +227,47 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
mock_swift_api.delete_object.assert_called_once_with(
'ironic_redfish_container', object_name)
+ @mock.patch.object(os, 'chmod', autospec=True)
@mock.patch.object(redfish_boot, 'shutil', autospec=True)
@mock.patch.object(os, 'link', autospec=True)
@mock.patch.object(os, 'mkdir', autospec=True)
def test__publish_image_local_link(
- self, mock_mkdir, mock_link, mock_shutil):
+ self, mock_mkdir, mock_link, mock_shutil, mock_chmod):
self.config(use_swift=False, group='redfish')
self.config(http_url='http://localhost', group='deploy')
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
-
- url = task.driver.boot._publish_image('file.iso', 'boot.iso')
+ url = redfish_boot._publish_image('file.iso', 'boot.iso')
- self.assertEqual(
- 'http://localhost/redfish/boot.iso?filename=file.iso', url)
+ self.assertEqual(
+ 'http://localhost/redfish/boot.iso?filename=file.iso', url)
- mock_mkdir.assert_called_once_with('/httpboot/redfish', 0x755)
- mock_link.assert_called_once_with(
- 'file.iso', '/httpboot/redfish/boot.iso')
+ mock_mkdir.assert_called_once_with('/httpboot/redfish', 0o755)
+ mock_link.assert_called_once_with(
+ 'file.iso', '/httpboot/redfish/boot.iso')
+ mock_chmod.assert_called_once_with('file.iso', 0o644)
+ @mock.patch.object(os, 'chmod', autospec=True)
@mock.patch.object(redfish_boot, 'shutil', autospec=True)
@mock.patch.object(os, 'link', autospec=True)
@mock.patch.object(os, 'mkdir', autospec=True)
def test__publish_image_local_copy(
- self, mock_mkdir, mock_link, mock_shutil):
+ self, mock_mkdir, mock_link, mock_shutil, mock_chmod):
self.config(use_swift=False, group='redfish')
self.config(http_url='http://localhost', group='deploy')
mock_link.side_effect = OSError()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
-
- url = task.driver.boot._publish_image('file.iso', 'boot.iso')
+ url = redfish_boot._publish_image('file.iso', 'boot.iso')
- self.assertEqual(
- 'http://localhost/redfish/boot.iso?filename=file.iso', url)
+ self.assertEqual(
+ 'http://localhost/redfish/boot.iso?filename=file.iso', url)
- mock_mkdir.assert_called_once_with('/httpboot/redfish', 0x755)
+ mock_mkdir.assert_called_once_with('/httpboot/redfish', 0o755)
- mock_shutil.copyfile.assert_called_once_with(
- 'file.iso', '/httpboot/redfish/boot.iso')
+ mock_shutil.copyfile.assert_called_once_with(
+ 'file.iso', '/httpboot/redfish/boot.iso')
+ mock_chmod.assert_called_once_with('/httpboot/redfish/boot.iso',
+ 0o644)
@mock.patch.object(redfish_boot, 'ironic_utils', autospec=True)
def test__unpublish_image_local(self, mock_ironic_utils):
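
The mode fix above is easy to miss: 0x755 is a hexadecimal literal (decimal 1877), not the intended rwxr-xr-x mask, since POSIX permission bits are octal. The corrected expectations are 0o755 for the directory and 0o644 for the published file:

    assert 0x755 == 1877   # hex literal: not a permission mask
    assert 0o755 == 493    # octal rwxr-xr-x, what os.mkdir should receive
    assert 0o644 == 420    # octal rw-r--r-- for the published image
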
@@ -289,24 +279,22 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
expected_file = '/httpboot/redfish/' + object_name
- task.driver.boot._unpublish_image(object_name)
+ redfish_boot._unpublish_image(object_name)
mock_ironic_utils.unlink_without_raise.assert_called_once_with(
expected_file)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_unpublish_image', autospec=True)
+ @mock.patch.object(redfish_boot, '_unpublish_image', autospec=True)
def test__cleanup_floppy_image(self, mock_unpublish):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
- task.driver.boot._cleanup_floppy_image(task)
+ redfish_boot._cleanup_floppy_image(task)
object_name = 'image-%s' % task.node.uuid
mock_unpublish.assert_called_once_with(object_name)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_publish_image', autospec=True)
+ @mock.patch.object(redfish_boot, '_publish_image', autospec=True)
@mock.patch.object(images, 'create_vfat_image', autospec=True)
def test__prepare_floppy_image(
self, mock_create_vfat_image, mock__publish_image):
@@ -316,7 +304,7 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
mock__publish_image.return_value = expected_url
- url = task.driver.boot._prepare_floppy_image(task)
+ url = redfish_boot._prepare_floppy_image(task)
object_name = 'image-%s' % task.node.uuid
@@ -328,19 +316,17 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
self.assertEqual(expected_url, url)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_unpublish_image', autospec=True)
+ @mock.patch.object(redfish_boot, '_unpublish_image', autospec=True)
def test__cleanup_iso_image(self, mock_unpublish):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
- task.driver.boot._cleanup_iso_image(task)
+ redfish_boot._cleanup_iso_image(task)
object_name = 'boot-%s' % task.node.uuid
mock_unpublish.assert_called_once_with(object_name)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_publish_image', autospec=True)
+ @mock.patch.object(redfish_boot, '_publish_image', autospec=True)
@mock.patch.object(images, 'create_boot_iso', autospec=True)
def test__prepare_iso_image_uefi(
self, mock_create_boot_iso, mock__publish_image):
@@ -352,9 +338,10 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
mock__publish_image.return_value = expected_url
- url = task.driver.boot._prepare_iso_image(
+ url = redfish_boot._prepare_iso_image(
task, 'http://kernel/img', 'http://ramdisk/img',
- 'http://bootloader/img', root_uuid=task.node.uuid)
+ 'http://bootloader/img', root_uuid=task.node.uuid,
+ base_iso=None)
object_name = 'boot-%s' % task.node.uuid
@@ -366,12 +353,12 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
boot_mode='uefi', esp_image_href='http://bootloader/img',
configdrive_href=mock.ANY,
kernel_params='nofb nomodeset vga=normal',
- root_uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c123')
+ root_uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
+ base_iso=None)
self.assertEqual(expected_url, url)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_publish_image', autospec=True)
+ @mock.patch.object(redfish_boot, '_publish_image', autospec=True)
@mock.patch.object(images, 'create_boot_iso', autospec=True)
def test__prepare_iso_image_bios(
self, mock_create_boot_iso, mock__publish_image):
@@ -382,7 +369,7 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
mock__publish_image.return_value = expected_url
- url = task.driver.boot._prepare_iso_image(
+ url = redfish_boot._prepare_iso_image(
task, 'http://kernel/img', 'http://ramdisk/img',
bootloader_href=None, root_uuid=task.node.uuid)
@@ -396,12 +383,12 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
boot_mode=None, esp_image_href=None,
configdrive_href=mock.ANY,
kernel_params='nofb nomodeset vga=normal',
- root_uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c123')
+ root_uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
+ base_iso=None)
self.assertEqual(expected_url, url)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_publish_image', autospec=True)
+ @mock.patch.object(redfish_boot, '_publish_image', autospec=True)
@mock.patch.object(images, 'create_boot_iso', autospec=True)
def test__prepare_iso_image_kernel_params(
self, mock_create_boot_iso, mock__publish_image):
@@ -411,19 +398,43 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
task.node.instance_info.update(kernel_append_params=kernel_params)
- task.driver.boot._prepare_iso_image(
+ redfish_boot._prepare_iso_image(
task, 'http://kernel/img', 'http://ramdisk/img',
- bootloader_href=None, root_uuid=task.node.uuid)
+ bootloader_href=None, root_uuid=task.node.uuid,
+ base_iso=None)
mock_create_boot_iso.assert_called_once_with(
mock.ANY, mock.ANY, 'http://kernel/img', 'http://ramdisk/img',
boot_mode=None, esp_image_href=None,
configdrive_href=mock.ANY,
kernel_params=kernel_params,
- root_uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c123')
+ root_uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
+ base_iso=None)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_prepare_iso_image', autospec=True)
+ @mock.patch.object(redfish_boot, '_publish_image', autospec=True)
+ @mock.patch.object(images, 'create_boot_iso', autospec=True)
+ def test__prepare_iso_image_boot_iso(
+ self, mock_create_boot_iso, mock__publish_image):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+
+ task.node.instance_info = {'boot_iso': 'http://host/boot.iso',
+ 'capabilities': {
+ 'boot_option': 'ramdisk'}}
+
+ redfish_boot._prepare_iso_image(
+ task, None, None, root_uuid=None,
+ base_iso='http://host/boot.iso')
+
+ mock_create_boot_iso.assert_called_once_with(
+ mock.ANY, mock.ANY, None, None,
+ boot_mode=None, esp_image_href=None,
+ configdrive_href=None,
+ kernel_params=None,
+ root_uuid=None,
+ base_iso='http://host/boot.iso')
+
+ @mock.patch.object(redfish_boot, '_prepare_iso_image', autospec=True)
def test__prepare_deploy_iso(self, mock__prepare_iso_image):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
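
The new base_iso argument threads a user-supplied ISO from instance_info through _prepare_iso_image into images.create_boot_iso, in which case the kernel and ramdisk hrefs may be None. A hedged sketch of the pass-through these assertions pin down; the helper bodies are illustrative, not ironic's:

    def create_boot_iso(kernel_href, ramdisk_href, base_iso=None, **kwargs):
        # Illustrative: a supplied ISO wins over building one from components.
        return base_iso or 'built-from-%s-%s' % (kernel_href, ramdisk_href)

    def _prepare_iso_image(task, kernel_href, ramdisk_href,
                           base_iso=None, **kwargs):
        return create_boot_iso(kernel_href, ramdisk_href, base_iso=base_iso)

    assert _prepare_iso_image(
        None, None, None,
        base_iso='http://host/boot.iso') == 'http://host/boot.iso'
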
@@ -436,13 +447,40 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
task.node.instance_info.update(deploy_boot_mode='uefi')
- task.driver.boot._prepare_deploy_iso(task, {}, 'deploy')
+ redfish_boot._prepare_deploy_iso(task, {}, 'deploy')
mock__prepare_iso_image.assert_called_once_with(
- mock.ANY, 'kernel', 'ramdisk', 'bootloader', params={})
+ task, 'kernel', 'ramdisk', 'bootloader', params={})
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_prepare_iso_image', autospec=True)
+ @mock.patch.object(redfish_boot, '_prepare_iso_image', autospec=True)
+ @mock.patch.object(images, 'create_vfat_image', autospec=True)
+ def test__prepare_deploy_iso_network_data(
+ self, mock_create_vfat_image, mock__prepare_iso_image):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+
+ task.node.driver_info.update(
+ {'deploy_kernel': 'kernel',
+ 'deploy_ramdisk': 'ramdisk'}
+ )
+
+ task.node.instance_info.update()
+
+ network_data = {'a': ['b']}
+
+ mock_get_node_nw_data = mock.MagicMock(return_value=network_data)
+ task.driver.network.get_node_network_data = mock_get_node_nw_data
+
+ redfish_boot._prepare_deploy_iso(task, {}, 'deploy')
+
+ mock_create_vfat_image.assert_called_once_with(
+ mock.ANY, mock.ANY)
+
+ mock__prepare_iso_image.assert_called_once_with(
+ task, 'kernel', 'ramdisk', bootloader_href=None,
+ configdrive=mock.ANY, params={})
+
+ @mock.patch.object(redfish_boot, '_prepare_iso_image', autospec=True)
@mock.patch.object(images, 'create_boot_iso', autospec=True)
def test__prepare_boot_iso(self, mock_create_boot_iso,
mock__prepare_iso_image):
@@ -459,12 +497,35 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
'kernel': 'http://kernel/img',
'ramdisk': 'http://ramdisk/img'})
- task.driver.boot._prepare_boot_iso(
+ redfish_boot._prepare_boot_iso(
task, root_uuid=task.node.uuid)
mock__prepare_iso_image.assert_called_once_with(
mock.ANY, 'http://kernel/img', 'http://ramdisk/img',
- 'bootloader', root_uuid=task.node.uuid)
+ 'bootloader', root_uuid=task.node.uuid, base_iso=None)
+
+ @mock.patch.object(redfish_boot, '_prepare_iso_image', autospec=True)
+ @mock.patch.object(images, 'create_boot_iso', autospec=True)
+ def test__prepare_boot_iso_user_supplied(self, mock_create_boot_iso,
+ mock__prepare_iso_image):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.node.driver_info.update(
+ {'deploy_kernel': 'kernel',
+ 'deploy_ramdisk': 'ramdisk',
+ 'bootloader': 'bootloader'}
+ )
+
+ task.node.instance_info.update(
+ {'boot_iso': 'http://boot/iso'})
+
+ redfish_boot._prepare_boot_iso(
+ task, root_uuid=task.node.uuid)
+
+ mock__prepare_iso_image.assert_called_once_with(
+ mock.ANY, None, None,
+ 'bootloader', root_uuid=task.node.uuid,
+ base_iso='http://boot/iso')
@mock.patch.object(redfish_utils, 'parse_driver_info', autospec=True)
@mock.patch.object(deploy_utils, 'validate_image_properties',
@@ -527,6 +588,64 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
@mock.patch.object(redfish_utils, 'parse_driver_info', autospec=True)
@mock.patch.object(deploy_utils, 'validate_image_properties',
autospec=True)
+ @mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
+ autospec=True)
+ def test_validate_bios_boot_iso(self, mock_get_boot_mode,
+ mock_validate_image_properties,
+ mock_parse_driver_info):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.node.instance_info.update(
+ {'boot_iso': 'http://localhost/file.iso'}
+ )
+
+ task.node.driver_info.update(
+ {'deploy_kernel': 'kernel',
+ 'deploy_ramdisk': 'ramdisk',
+ 'bootloader': 'bootloader'}
+ )
+ # NOTE(TheJulia): Boot mode doesn't matter for this
+ # test scenario.
+ mock_get_boot_mode.return_value = 'bios'
+
+ task.driver.boot.validate(task)
+
+ mock_validate_image_properties.assert_called_once_with(
+ mock.ANY, mock.ANY, mock.ANY)
+
+ @mock.patch.object(redfish_utils, 'parse_driver_info', autospec=True)
+ @mock.patch.object(deploy_utils, 'validate_image_properties',
+ autospec=True)
+ @mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
+ autospec=True)
+ def test_validate_bios_boot_iso_conflicting_image_source(
+ self, mock_get_boot_mode,
+ mock_validate_image_properties,
+ mock_parse_driver_info):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.node.instance_info.update(
+ {'boot_iso': 'http://localhost/file.iso',
+ 'image_source': 'http://localhost/file.img'}
+ )
+
+ task.node.driver_info.update(
+ {'deploy_kernel': 'kernel',
+ 'deploy_ramdisk': 'ramdisk',
+ 'bootloader': 'bootloader'}
+ )
+ # NOTE(TheJulia): Boot mode doesn't matter for this
+ # test scenario.
+ mock_get_boot_mode.return_value = 'bios'
+
+ task.driver.boot.validate(task)
+
+ mock_validate_image_properties.assert_called_once_with(
+ mock.ANY, mock.ANY, mock.ANY)
+
+ @mock.patch.object(redfish_utils, 'parse_driver_info', autospec=True)
+ @mock.patch.object(deploy_utils, 'validate_image_properties',
+ autospec=True)
def test_validate_missing(self, mock_validate_image_properties,
mock_parse_driver_info):
with task_manager.acquire(self.context, self.node.uuid,
@@ -557,14 +676,10 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
@mock.patch.object(redfish_boot.manager_utils, 'node_set_boot_device',
autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_prepare_deploy_iso', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_eject_vmedia', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_insert_vmedia', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_parse_driver_info', autospec=True)
+ @mock.patch.object(redfish_boot, '_prepare_deploy_iso', autospec=True)
+ @mock.patch.object(redfish_boot, '_eject_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_insert_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_parse_driver_info', autospec=True)
@mock.patch.object(redfish_boot.manager_utils, 'node_power_action',
autospec=True)
@mock.patch.object(redfish_boot, 'boot_mode_utils', autospec=True)
@@ -607,14 +722,10 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
@mock.patch.object(redfish_boot.manager_utils, 'node_set_boot_device',
autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_prepare_deploy_iso', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_eject_vmedia', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_insert_vmedia', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_parse_driver_info', autospec=True)
+ @mock.patch.object(redfish_boot, '_prepare_deploy_iso', autospec=True)
+ @mock.patch.object(redfish_boot, '_eject_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_insert_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_parse_driver_info', autospec=True)
@mock.patch.object(redfish_boot.manager_utils, 'node_power_action',
autospec=True)
@mock.patch.object(redfish_boot, 'boot_mode_utils', autospec=True)
@@ -656,18 +767,12 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
@mock.patch.object(redfish_boot.manager_utils, 'node_set_boot_device',
autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_prepare_floppy_image', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_prepare_deploy_iso', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_has_vmedia_device', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_eject_vmedia', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_insert_vmedia', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_parse_driver_info', autospec=True)
+ @mock.patch.object(redfish_boot, '_prepare_floppy_image', autospec=True)
+ @mock.patch.object(redfish_boot, '_prepare_deploy_iso', autospec=True)
+ @mock.patch.object(redfish_boot, '_has_vmedia_device', autospec=True)
+ @mock.patch.object(redfish_boot, '_eject_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_insert_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_parse_driver_info', autospec=True)
@mock.patch.object(redfish_boot.manager_utils, 'node_power_action',
autospec=True)
@mock.patch.object(redfish_boot, 'boot_mode_utils', autospec=True)
@@ -728,16 +833,11 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
mock_boot_mode_utils.sync_boot_mode.assert_called_once_with(task)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_has_vmedia_device', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_eject_vmedia', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_cleanup_iso_image', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_cleanup_floppy_image', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_parse_driver_info', autospec=True)
+ @mock.patch.object(redfish_boot, '_has_vmedia_device', autospec=True)
+ @mock.patch.object(redfish_boot, '_eject_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_cleanup_iso_image', autospec=True)
+ @mock.patch.object(redfish_boot, '_cleanup_floppy_image', autospec=True)
+ @mock.patch.object(redfish_boot, '_parse_driver_info', autospec=True)
def test_clean_up_ramdisk(
self, mock__parse_driver_info, mock__cleanup_floppy_image,
mock__cleanup_iso_image, mock__eject_vmedia,
@@ -768,14 +868,10 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
@mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
'clean_up_instance', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_prepare_boot_iso', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_eject_vmedia', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_insert_vmedia', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_parse_driver_info', autospec=True)
+ @mock.patch.object(redfish_boot, '_prepare_boot_iso', autospec=True)
+ @mock.patch.object(redfish_boot, '_eject_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_insert_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_parse_driver_info', autospec=True)
@mock.patch.object(redfish_boot, 'manager_utils', autospec=True)
@mock.patch.object(redfish_boot, 'deploy_utils', autospec=True)
@mock.patch.object(redfish_boot, 'boot_mode_utils', autospec=True)
@@ -817,14 +913,10 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
@mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
'clean_up_instance', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_prepare_boot_iso', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_eject_vmedia', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_insert_vmedia', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_parse_driver_info', autospec=True)
+ @mock.patch.object(redfish_boot, '_prepare_boot_iso', autospec=True)
+ @mock.patch.object(redfish_boot, '_eject_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_insert_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_parse_driver_info', autospec=True)
@mock.patch.object(redfish_boot, 'manager_utils', autospec=True)
@mock.patch.object(redfish_boot, 'deploy_utils', autospec=True)
@mock.patch.object(redfish_boot, 'boot_mode_utils', autospec=True)
@@ -859,9 +951,86 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
mock_boot_mode_utils.sync_boot_mode.assert_called_once_with(task)
@mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_eject_vmedia', autospec=True)
+ 'clean_up_instance', autospec=True)
+ @mock.patch.object(redfish_boot, '_prepare_boot_iso', autospec=True)
+ @mock.patch.object(redfish_boot, '_eject_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_insert_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_parse_driver_info', autospec=True)
+ @mock.patch.object(redfish_boot, 'manager_utils', autospec=True)
+ @mock.patch.object(redfish_boot, 'deploy_utils', autospec=True)
+ @mock.patch.object(redfish_boot, 'boot_mode_utils', autospec=True)
+ def test_prepare_instance_ramdisk_boot_iso(
+ self, mock_boot_mode_utils, mock_deploy_utils, mock_manager_utils,
+ mock__parse_driver_info, mock__insert_vmedia, mock__eject_vmedia,
+ mock__prepare_boot_iso, mock_clean_up_instance):
+
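+        # A ramdisk deploy that carries a boot_iso boots the instance
+        # from the ISO attached as virtual CD media.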
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.node.provision_state = states.DEPLOYING
+ task.node.driver_internal_info[
+ 'root_uuid_or_disk_id'] = self.node.uuid
+ task.node.instance_info = {'boot_iso': 'http://host/boot.iso'}
+
+ mock_deploy_utils.get_boot_option.return_value = 'ramdisk'
+
+ mock__prepare_boot_iso.return_value = 'image-url'
+
+ task.driver.boot.prepare_instance(task)
+
+ mock__prepare_boot_iso.assert_called_once_with(task)
+
+ mock__eject_vmedia.assert_called_once_with(
+ task, sushy.VIRTUAL_MEDIA_CD)
+
+ mock__insert_vmedia.assert_called_once_with(
+ task, 'image-url', sushy.VIRTUAL_MEDIA_CD)
+
+ mock_manager_utils.node_set_boot_device.assert_called_once_with(
+ task, boot_devices.CDROM, persistent=True)
+
+ mock_boot_mode_utils.sync_boot_mode.assert_called_once_with(task)
+
@mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_cleanup_iso_image', autospec=True)
+ 'clean_up_instance', autospec=True)
+ @mock.patch.object(redfish_boot, '_prepare_boot_iso', autospec=True)
+ @mock.patch.object(redfish_boot, '_eject_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_insert_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_parse_driver_info', autospec=True)
+ @mock.patch.object(redfish_boot, 'manager_utils', autospec=True)
+ @mock.patch.object(redfish_boot, 'deploy_utils', autospec=True)
+ @mock.patch.object(redfish_boot, 'boot_mode_utils', autospec=True)
+ def test_prepare_instance_ramdisk_boot_iso_boot(
+ self, mock_boot_mode_utils, mock_deploy_utils, mock_manager_utils,
+ mock__parse_driver_info, mock__insert_vmedia, mock__eject_vmedia,
+ mock__prepare_boot_iso, mock_clean_up_instance):
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.node.provision_state = states.DEPLOYING
+ i_info = task.node.instance_info
+ i_info['boot_iso'] = "super-magic"
+ task.node.instance_info = i_info
+ mock_deploy_utils.get_boot_option.return_value = 'ramdisk'
+
+ mock__prepare_boot_iso.return_value = 'image-url'
+
+ task.driver.boot.prepare_instance(task)
+
+ mock__prepare_boot_iso.assert_called_once_with(task)
+
+ mock__eject_vmedia.assert_called_once_with(
+ task, sushy.VIRTUAL_MEDIA_CD)
+
+ mock__insert_vmedia.assert_called_once_with(
+ task, 'image-url', sushy.VIRTUAL_MEDIA_CD)
+
+ mock_manager_utils.node_set_boot_device.assert_called_once_with(
+ task, boot_devices.CDROM, persistent=True)
+
+ mock_boot_mode_utils.sync_boot_mode.assert_called_once_with(task)
+
+ @mock.patch.object(redfish_boot, '_eject_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_cleanup_iso_image', autospec=True)
@mock.patch.object(redfish_boot, 'manager_utils', autospec=True)
def _test_prepare_instance_local_boot(
self, mock_manager_utils,
@@ -893,10 +1062,8 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
self.node.save()
self._test_prepare_instance_local_boot()
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_eject_vmedia', autospec=True)
- @mock.patch.object(redfish_boot.RedfishVirtualMediaBoot,
- '_cleanup_iso_image', autospec=True)
+ @mock.patch.object(redfish_boot, '_eject_vmedia', autospec=True)
+ @mock.patch.object(redfish_boot, '_cleanup_iso_image', autospec=True)
def _test_clean_up_instance(self, mock__cleanup_iso_image,
mock__eject_vmedia):
@@ -942,7 +1109,7 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
mock_redfish_utils.get_system.return_value.managers = [
mock_manager]
- task.driver.boot._insert_vmedia(
+ redfish_boot._insert_vmedia(
task, 'img-url', sushy.VIRTUAL_MEDIA_CD)
mock_vmedia_cd.insert_media.assert_called_once_with(
@@ -967,7 +1134,7 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
mock_redfish_utils.get_system.return_value.managers = [
mock_manager]
- task.driver.boot._insert_vmedia(
+ redfish_boot._insert_vmedia(
task, 'img-url', sushy.VIRTUAL_MEDIA_CD)
self.assertFalse(mock_vmedia_cd.insert_media.call_count)
@@ -990,7 +1157,7 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
self.assertRaises(
exception.InvalidParameterValue,
- task.driver.boot._insert_vmedia,
+ redfish_boot._insert_vmedia,
task, 'img-url', sushy.VIRTUAL_MEDIA_CD)
@mock.patch.object(redfish_boot, 'redfish_utils', autospec=True)
@@ -1013,7 +1180,7 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
mock_redfish_utils.get_system.return_value.managers = [
mock_manager]
- task.driver.boot._eject_vmedia(task)
+ redfish_boot._eject_vmedia(task)
mock_vmedia_cd.eject_media.assert_called_once_with()
mock_vmedia_floppy.eject_media.assert_called_once_with()
@@ -1038,7 +1205,7 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
mock_redfish_utils.get_system.return_value.managers = [
mock_manager]
- task.driver.boot._eject_vmedia(task, sushy.VIRTUAL_MEDIA_CD)
+ redfish_boot._eject_vmedia(task, sushy.VIRTUAL_MEDIA_CD)
mock_vmedia_cd.eject_media.assert_called_once_with()
self.assertFalse(mock_vmedia_floppy.eject_media.call_count)
@@ -1063,7 +1230,7 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
mock_redfish_utils.get_system.return_value.managers = [
mock_manager]
- task.driver.boot._eject_vmedia(task)
+ redfish_boot._eject_vmedia(task)
self.assertFalse(mock_vmedia_cd.eject_media.call_count)
self.assertFalse(mock_vmedia_floppy.eject_media.call_count)
@@ -1085,6 +1252,6 @@ class RedfishVirtualMediaBootTestCase(db_base.DbTestCase):
mock_redfish_utils.get_system.return_value.managers = [
mock_manager]
- task.driver.boot._eject_vmedia(task)
+ redfish_boot._eject_vmedia(task)
self.assertFalse(mock_vmedia_cd.eject_media.call_count)
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_inspect.py b/ironic/tests/unit/drivers/modules/redfish/test_inspect.py
index 776a68bae..251d5bd91 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_inspect.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_inspect.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import importutils
from oslo_utils import units
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_management.py b/ironic/tests/unit/drivers/modules/redfish/test_management.py
index c8a37951a..60c9fd095 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_management.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_management.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import importutils
from ironic.common import boot_devices
@@ -99,6 +100,8 @@ class RedfishManagementTestCase(db_base.DbTestCase):
fake_system.set_system_boot_options.assert_called_once_with(
expected, enabled=sushy.BOOT_SOURCE_ENABLED_ONCE)
mock_get_system.assert_called_once_with(task.node)
+ self.assertNotIn('redfish_boot_device',
+ task.node.driver_internal_info)
# Reset mocks
fake_system.set_system_boot_options.reset_mock()
@@ -122,6 +125,8 @@ class RedfishManagementTestCase(db_base.DbTestCase):
fake_system.set_system_boot_options.assert_called_once_with(
sushy.BOOT_SOURCE_TARGET_PXE, enabled=expected)
mock_get_system.assert_called_once_with(task.node)
+ self.assertNotIn('redfish_boot_device',
+ task.node.driver_internal_info)
# Reset mocks
fake_system.set_system_boot_options.reset_mock()
@@ -169,6 +174,115 @@ class RedfishManagementTestCase(db_base.DbTestCase):
sushy.BOOT_SOURCE_TARGET_PXE,
enabled=sushy.BOOT_SOURCE_ENABLED_ONCE)
mock_get_system.assert_called_once_with(task.node)
+ self.assertNotIn('redfish_boot_device',
+ task.node.driver_internal_info)
+
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test_set_boot_device_fail_no_change(self, mock_get_system):
+ fake_system = mock.Mock()
+ fake_system.set_system_boot_options.side_effect = (
+ sushy.exceptions.SushyError()
+ )
+ mock_get_system.return_value = fake_system
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ expected_values = [
+ (True, sushy.BOOT_SOURCE_ENABLED_CONTINUOUS),
+ (False, sushy.BOOT_SOURCE_ENABLED_ONCE)
+ ]
+
+ for target, expected in expected_values:
+ fake_system.boot.get.return_value = expected
+
+ self.assertRaisesRegex(
+ exception.RedfishError, 'Redfish set boot device',
+ task.driver.management.set_boot_device, task,
+ boot_devices.PXE, persistent=target)
+ fake_system.set_system_boot_options.assert_called_once_with(
+ sushy.BOOT_SOURCE_TARGET_PXE, enabled=None)
+ mock_get_system.assert_called_once_with(task.node)
+ self.assertNotIn('redfish_boot_device',
+ task.node.driver_internal_info)
+
+ # Reset mocks
+ fake_system.set_system_boot_options.reset_mock()
+ mock_get_system.reset_mock()
+
+ @mock.patch.object(sushy, 'Sushy', autospec=True)
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test_set_boot_device_persistence_fallback(self, mock_get_system,
+ mock_sushy):
+ fake_system = mock.Mock()
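+        # The persistent (continuous) request fails; the driver falls
+        # back to a one-time setting and records the device so it can
+        # be restored on subsequent power-ons.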
+ fake_system.set_system_boot_options.side_effect = [
+ sushy.exceptions.SushyError(),
+ None,
+ ]
+ mock_get_system.return_value = fake_system
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.driver.management.set_boot_device(
+ task, boot_devices.PXE, persistent=True)
+ fake_system.set_system_boot_options.assert_has_calls([
+ mock.call(sushy.BOOT_SOURCE_TARGET_PXE,
+ enabled=sushy.BOOT_SOURCE_ENABLED_CONTINUOUS),
+ mock.call(sushy.BOOT_SOURCE_TARGET_PXE,
+ enabled=sushy.BOOT_SOURCE_ENABLED_ONCE),
+ ])
+ mock_get_system.assert_called_once_with(task.node)
+
+ task.node.refresh()
+ self.assertEqual(
+ sushy.BOOT_SOURCE_TARGET_PXE,
+ task.node.driver_internal_info['redfish_boot_device'])
+
+ def test_restore_boot_device(self):
+ fake_system = mock.Mock()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.driver_internal_info['redfish_boot_device'] = (
+ sushy.BOOT_SOURCE_TARGET_HDD
+ )
+
+ task.driver.management.restore_boot_device(task, fake_system)
+
+ fake_system.set_system_boot_options.assert_called_once_with(
+ sushy.BOOT_SOURCE_TARGET_HDD,
+ enabled=sushy.BOOT_SOURCE_ENABLED_ONCE)
+ # The stored boot device is kept intact
+ self.assertEqual(
+ sushy.BOOT_SOURCE_TARGET_HDD,
+ task.node.driver_internal_info['redfish_boot_device'])
+
+ def test_restore_boot_device_noop(self):
+ fake_system = mock.Mock()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.driver.management.restore_boot_device(task, fake_system)
+
+ self.assertFalse(fake_system.set_system_boot_options.called)
+
+ @mock.patch.object(redfish_mgmt.LOG, 'warning', autospec=True)
+ def test_restore_boot_device_failure(self, mock_log):
+ fake_system = mock.Mock()
+ fake_system.set_system_boot_options.side_effect = (
+ sushy.exceptions.SushyError()
+ )
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.driver_internal_info['redfish_boot_device'] = (
+ sushy.BOOT_SOURCE_TARGET_HDD
+ )
+
+ task.driver.management.restore_boot_device(task, fake_system)
+
+ fake_system.set_system_boot_options.assert_called_once_with(
+ sushy.BOOT_SOURCE_TARGET_HDD,
+ enabled=sushy.BOOT_SOURCE_ENABLED_ONCE)
+ self.assertTrue(mock_log.called)
+ # The stored boot device is kept intact
+ self.assertEqual(
+ sushy.BOOT_SOURCE_TARGET_HDD,
+ task.node.driver_internal_info['redfish_boot_device'])
@mock.patch.object(redfish_utils, 'get_system', autospec=True)
def test_get_boot_device(self, mock_get_system):
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_power.py b/ironic/tests/unit/drivers/modules/redfish/test_power.py
index 09068df24..d976e15a6 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_power.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_power.py
@@ -13,12 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import importutils
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
+from ironic.drivers.modules.redfish import management as redfish_mgmt
from ironic.drivers.modules.redfish import power as redfish_power
from ironic.drivers.modules.redfish import utils as redfish_utils
from ironic.tests.unit.db import base as db_base
@@ -83,19 +85,21 @@ class RedfishPowerTestCase(db_base.DbTestCase):
mock_get_system.assert_called_once_with(task.node)
mock_get_system.reset_mock()
+ @mock.patch.object(redfish_mgmt.RedfishManagement, 'restore_boot_device',
+ autospec=True)
@mock.patch.object(redfish_utils, 'get_system', autospec=True)
- def test_set_power_state(self, mock_get_system):
+ def test_set_power_state(self, mock_get_system, mock_restore_bootdev):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
expected_values = [
- (states.POWER_ON, sushy.RESET_ON),
- (states.POWER_OFF, sushy.RESET_FORCE_OFF),
- (states.REBOOT, sushy.RESET_FORCE_RESTART),
- (states.SOFT_REBOOT, sushy.RESET_GRACEFUL_RESTART),
- (states.SOFT_POWER_OFF, sushy.RESET_GRACEFUL_SHUTDOWN)
+ (states.POWER_ON, sushy.RESET_ON, True),
+ (states.POWER_OFF, sushy.RESET_FORCE_OFF, False),
+ (states.REBOOT, sushy.RESET_FORCE_RESTART, True),
+ (states.SOFT_REBOOT, sushy.RESET_GRACEFUL_RESTART, True),
+ (states.SOFT_POWER_OFF, sushy.RESET_GRACEFUL_SHUTDOWN, False)
]
- for target, expected in expected_values:
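+            # The third element flags whether the stored boot device
+            # should be restored once the node is powered (back) on.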
+ for target, expected, restore_bootdev in expected_values:
if target in (states.POWER_OFF, states.SOFT_POWER_OFF):
final = sushy.SYSTEM_POWER_STATE_OFF
transient = sushy.SYSTEM_POWER_STATE_ON
@@ -114,9 +118,15 @@ class RedfishPowerTestCase(db_base.DbTestCase):
system_result[0].reset_system.assert_called_once_with(expected)
mock_get_system.assert_called_with(task.node)
self.assertEqual(4, mock_get_system.call_count)
+ if restore_bootdev:
+ mock_restore_bootdev.assert_called_once_with(
+ task.driver.management, task, system_result[0])
+ else:
+ self.assertFalse(mock_restore_bootdev.called)
# Reset mocks
mock_get_system.reset_mock()
+ mock_restore_bootdev.reset_mock()
@mock.patch.object(redfish_utils, 'get_system', autospec=True)
def test_set_power_state_not_reached(self, mock_get_system):
@@ -159,41 +169,66 @@ class RedfishPowerTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaisesRegex(
- exception.RedfishError, 'Redfish set power state',
+ exception.RedfishError, 'power on failed',
task.driver.power.set_power_state, task, states.POWER_ON)
fake_system.reset_system.assert_called_once_with(
sushy.RESET_ON)
mock_get_system.assert_called_once_with(task.node)
+ @mock.patch.object(redfish_mgmt.RedfishManagement, 'restore_boot_device',
+ autospec=True)
@mock.patch.object(redfish_utils, 'get_system', autospec=True)
- def test_reboot(self, mock_get_system):
+ def test_reboot_from_power_off(self, mock_get_system,
+ mock_restore_bootdev):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- expected_values = [
- (sushy.SYSTEM_POWER_STATE_ON, sushy.RESET_FORCE_RESTART),
- (sushy.SYSTEM_POWER_STATE_OFF, sushy.RESET_ON)
+ system_result = [
+ # Initial state
+ mock.Mock(power_state=sushy.SYSTEM_POWER_STATE_OFF),
+ # Transient state - still powered off
+ mock.Mock(power_state=sushy.SYSTEM_POWER_STATE_OFF),
+ # Final state - down powering off
+ mock.Mock(power_state=sushy.SYSTEM_POWER_STATE_ON)
]
+ mock_get_system.side_effect = system_result
- for current, expected in expected_values:
- system_result = [
- # Initial state
- mock.Mock(power_state=current),
- # Transient state - powering off
- mock.Mock(power_state=sushy.SYSTEM_POWER_STATE_OFF),
- # Final state - down powering off
- mock.Mock(power_state=sushy.SYSTEM_POWER_STATE_ON)
- ]
- mock_get_system.side_effect = system_result
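+            # The node is already off, so reboot degenerates to a
+            # single power-on request.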
+ task.driver.power.reboot(task)
- task.driver.power.reboot(task)
+ # Asserts
+ system_result[0].reset_system.assert_called_once_with(
+ sushy.RESET_ON)
+ mock_get_system.assert_called_with(task.node)
+ self.assertEqual(3, mock_get_system.call_count)
+ mock_restore_bootdev.assert_called_once_with(
+ task.driver.management, task, system_result[0])
- # Asserts
- system_result[0].reset_system.assert_called_once_with(expected)
- mock_get_system.assert_called_with(task.node)
- self.assertEqual(3, mock_get_system.call_count)
+ @mock.patch.object(redfish_mgmt.RedfishManagement, 'restore_boot_device',
+ autospec=True)
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test_reboot_from_power_on(self, mock_get_system, mock_restore_bootdev):
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ system_result = [
+ # Initial state
+ mock.Mock(power_state=sushy.SYSTEM_POWER_STATE_ON),
+ # Transient state - powering off
+ mock.Mock(power_state=sushy.SYSTEM_POWER_STATE_OFF),
+                # Final state - powered back on
+ mock.Mock(power_state=sushy.SYSTEM_POWER_STATE_ON)
+ ]
+ mock_get_system.side_effect = system_result
- # Reset mocks
- mock_get_system.reset_mock()
+ task.driver.power.reboot(task)
+
+ # Asserts
+ system_result[0].reset_system.assert_has_calls([
+ mock.call(sushy.RESET_FORCE_OFF),
+ mock.call(sushy.RESET_ON),
+ ])
+ mock_get_system.assert_called_with(task.node)
+ self.assertEqual(3, mock_get_system.call_count)
+ mock_restore_bootdev.assert_called_once_with(
+ task.driver.management, task, system_result[0])
@mock.patch.object(redfish_utils, 'get_system', autospec=True)
def test_reboot_not_reached(self, mock_get_system):
@@ -220,12 +255,41 @@ class RedfishPowerTestCase(db_base.DbTestCase):
shared=False) as task:
fake_system.power_state = sushy.SYSTEM_POWER_STATE_ON
self.assertRaisesRegex(
- exception.RedfishError, 'Redfish reboot failed',
+ exception.RedfishError, 'Reboot failed.*power off',
task.driver.power.reboot, task)
fake_system.reset_system.assert_called_once_with(
- sushy.RESET_FORCE_RESTART)
+ sushy.RESET_FORCE_OFF)
mock_get_system.assert_called_once_with(task.node)
+ @mock.patch.object(sushy, 'Sushy', autospec=True)
+ @mock.patch.object(redfish_utils, 'get_system', autospec=True)
+ def test_reboot_fail_on_power_on(self, mock_get_system, mock_sushy):
+ system_result = [
+ # Initial state
+ mock.Mock(power_state=sushy.SYSTEM_POWER_STATE_ON),
+ # Transient state - powering off
+ mock.Mock(power_state=sushy.SYSTEM_POWER_STATE_OFF),
+            # Final state - powered back on
+ mock.Mock(power_state=sushy.SYSTEM_POWER_STATE_ON)
+ ]
+ mock_get_system.side_effect = system_result
+ fake_system = system_result[0]
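+        # The force-off request succeeds; the follow-up power-on raises.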
+ fake_system.reset_system.side_effect = [
+ None,
+ sushy.exceptions.SushyError(),
+ ]
+
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ self.assertRaisesRegex(
+ exception.RedfishError, 'Reboot failed.*power on',
+ task.driver.power.reboot, task)
+ fake_system.reset_system.assert_has_calls([
+ mock.call(sushy.RESET_FORCE_OFF),
+ mock.call(sushy.RESET_ON),
+ ])
+ mock_get_system.assert_called_with(task.node)
+
def test_get_supported_power_states(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
diff --git a/ironic/tests/unit/drivers/modules/redfish/test_utils.py b/ironic/tests/unit/drivers/modules/redfish/test_utils.py
index 7107a8ab2..794982bba 100644
--- a/ironic/tests/unit/drivers/modules/redfish/test_utils.py
+++ b/ironic/tests/unit/drivers/modules/redfish/test_utils.py
@@ -16,8 +16,8 @@
import collections
import copy
import os
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_utils import importutils
import requests
diff --git a/ironic/tests/unit/drivers/modules/storage/test_cinder.py b/ironic/tests/unit/drivers/modules/storage/test_cinder.py
index 02f2a56f2..165ce7c43 100644
--- a/ironic/tests/unit/drivers/modules/storage/test_cinder.py
+++ b/ironic/tests/unit/drivers/modules/storage/test_cinder.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import uuidutils
from ironic.common import cinder as cinder_common
@@ -367,7 +368,7 @@ class CinderInterfaceTestCase(db_base.DbTestCase):
@mock.patch.object(cinder_common, 'detach_volumes', autospec=True)
@mock.patch.object(cinder_common, 'attach_volumes', autospec=True)
- @mock.patch.object(cinder, 'LOG')
+ @mock.patch.object(cinder, 'LOG', autospec=True)
def test_attach_detach_volumes_no_volumes(self, mock_log,
mock_attach, mock_detach):
with task_manager.acquire(self.context, self.node.id) as task:
@@ -399,7 +400,8 @@ class CinderInterfaceTestCase(db_base.DbTestCase):
@mock.patch.object(cinder_common, 'detach_volumes', autospec=True)
@mock.patch.object(cinder_common, 'attach_volumes', autospec=True)
@mock.patch.object(cinder, 'LOG', autospec=True)
- @mock.patch.object(objects.volume_target.VolumeTarget, 'list_by_volume_id')
+ @mock.patch.object(objects.volume_target.VolumeTarget, 'list_by_volume_id',
+ autospec=True)
def test_attach_detach_called_with_target_and_connector(self,
mock_target_list,
mock_log,
@@ -513,7 +515,7 @@ class CinderInterfaceTestCase(db_base.DbTestCase):
self.assertEqual(1, mock_log.warning.call_count)
@mock.patch.object(cinder_common, 'detach_volumes', autospec=True)
- @mock.patch.object(cinder, 'LOG')
+ @mock.patch.object(cinder, 'LOG', autospec=True)
def test_detach_volumes_failure_raises_exception(self,
mock_log,
mock_detach):
diff --git a/ironic/tests/unit/drivers/modules/storage/test_external.py b/ironic/tests/unit/drivers/modules/storage/test_external.py
index 50b478d31..30b60bec4 100644
--- a/ironic/tests/unit/drivers/modules/storage/test_external.py
+++ b/ironic/tests/unit/drivers/modules/storage/test_external.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from ironic.common import exception
from ironic.conductor import task_manager
diff --git a/ironic/tests/unit/drivers/modules/test_agent.py b/ironic/tests/unit/drivers/modules/test_agent.py
index 590a22e08..7db79b066 100644
--- a/ironic/tests/unit/drivers/modules/test_agent.py
+++ b/ironic/tests/unit/drivers/modules/test_agent.py
@@ -12,9 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import types
+from unittest import mock
-import mock
from oslo_config import cfg
from ironic.common import dhcp_factory
@@ -36,7 +35,6 @@ from ironic.drivers.modules.network import flat as flat_network
from ironic.drivers.modules.network import neutron as neutron_network
from ironic.drivers.modules import pxe
from ironic.drivers.modules.storage import noop as noop_storage
-from ironic.drivers import utils as driver_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as object_utils
@@ -122,6 +120,35 @@ class TestAgentMethods(db_base.DbTestCase):
show_mock.assert_called_once_with(self.context, 'fake-image')
@mock.patch.object(images, 'image_show', autospec=True)
+ def test_check_image_size_raw_stream_enabled_format_raw(self, show_mock):
+ CONF.set_override('stream_raw_images', True, 'agent')
+        # Image is bigger than memory but it's raw and will be streamed,
+        # so the size check passes
+ show_mock.return_value = {
+ 'size': 15 * 1024 * 1024,
+ }
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.properties['memory_mb'] = 10
+ agent.check_image_size(task, 'fake-image', 'raw')
+ show_mock.assert_called_once_with(self.context, 'fake-image')
+
+ @mock.patch.object(images, 'image_show', autospec=True)
+ def test_check_image_size_raw_stream_enabled_format_qcow2(self, show_mock):
+ CONF.set_override('stream_raw_images', True, 'agent')
+ # Image is bigger than memory and won't be streamed
+ show_mock.return_value = {
+ 'size': 15 * 1024 * 1024,
+ }
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ task.node.properties['memory_mb'] = 10
+ self.assertRaises(exception.InvalidParameterValue,
+ agent.check_image_size,
+ task, 'fake-image', 'qcow2')
+ show_mock.assert_called_once_with(self.context, 'fake-image')
+
+ @mock.patch.object(images, 'image_show', autospec=True)
def test_check_image_size_raw_stream_disabled(self, show_mock):
CONF.set_override('stream_raw_images', False, 'agent')
show_mock.return_value = {
@@ -138,12 +165,12 @@ class TestAgentMethods(db_base.DbTestCase):
task, 'fake-image')
show_mock.assert_called_once_with(self.context, 'fake-image')
- @mock.patch.object(deploy_utils, 'check_for_missing_params')
+ @mock.patch.object(deploy_utils, 'check_for_missing_params', autospec=True)
def test_validate_http_provisioning_not_glance(self, utils_mock):
agent.validate_http_provisioning_configuration(self.node)
utils_mock.assert_not_called()
- @mock.patch.object(deploy_utils, 'check_for_missing_params')
+ @mock.patch.object(deploy_utils, 'check_for_missing_params', autospec=True)
def test_validate_http_provisioning_not_http(self, utils_mock):
i_info = self.node.instance_info
i_info['image_source'] = '0448fa34-4db1-407b-a051-6357d5f86c59'
@@ -424,11 +451,11 @@ class TestAgentDeploy(db_base.DbTestCase):
self.assertNotIn(
'deployment_reboot', task.node.driver_internal_info)
- @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
autospec=True)
- def test_deploy_storage_should_write_image_false(self, mock_write,
- mock_pxe_instance):
+ def test_deploy_storage_should_write_image_false(
+ self, mock_write, mock_power):
mock_write.return_value = False
self.node.provision_state = states.DEPLOYING
self.node.deploy_step = {
@@ -438,7 +465,7 @@ class TestAgentDeploy(db_base.DbTestCase):
self.context, self.node['uuid'], shared=False) as task:
driver_return = self.driver.deploy(task)
self.assertIsNone(driver_return)
- self.assertTrue(mock_pxe_instance.called)
+ self.assertFalse(mock_power.called)
@mock.patch.object(agent_client.AgentClient, 'prepare_image',
autospec=True)
@@ -450,26 +477,15 @@ class TestAgentDeploy(db_base.DbTestCase):
mock_is_fast_track.return_value = True
self.node.target_provision_state = states.ACTIVE
self.node.provision_state = states.DEPLOYING
- test_temp_url = 'http://image'
- expected_image_info = {
- 'urls': [test_temp_url],
- 'id': 'fake-image',
- 'node_uuid': self.node.uuid,
- 'checksum': 'checksum',
- 'disk_format': 'qcow2',
- 'container_format': 'bare',
- 'stream_raw_images': CONF.agent.stream_raw_images,
- }
self.node.save()
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
- self.driver.deploy(task)
+ result = self.driver.deploy(task)
+ self.assertIsNone(result)
self.assertFalse(power_mock.called)
self.assertFalse(mock_pxe_instance.called)
- task.node.refresh()
- prepare_image_mock.assert_called_with(mock.ANY, task.node,
- expected_image_info)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertFalse(prepare_image_mock.called)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE,
task.node.target_provision_state)
@@ -509,10 +525,12 @@ class TestAgentDeploy(db_base.DbTestCase):
@mock.patch.object(noop_storage.NoopStorage, 'attach_volumes',
autospec=True)
- @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info')
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
- @mock.patch.object(deploy_utils, 'build_agent_options')
- @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy')
+ @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info',
+ autospec=True)
+ @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy',
+ autospec=True)
@mock.patch.object(flat_network.FlatNetwork, 'add_provisioning_network',
spec_set=True, autospec=True)
@mock.patch.object(flat_network.FlatNetwork,
@@ -544,16 +562,18 @@ class TestAgentDeploy(db_base.DbTestCase):
build_instance_info_mock.assert_called_once_with(task)
build_options_mock.assert_called_once_with(task.node)
pxe_prepare_ramdisk_mock.assert_called_once_with(
- task, {'a': 'b'})
+ task.driver.boot, task, {'a': 'b'})
self.node.refresh()
self.assertEqual('bar', self.node.instance_info['foo'])
@mock.patch.object(noop_storage.NoopStorage, 'attach_volumes',
autospec=True)
- @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info')
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
- @mock.patch.object(deploy_utils, 'build_agent_options')
- @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy')
+ @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info',
+ autospec=True)
+ @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy',
+ autospec=True)
@mock.patch.object(neutron_network.NeutronNetwork,
'add_provisioning_network',
spec_set=True, autospec=True)
@@ -586,15 +606,16 @@ class TestAgentDeploy(db_base.DbTestCase):
build_instance_info_mock.assert_called_once_with(task)
build_options_mock.assert_called_once_with(task.node)
pxe_prepare_ramdisk_mock.assert_called_once_with(
- task, {'a': 'b'})
+ task.driver.boot, task, {'a': 'b'})
self.node.refresh()
self.assertEqual('bar', self.node.instance_info['foo'])
@mock.patch.object(noop_storage.NoopStorage, 'attach_volumes',
autospec=True)
- @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info')
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
- @mock.patch.object(deploy_utils, 'build_agent_options')
+ @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info',
+ autospec=True)
+ @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
autospec=True)
@mock.patch.object(neutron_network.NeutronNetwork,
@@ -634,7 +655,7 @@ class TestAgentDeploy(db_base.DbTestCase):
secret=False)
build_options_mock.assert_called_once_with(task.node)
pxe_prepare_ramdisk_mock.assert_called_once_with(
- task, {'a': 'b'})
+ task.driver.boot, task, {'a': 'b'})
self.node.refresh()
capabilities = self.node.instance_info['capabilities']
self.assertEqual('local', capabilities['boot_option'])
@@ -642,9 +663,10 @@ class TestAgentDeploy(db_base.DbTestCase):
@mock.patch.object(noop_storage.NoopStorage, 'attach_volumes',
autospec=True)
- @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info')
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
- @mock.patch.object(deploy_utils, 'build_agent_options')
+ @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info',
+ autospec=True)
+ @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
autospec=True)
@mock.patch.object(neutron_network.NeutronNetwork,
@@ -681,16 +703,17 @@ class TestAgentDeploy(db_base.DbTestCase):
secret=False)
build_options_mock.assert_called_once_with(task.node)
pxe_prepare_ramdisk_mock.assert_called_once_with(
- task, {'a': 'b'})
+ task.driver.boot, task, {'a': 'b'})
self.node.refresh()
capabilities = self.node.instance_info['capabilities']
self.assertEqual('local', capabilities['boot_option'])
@mock.patch.object(noop_storage.NoopStorage, 'attach_volumes',
autospec=True)
- @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info')
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
- @mock.patch.object(deploy_utils, 'build_agent_options')
+ @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info',
+ autospec=True)
+ @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
@mock.patch.object(image_service.HttpImageService, 'validate_href',
autospec=True)
@mock.patch.object(neutron_network.NeutronNetwork,
@@ -730,7 +753,7 @@ class TestAgentDeploy(db_base.DbTestCase):
secret=False)
build_options_mock.assert_called_once_with(task.node)
pxe_prepare_ramdisk_mock.assert_called_once_with(
- task, {'a': 'b'})
+ task.driver.boot, task, {'a': 'b'})
self.node.refresh()
capabilities = self.node.instance_info['capabilities']
self.assertEqual('local', capabilities['boot_option'])
@@ -738,10 +761,12 @@ class TestAgentDeploy(db_base.DbTestCase):
@mock.patch.object(noop_storage.NoopStorage, 'attach_volumes',
autospec=True)
- @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info')
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
- @mock.patch.object(deploy_utils, 'build_agent_options')
- @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy')
+ @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info',
+ autospec=True)
+ @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy',
+ autospec=True)
@mock.patch.object(neutron_network.NeutronNetwork,
'add_provisioning_network',
spec_set=True, autospec=True)
@@ -786,9 +811,10 @@ class TestAgentDeploy(db_base.DbTestCase):
spec_set=True, autospec=True)
@mock.patch.object(flat_network.FlatNetwork, 'validate',
spec_set=True, autospec=True)
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
- @mock.patch.object(deploy_utils, 'build_agent_options')
- @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy')
+ @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy',
+ autospec=True)
def test_prepare_manage_agent_boot_false(
self, build_instance_info_mock,
build_options_mock, pxe_prepare_ramdisk_mock,
@@ -813,9 +839,10 @@ class TestAgentDeploy(db_base.DbTestCase):
self.node.refresh()
self.assertEqual('bar', self.node.instance_info['foo'])
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
- @mock.patch.object(deploy_utils, 'build_agent_options')
- @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy')
+ @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy',
+ autospec=True)
def _test_prepare_rescue_states(
self, build_instance_info_mock, build_options_mock,
pxe_prepare_ramdisk_mock, prov_state):
@@ -827,7 +854,7 @@ class TestAgentDeploy(db_base.DbTestCase):
self.assertFalse(build_instance_info_mock.called)
build_options_mock.assert_called_once_with(task.node)
pxe_prepare_ramdisk_mock.assert_called_once_with(
- task, {'a': 'b'})
+ task.driver.boot, task, {'a': 'b'})
def test_prepare_rescue_states(self):
for state in (states.RESCUING, states.RESCUEWAIT,
@@ -836,7 +863,8 @@ class TestAgentDeploy(db_base.DbTestCase):
@mock.patch.object(noop_storage.NoopStorage, 'attach_volumes',
autospec=True)
- @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info')
+ @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info',
+ autospec=True)
@mock.patch.object(flat_network.FlatNetwork, 'add_provisioning_network',
spec_set=True, autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance',
@@ -916,9 +944,10 @@ class TestAgentDeploy(db_base.DbTestCase):
@mock.patch.object(flat_network.FlatNetwork, 'add_provisioning_network',
spec_set=True, autospec=True)
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
- @mock.patch.object(deploy_utils, 'build_agent_options')
- @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy')
+ @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy',
+ autospec=True)
def test_prepare_adopting(
self, build_instance_info_mock, build_options_mock,
pxe_prepare_ramdisk_mock, add_provisioning_net_mock):
@@ -937,9 +966,10 @@ class TestAgentDeploy(db_base.DbTestCase):
spec_set=True, autospec=True)
@mock.patch.object(flat_network.FlatNetwork, 'validate',
spec_set=True, autospec=True)
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
- @mock.patch.object(deploy_utils, 'build_agent_options')
- @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy')
+ @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy',
+ autospec=True)
@mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
autospec=True)
def test_prepare_boot_from_volume(self, mock_write,
@@ -967,10 +997,12 @@ class TestAgentDeploy(db_base.DbTestCase):
@mock.patch('ironic.conductor.utils.is_fast_track', autospec=True)
@mock.patch.object(noop_storage.NoopStorage, 'attach_volumes',
autospec=True)
- @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info')
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
- @mock.patch.object(deploy_utils, 'build_agent_options')
- @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy')
+ @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info',
+ autospec=True)
+ @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy',
+ autospec=True)
@mock.patch.object(flat_network.FlatNetwork, 'add_provisioning_network',
spec_set=True, autospec=True)
@mock.patch.object(flat_network.FlatNetwork,
@@ -1010,37 +1042,34 @@ class TestAgentDeploy(db_base.DbTestCase):
self.assertFalse(build_options_mock.called)
self.assertFalse(pxe_prepare_ramdisk_mock.called)
- @mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider')
- @mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp')
- @mock.patch.object(pxe.PXEBoot, 'clean_up_instance')
- @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk')
+ @mock.patch('ironic.common.dhcp_factory.DHCPFactory', autospec=True)
+ @mock.patch.object(pxe.PXEBoot, 'clean_up_instance', autospec=True)
+ @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
def test_clean_up(self, pxe_clean_up_ramdisk_mock,
- pxe_clean_up_instance_mock, clean_dhcp_mock,
- set_dhcp_provider_mock):
+                      pxe_clean_up_instance_mock, dhcp_factory_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.driver.clean_up(task)
- pxe_clean_up_ramdisk_mock.assert_called_once_with(task)
- pxe_clean_up_instance_mock.assert_called_once_with(task)
- set_dhcp_provider_mock.assert_called_once_with()
- clean_dhcp_mock.assert_called_once_with(task)
-
- @mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider')
- @mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp')
- @mock.patch.object(pxe.PXEBoot, 'clean_up_instance')
- @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk')
+ pxe_clean_up_ramdisk_mock.assert_called_once_with(
+ task.driver.boot, task)
+ pxe_clean_up_instance_mock.assert_called_once_with(
+ task.driver.boot, task)
+            dhcp_factory_mock.assert_called_once_with()
+
+ @mock.patch('ironic.common.dhcp_factory.DHCPFactory', autospec=True)
+ @mock.patch.object(pxe.PXEBoot, 'clean_up_instance', autospec=True)
+ @mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
def test_clean_up_manage_agent_boot_false(self, pxe_clean_up_ramdisk_mock,
pxe_clean_up_instance_mock,
- clean_dhcp_mock,
- set_dhcp_provider_mock):
+                                              dhcp_factory_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
self.config(group='agent', manage_agent_boot=False)
self.driver.clean_up(task)
self.assertFalse(pxe_clean_up_ramdisk_mock.called)
- pxe_clean_up_instance_mock.assert_called_once_with(task)
- set_dhcp_provider_mock.assert_called_once_with()
- clean_dhcp_mock.assert_called_once_with(task)
+ pxe_clean_up_instance_mock.assert_called_once_with(
+ task.driver.boot, task)
+            dhcp_factory_mock.assert_called_once_with()
@mock.patch.object(agent_base, 'get_steps', autospec=True)
def test_get_clean_steps(self, mock_get_steps):
@@ -1110,13 +1139,21 @@ class TestAgentDeploy(db_base.DbTestCase):
tear_down_cleaning_mock.assert_called_once_with(
task, manage_boot=False)
- def _test_continue_deploy(self, additional_driver_info=None,
- additional_expected_image_info=None):
+ def _test_write_image(self, additional_driver_info=None,
+ additional_expected_image_info=None,
+ compat=False):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
driver_info = self.node.driver_info
driver_info.update(additional_driver_info or {})
self.node.driver_info = driver_info
+ if not compat:
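+            # Newer agents advertise write_image as an in-band deploy
+            # step; the compat path covers agents that only support
+            # prepare_image.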
+ step = {'step': 'write_image', 'interface': 'deploy'}
+ dii = self.node.driver_internal_info
+ dii['agent_cached_deploy_steps'] = {
+ 'deploy': [step],
+ }
+ self.node.driver_internal_info = dii
self.node.save()
test_temp_url = 'http://image'
expected_image_info = {
@@ -1130,24 +1167,34 @@ class TestAgentDeploy(db_base.DbTestCase):
}
expected_image_info.update(additional_expected_image_info or {})
- client_mock = mock.MagicMock(spec_set=['prepare_image'])
+ client_mock = mock.MagicMock(spec_set=['prepare_image',
+ 'execute_deploy_step'])
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy._client = client_mock
- task.driver.deploy.continue_deploy(task)
-
- client_mock.prepare_image.assert_called_with(task.node,
- expected_image_info)
+ task.driver.deploy.write_image(task)
+
+ if compat:
+ client_mock.prepare_image.assert_called_with(
+ task.node, expected_image_info, wait=True)
+ else:
+ step['args'] = {'image_info': expected_image_info,
+ 'configdrive': None}
+ client_mock.execute_deploy_step.assert_called_once_with(
+ step, task.node, mock.ANY)
self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
self.assertEqual(states.ACTIVE,
task.node.target_provision_state)
- def test_continue_deploy(self):
- self._test_continue_deploy()
+ def test_write_image(self):
+ self._test_write_image()
- def test_continue_deploy_with_proxies(self):
- self._test_continue_deploy(
+ def test_write_image_compat(self):
+ self._test_write_image(compat=True)
+
+ def test_write_image_with_proxies(self):
+ self._test_write_image(
additional_driver_info={'image_https_proxy': 'https://spam.ni',
'image_http_proxy': 'spam.ni',
'image_no_proxy': '.eggs.com'},
@@ -1157,22 +1204,22 @@ class TestAgentDeploy(db_base.DbTestCase):
'no_proxy': '.eggs.com'}
)
- def test_continue_deploy_with_no_proxy_without_proxies(self):
- self._test_continue_deploy(
+ def test_write_image_with_no_proxy_without_proxies(self):
+ self._test_write_image(
additional_driver_info={'image_no_proxy': '.eggs.com'}
)
- def test_continue_deploy_image_source_is_url(self):
+ def test_write_image_image_source_is_url(self):
instance_info = self.node.instance_info
instance_info['image_source'] = 'http://example.com/woof.img'
self.node.instance_info = instance_info
- self._test_continue_deploy(
+ self._test_write_image(
additional_expected_image_info={
'id': 'woof.img'
}
)
- def test_continue_deploy_partition_image(self):
+ def test_write_image_partition_image(self):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
i_info = self.node.instance_info
@@ -1223,163 +1270,97 @@ class TestAgentDeploy(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy._client = client_mock
- task.driver.deploy.continue_deploy(task)
+ task.driver.deploy.write_image(task)
client_mock.prepare_image.assert_called_with(task.node,
- expected_image_info)
+ expected_image_info,
+ wait=True)
self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
self.assertEqual(states.ACTIVE,
task.node.target_provision_state)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
- autospec=True)
@mock.patch.object(deploy_utils, 'remove_http_instance_symlink',
autospec=True)
@mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True)
- @mock.patch.object(agent.AgentDeployMixin, '_get_uuid_from_result',
+ @mock.patch.object(agent_client.AgentClient, 'get_partition_uuids',
autospec=True)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- spec=types.FunctionType)
- @mock.patch.object(agent_client.AgentClient, 'power_off',
- spec=types.FunctionType)
@mock.patch.object(agent.AgentDeployMixin, 'prepare_instance_to_boot',
autospec=True)
- @mock.patch('ironic.drivers.modules.agent.AgentDeployMixin'
- '.check_deploy_success', autospec=True)
- def test_reboot_to_instance(self, check_deploy_mock,
- prepare_instance_mock, power_off_mock,
- get_power_state_mock, node_power_action_mock,
- uuid_mock, log_mock, remove_symlink_mock,
- power_on_node_if_needed_mock, resume_mock):
+ def test_prepare_instance_boot(self, prepare_instance_mock,
+ uuid_mock, log_mock, remove_symlink_mock):
self.config(manage_agent_boot=True, group='agent')
self.config(image_download_source='http', group='agent')
- check_deploy_mock.return_value = None
- uuid_mock.return_value = None
- self.node.provision_state = states.DEPLOYWAIT
+ uuid_mock.return_value = {}
+ self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- get_power_state_mock.return_value = states.POWER_OFF
- power_on_node_if_needed_mock.return_value = None
task.node.driver_internal_info['is_whole_disk_image'] = True
- task.driver.deploy.reboot_to_instance(task)
- check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
- uuid_mock.assert_called_once_with(mock.ANY, task, 'root_uuid')
+ task.driver.deploy.prepare_instance_boot(task)
+ uuid_mock.assert_called_once_with(mock.ANY, task.node)
self.assertNotIn('root_uuid_or_disk_id',
task.node.driver_internal_info)
- self.assertTrue(log_mock.called)
- self.assertIn("Ironic Python Agent version 3.1.0 and beyond",
- log_mock.call_args[0][0])
+ self.assertFalse(log_mock.called)
prepare_instance_mock.assert_called_once_with(mock.ANY, task,
None, None, None)
- power_off_mock.assert_called_once_with(task.node)
- get_power_state_mock.assert_called_once_with(task)
- node_power_action_mock.assert_called_once_with(
- task, states.POWER_ON)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertTrue(remove_symlink_mock.called)
- resume_mock.assert_called_once_with(task)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
- autospec=True)
@mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
- @mock.patch.object(agent.AgentDeployMixin, '_get_uuid_from_result',
+ @mock.patch.object(agent_client.AgentClient, 'get_partition_uuids',
autospec=True)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- spec=types.FunctionType)
- @mock.patch.object(agent_client.AgentClient, 'power_off',
- spec=types.FunctionType)
@mock.patch.object(agent.AgentDeployMixin, 'prepare_instance_to_boot',
autospec=True)
- @mock.patch('ironic.drivers.modules.agent.AgentDeployMixin'
- '.check_deploy_success', autospec=True)
- def test_reboot_to_instance_no_manage_agent_boot(
- self, check_deploy_mock, prepare_instance_mock, power_off_mock,
- get_power_state_mock, node_power_action_mock, uuid_mock,
- bootdev_mock, log_mock, power_on_node_if_needed_mock,
- resume_mock):
+ def test_prepare_instance_boot_no_manage_agent_boot(
+ self, prepare_instance_mock, uuid_mock,
+ bootdev_mock, log_mock):
self.config(manage_agent_boot=False, group='agent')
- check_deploy_mock.return_value = None
- uuid_mock.return_value = None
- self.node.provision_state = states.DEPLOYWAIT
+ uuid_mock.return_value = {}
+ self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- power_on_node_if_needed_mock.return_value = None
- get_power_state_mock.return_value = states.POWER_OFF
task.node.driver_internal_info['is_whole_disk_image'] = True
- task.driver.deploy.reboot_to_instance(task)
- check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
- uuid_mock.assert_called_once_with(mock.ANY, task, 'root_uuid')
+ task.driver.deploy.prepare_instance_boot(task)
+ uuid_mock.assert_called_once_with(mock.ANY, task.node)
self.assertNotIn('root_uuid_or_disk_id',
task.node.driver_internal_info)
self.assertFalse(log_mock.called)
self.assertFalse(prepare_instance_mock.called)
bootdev_mock.assert_called_once_with(task, 'disk', persistent=True)
- power_off_mock.assert_called_once_with(task.node)
- get_power_state_mock.assert_called_once_with(task)
- node_power_action_mock.assert_called_once_with(
- task, states.POWER_ON)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
- resume_mock.assert_called_once_with(task)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
- autospec=True)
@mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True)
@mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
autospec=True)
- @mock.patch.object(agent.AgentDeployMixin, '_get_uuid_from_result',
+ @mock.patch.object(agent_client.AgentClient, 'get_partition_uuids',
autospec=True)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- spec=types.FunctionType)
- @mock.patch.object(agent_client.AgentClient, 'power_off',
- spec=types.FunctionType)
@mock.patch.object(agent.AgentDeployMixin, 'prepare_instance_to_boot',
autospec=True)
- @mock.patch('ironic.drivers.modules.agent.AgentDeployMixin'
- '.check_deploy_success', autospec=True)
- def test_reboot_to_instance_partition_image(self, check_deploy_mock,
- prepare_instance_mock,
- power_off_mock,
- get_power_state_mock,
- node_power_action_mock,
- uuid_mock, boot_mode_mock,
- log_mock,
- power_on_node_if_needed_mock,
- resume_mock):
- check_deploy_mock.return_value = None
+ def test_prepare_instance_boot_partition_image(self, prepare_instance_mock,
+ uuid_mock, boot_mode_mock,
+ log_mock):
self.node.instance_info = {
'capabilities': {'boot_option': 'netboot'}}
- uuid_mock.return_value = 'root_uuid'
- self.node.provision_state = states.DEPLOYWAIT
+ uuid_mock.return_value = {
+ 'command_result': {'root uuid': 'root_uuid'}
+ }
+ self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
boot_mode_mock.return_value = 'bios'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- power_on_node_if_needed_mock.return_value = None
- get_power_state_mock.return_value = states.POWER_OFF
driver_internal_info = task.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = False
task.node.driver_internal_info = driver_internal_info
- task.driver.deploy.reboot_to_instance(task)
- check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
- uuid_mock.assert_called_once_with(mock.ANY,
- task, 'root_uuid')
+ task.driver.deploy.prepare_instance_boot(task)
+ uuid_mock.assert_called_once_with(mock.ANY, task.node)
driver_int_info = task.node.driver_internal_info
self.assertEqual('root_uuid',
driver_int_info['root_uuid_or_disk_id']),
@@ -1389,47 +1370,71 @@ class TestAgentDeploy(db_base.DbTestCase):
task,
'root_uuid',
None, None)
- power_off_mock.assert_called_once_with(task.node)
- get_power_state_mock.assert_called_once_with(task)
- node_power_action_mock.assert_called_once_with(
- task, states.POWER_ON)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
- resume_mock.assert_called_once_with(task)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
+ @mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True)
+ @mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
autospec=True)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
+ @mock.patch.object(agent.AgentDeployMixin, '_get_uuid_from_result',
+ autospec=True)
+ @mock.patch.object(agent_client.AgentClient, 'get_partition_uuids',
+ autospec=True)
+ @mock.patch.object(agent.AgentDeployMixin, 'prepare_instance_to_boot',
autospec=True)
+ def test_prepare_instance_boot_partition_image_compat(
+ self, prepare_instance_mock, uuid_mock,
+ old_uuid_mock, boot_mode_mock, log_mock):
+ self.node.instance_info = {
+ 'capabilities': {'boot_option': 'netboot'}}
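+        # Older agents do not implement get_partition_uuids; the driver
+        # is expected to fall back to the legacy _get_uuid_from_result.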
+ uuid_mock.side_effect = exception.AgentAPIError
+ old_uuid_mock.return_value = 'root_uuid'
+ self.node.provision_state = states.DEPLOYING
+ self.node.target_provision_state = states.ACTIVE
+ self.node.save()
+ boot_mode_mock.return_value = 'bios'
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=False) as task:
+ driver_internal_info = task.node.driver_internal_info
+ driver_internal_info['is_whole_disk_image'] = False
+ task.node.driver_internal_info = driver_internal_info
+ task.driver.deploy.prepare_instance_boot(task)
+ uuid_mock.assert_called_once_with(mock.ANY, task.node)
+ old_uuid_mock.assert_called_once_with(mock.ANY, task, 'root_uuid')
+ driver_int_info = task.node.driver_internal_info
+ self.assertEqual('root_uuid',
+                             driver_int_info['root_uuid_or_disk_id'])
+ boot_mode_mock.assert_called_once_with(task.node)
+ self.assertTrue(log_mock.called)
+ prepare_instance_mock.assert_called_once_with(mock.ANY,
+ task,
+ 'root_uuid',
+ None, None)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
+ self.assertEqual(states.ACTIVE, task.node.target_provision_state)
+
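The compat test above pins down the fallback contract for older ramdisks: prefer the new get_partition_uuids agent command and only fall back to scraping the deploy command result when the agent raises AgentAPIError. A minimal sketch of that shape, with hypothetical names (_partition_uuids is invented; _get_uuid_from_result is the legacy mixin helper mocked above):

from ironic.common import exception

def _partition_uuids(client, task):
    try:
        # Victoria-era IPA reports all partition UUIDs in a single call.
        return client.get_partition_uuids(task.node)['command_result']
    except exception.AgentAPIError:
        # Older IPA: recover the root UUID from the prepare_image result
        # and warn (the warning is what log_mock asserts above).
        return {'root uuid': _get_uuid_from_result(task, 'root_uuid')}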
@mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True)
@mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
autospec=True)
- @mock.patch.object(agent.AgentDeployMixin, '_get_uuid_from_result',
+ @mock.patch.object(agent_client.AgentClient, 'get_partition_uuids',
autospec=True)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- spec=types.FunctionType)
- @mock.patch.object(agent_client.AgentClient, 'power_off',
- spec=types.FunctionType)
@mock.patch.object(agent.AgentDeployMixin, 'prepare_instance_to_boot',
autospec=True)
- @mock.patch('ironic.drivers.modules.agent.AgentDeployMixin'
- '.check_deploy_success', autospec=True)
- def test_reboot_to_instance_partition_localboot_ppc64(
- self, check_deploy_mock, prepare_instance_mock,
- power_off_mock, get_power_state_mock,
- node_power_action_mock, uuid_mock, boot_mode_mock, log_mock,
- power_on_node_if_needed_mock, resume_mock):
- check_deploy_mock.return_value = None
- uuid_mock.side_effect = ['root_uuid', 'prep_boot_part_uuid']
- self.node.provision_state = states.DEPLOYWAIT
+ def test_prepare_instance_boot_partition_localboot_ppc64(
+ self, prepare_instance_mock,
+ uuid_mock, boot_mode_mock, log_mock):
+ uuid_mock.return_value = {
+ 'command_result': {
+ 'root uuid': 'root_uuid',
+ 'PReP Boot partition uuid': 'prep_boot_part_uuid',
+ }
+ }
+ self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- power_on_node_if_needed_mock.return_value = None
- get_power_state_mock.return_value = states.POWER_OFF
driver_internal_info = task.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = False
task.node.driver_internal_info = driver_internal_info
@@ -1439,198 +1444,75 @@ class TestAgentDeploy(db_base.DbTestCase):
properties.update(cpu_arch='ppc64le')
task.node.properties = properties
boot_mode_mock.return_value = 'bios'
- task.driver.deploy.reboot_to_instance(task)
+ task.driver.deploy.prepare_instance_boot(task)
- check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
driver_int_info = task.node.driver_internal_info
self.assertEqual('root_uuid',
                             driver_int_info['root_uuid_or_disk_id'])
- uuid_mock_calls = [
- mock.call(mock.ANY, task, 'root_uuid'),
- mock.call(mock.ANY, task, 'PReP_Boot_partition_uuid')]
- uuid_mock.assert_has_calls(uuid_mock_calls)
+ uuid_mock.assert_called_once_with(mock.ANY, task.node)
boot_mode_mock.assert_called_once_with(task.node)
self.assertFalse(log_mock.called)
prepare_instance_mock.assert_called_once_with(
mock.ANY, task, 'root_uuid', None, 'prep_boot_part_uuid')
- power_off_mock.assert_called_once_with(task.node)
- get_power_state_mock.assert_called_once_with(task)
- node_power_action_mock.assert_called_once_with(
- task, states.POWER_ON)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
- self.assertEqual(states.ACTIVE, task.node.target_provision_state)
-
- @mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True)
- @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
- @mock.patch.object(agent.AgentDeployMixin, '_get_uuid_from_result',
- autospec=True)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- spec=types.FunctionType)
- @mock.patch.object(agent_client.AgentClient, 'power_off',
- spec=types.FunctionType)
- @mock.patch.object(agent.AgentDeployMixin, 'prepare_instance_to_boot',
- autospec=True)
- @mock.patch('ironic.drivers.modules.agent.AgentDeployMixin'
- '.check_deploy_success', autospec=True)
- def test_reboot_to_instance_boot_error(
- self, check_deploy_mock, prepare_instance_mock,
- power_off_mock, get_power_state_mock, node_power_action_mock,
- uuid_mock, collect_ramdisk_logs_mock, log_mock):
- check_deploy_mock.return_value = "Error"
- uuid_mock.return_value = None
- self.node.provision_state = states.DEPLOYWAIT
- self.node.target_provision_state = states.ACTIVE
- self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=False) as task:
- get_power_state_mock.return_value = states.POWER_OFF
- task.node.driver_internal_info['is_whole_disk_image'] = True
- task.driver.deploy.reboot_to_instance(task)
- check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
- self.assertFalse(prepare_instance_mock.called)
- self.assertFalse(log_mock.called)
- self.assertFalse(power_off_mock.called)
- collect_ramdisk_logs_mock.assert_called_once_with(task.node)
- self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
- autospec=True)
@mock.patch.object(agent.LOG, 'warning', spec_set=True, autospec=True)
@mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
autospec=True)
- @mock.patch.object(agent.AgentDeployMixin, '_get_uuid_from_result',
+ @mock.patch.object(agent_client.AgentClient, 'get_partition_uuids',
autospec=True)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- spec=types.FunctionType)
- @mock.patch.object(agent_client.AgentClient, 'power_off',
- spec=types.FunctionType)
@mock.patch.object(agent.AgentDeployMixin, 'prepare_instance_to_boot',
autospec=True)
- @mock.patch('ironic.drivers.modules.agent.AgentDeployMixin'
- '.check_deploy_success', autospec=True)
- def test_reboot_to_instance_localboot(self, check_deploy_mock,
- prepare_instance_mock,
- power_off_mock,
- get_power_state_mock,
- node_power_action_mock,
- uuid_mock, boot_mode_mock,
- log_mock,
- power_on_node_if_needed_mock,
- resume_mock):
- check_deploy_mock.return_value = None
- uuid_mock.side_effect = ['root_uuid', 'efi_uuid']
- self.node.provision_state = states.DEPLOYWAIT
+ def test_prepare_instance_boot_localboot(self, prepare_instance_mock,
+ uuid_mock, boot_mode_mock,
+ log_mock):
+ uuid_mock.return_value = {
+ 'command_result': {
+ 'root uuid': 'root_uuid',
+ 'efi system partition uuid': 'efi_uuid',
+ }
+ }
+ self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
- power_on_node_if_needed_mock.return_value = None
- get_power_state_mock.return_value = states.POWER_OFF
driver_internal_info = task.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = False
task.node.driver_internal_info = driver_internal_info
boot_option = {'capabilities': '{"boot_option": "local"}'}
task.node.instance_info = boot_option
boot_mode_mock.return_value = 'uefi'
- task.driver.deploy.reboot_to_instance(task)
+ task.driver.deploy.prepare_instance_boot(task)
- check_deploy_mock.assert_called_once_with(mock.ANY, task.node)
driver_int_info = task.node.driver_internal_info
self.assertEqual('root_uuid',
                             driver_int_info['root_uuid_or_disk_id'])
- uuid_mock_calls = [
- mock.call(mock.ANY, task, 'root_uuid'),
- mock.call(mock.ANY, task, 'efi_system_partition_uuid')]
- uuid_mock.assert_has_calls(uuid_mock_calls)
+ uuid_mock.assert_called_once_with(mock.ANY, task.node)
boot_mode_mock.assert_called_once_with(task.node)
self.assertFalse(log_mock.called)
prepare_instance_mock.assert_called_once_with(
mock.ANY, task, 'root_uuid', 'efi_uuid', None)
- power_off_mock.assert_called_once_with(task.node)
- get_power_state_mock.assert_called_once_with(task)
- node_power_action_mock.assert_called_once_with(
- task, states.POWER_ON)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
- resume_mock.assert_called_once_with(task)
-
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
- autospec=True)
- def test_deploy_has_started(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = []
- self.assertFalse(task.driver.deploy.deploy_has_started(task))
-
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
- autospec=True)
- def test_deploy_has_started_is_done(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = [{'command_name': 'prepare_image',
- 'command_status': 'SUCCESS'}]
- self.assertTrue(task.driver.deploy.deploy_has_started(task))
-
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
- autospec=True)
- def test_deploy_has_started_did_start(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = [{'command_name': 'prepare_image',
- 'command_status': 'RUNNING'}]
- self.assertTrue(task.driver.deploy.deploy_has_started(task))
-
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
- autospec=True)
- def test_deploy_has_started_multiple_commands(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = [{'command_name': 'cache_image',
- 'command_status': 'SUCCESS'},
- {'command_name': 'prepare_image',
- 'command_status': 'RUNNING'}]
- self.assertTrue(task.driver.deploy.deploy_has_started(task))
-
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
- autospec=True)
- def test_deploy_has_started_other_commands(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = [{'command_name': 'cache_image',
- 'command_status': 'SUCCESS'}]
- self.assertFalse(task.driver.deploy.deploy_has_started(task))
-
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
- autospec=True)
- def test_deploy_is_done(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = [{'command_name': 'prepare_image',
- 'command_status': 'SUCCESS'}]
- self.assertTrue(task.driver.deploy.deploy_is_done(task))
-
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
- autospec=True)
- def test_deploy_is_done_empty_response(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = []
- self.assertFalse(task.driver.deploy.deploy_is_done(task))
-
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
- autospec=True)
- def test_deploy_is_done_race(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = [{'command_name': 'some_other_command',
- 'command_status': 'SUCCESS'}]
- self.assertFalse(task.driver.deploy.deploy_is_done(task))
- @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
+ @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
+ @mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
autospec=True)
- def test_deploy_is_done_still_running(self, mock_get_cmd):
- with task_manager.acquire(self.context, self.node.uuid) as task:
- mock_get_cmd.return_value = [{'command_name': 'prepare_image',
- 'command_status': 'RUNNING'}]
- self.assertFalse(task.driver.deploy.deploy_is_done(task))
+ def test_prepare_instance_boot_storage_should_write_image_with_smartnic(
+ self, mock_write, mock_pxe_instance):
+ mock_write.return_value = False
+ self.node.provision_state = states.DEPLOYING
+ self.node.deploy_step = {
+ 'step': 'deploy', 'priority': 50, 'interface': 'deploy'}
+ self.node.save()
+ with task_manager.acquire(
+ self.context, self.node['uuid'], shared=False) as task:
+ driver_return = self.driver.prepare_instance_boot(task)
+ self.assertIsNone(driver_return)
+ self.assertTrue(mock_pxe_instance.called)
@mock.patch.object(manager_utils, 'restore_power_state_if_needed',
autospec=True)
@@ -1638,10 +1520,12 @@ class TestAgentDeploy(db_base.DbTestCase):
autospec=True)
@mock.patch.object(noop_storage.NoopStorage, 'attach_volumes',
autospec=True)
- @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info')
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
- @mock.patch.object(deploy_utils, 'build_agent_options')
- @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy')
+ @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info',
+ autospec=True)
+ @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy',
+ autospec=True)
@mock.patch.object(flat_network.FlatNetwork,
'add_provisioning_network',
spec_set=True, autospec=True)
@@ -1677,7 +1561,7 @@ class TestAgentDeploy(db_base.DbTestCase):
build_instance_info_mock.assert_called_once_with(task)
build_options_mock.assert_called_once_with(task.node)
pxe_prepare_ramdisk_mock.assert_called_once_with(
- task, {'a': 'b'})
+ task.driver.boot, task, {'a': 'b'})
power_on_node_if_needed_mock.assert_called_once_with(task)
restore_power_state_mock.assert_called_once_with(
task, states.POWER_OFF)
@@ -1726,31 +1610,6 @@ class TestAgentDeploy(db_base.DbTestCase):
self.context, self.node['uuid'], shared=False) as task:
self.assertEqual(0, len(task.volume_targets))
- @mock.patch.object(manager_utils, 'restore_power_state_if_needed',
- autospec=True)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
- autospec=True)
- @mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
- @mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
- autospec=True)
- def test_deploy_storage_should_write_image_false_with_smartnic_port(
- self, mock_write, mock_pxe_instance,
- power_on_node_if_needed_mock, restore_power_state_mock):
- mock_write.return_value = False
- self.node.provision_state = states.DEPLOYING
- self.node.deploy_step = {
- 'step': 'deploy', 'priority': 50, 'interface': 'deploy'}
- self.node.save()
- with task_manager.acquire(
- self.context, self.node['uuid'], shared=False) as task:
- power_on_node_if_needed_mock.return_value = states.POWER_OFF
- driver_return = self.driver.deploy(task)
- self.assertIsNone(driver_return)
- self.assertTrue(mock_pxe_instance.called)
- power_on_node_if_needed_mock.assert_called_once_with(task)
- restore_power_state_mock.assert_called_once_with(
- task, states.POWER_OFF)
-

class AgentRAIDTestCase(db_base.DbTestCase):
@@ -1759,8 +1618,8 @@ class AgentRAIDTestCase(db_base.DbTestCase):
self.config(enabled_raid_interfaces=['fake', 'agent', 'no-raid'])
self.target_raid_config = {
"logical_disks": [
- {'size_gb': 200, 'raid_level': 0, 'is_root_volume': True},
- {'size_gb': 200, 'raid_level': 5}
+ {'size_gb': 200, 'raid_level': "0", 'is_root_volume': True},
+ {'size_gb': 200, 'raid_level': "5"}
]}
self.clean_step = {'step': 'create_configuration',
'interface': 'raid'}
@@ -1790,7 +1649,38 @@ class AgentRAIDTestCase(db_base.DbTestCase):
self.assertEqual(0, ret[0]['priority'])
self.assertEqual(0, ret[1]['priority'])
- @mock.patch.object(raid, 'filter_target_raid_config')
+ @mock.patch.object(agent_base, 'get_steps', autospec=True)
+ def test_get_deploy_steps(self, get_steps_mock):
+ get_steps_mock.return_value = [
+ {'step': 'apply_configuration', 'interface': 'raid',
+ 'priority': 0},
+ ]
+
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ ret = task.driver.raid.get_deploy_steps(task)
+
+ self.assertEqual('apply_configuration', ret[0]['step'])
+
+ @mock.patch.object(agent_base, 'execute_step', autospec=True)
+ def test_apply_configuration(self, execute_mock):
+ deploy_step = {
+ 'interface': 'raid',
+ 'step': 'apply_configuration',
+ 'args': {
+ 'raid_config': self.target_raid_config,
+ 'delete_existing': True
+ },
+ 'priority': 82
+ }
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ execute_mock.return_value = states.DEPLOYWAIT
+ task.node.deploy_step = deploy_step
+ return_value = task.driver.raid.apply_configuration(
+ task, self.target_raid_config, delete_existing=True)
+ self.assertEqual(states.DEPLOYWAIT, return_value)
+ execute_mock.assert_called_once_with(task, deploy_step, 'deploy')
+
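Per the assertions just above (the returned DEPLOYWAIT and the execute_step call), the new apply_configuration deploy step is a thin delegation; a sketch under that assumption:

from ironic.drivers.modules import agent_base

def apply_configuration(self, task, raid_config, delete_existing=True):
    # The work happens in-band: execute_step hands the node's current
    # deploy step to the agent and returns DEPLOYWAIT while the ramdisk
    # builds the RAID configuration asynchronously.
    return agent_base.execute_step(task, task.node.deploy_step, 'deploy')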
+ @mock.patch.object(raid, 'filter_target_raid_config', autospec=True)
@mock.patch.object(agent_base, 'execute_step', autospec=True)
def test_create_configuration(self, execute_mock,
filter_target_raid_config_mock):
@@ -1807,7 +1697,7 @@ class AgentRAIDTestCase(db_base.DbTestCase):
execute_mock.assert_called_once_with(task, self.clean_step,
'clean')
- @mock.patch.object(raid, 'filter_target_raid_config')
+ @mock.patch.object(raid, 'filter_target_raid_config', autospec=True)
@mock.patch.object(agent_base, 'execute_step', autospec=True)
def test_create_configuration_skip_root(self, execute_mock,
filter_target_raid_config_mock):
@@ -1828,7 +1718,7 @@ class AgentRAIDTestCase(db_base.DbTestCase):
exp_target_raid_config,
task.node.driver_internal_info['target_raid_config'])
- @mock.patch.object(raid, 'filter_target_raid_config')
+ @mock.patch.object(raid, 'filter_target_raid_config', autospec=True)
@mock.patch.object(agent_base, 'execute_step', autospec=True)
def test_create_configuration_skip_nonroot(self, execute_mock,
filter_target_raid_config_mock):
@@ -1849,7 +1739,7 @@ class AgentRAIDTestCase(db_base.DbTestCase):
exp_target_raid_config,
task.node.driver_internal_info['target_raid_config'])
- @mock.patch.object(raid, 'filter_target_raid_config')
+ @mock.patch.object(raid, 'filter_target_raid_config', autospec=True)
@mock.patch.object(agent_base, 'execute_step', autospec=True)
def test_create_configuration_no_target_raid_config_after_skipping(
self, execute_mock, filter_target_raid_config_mock):
@@ -1864,7 +1754,7 @@ class AgentRAIDTestCase(db_base.DbTestCase):
create_nonroot_volumes=False)
self.assertFalse(execute_mock.called)
- @mock.patch.object(raid, 'filter_target_raid_config')
+ @mock.patch.object(raid, 'filter_target_raid_config', autospec=True)
@mock.patch.object(agent_base, 'execute_step', autospec=True)
def test_create_configuration_empty_target_raid_config(
self, execute_mock, filter_target_raid_config_mock):
@@ -1890,16 +1780,33 @@ class AgentRAIDTestCase(db_base.DbTestCase):
update_raid_info_mock.assert_called_once_with(task.node, 'foo')
@mock.patch.object(raid, 'update_raid_info', autospec=True)
- def test__create_configuration_final_registered(
- self, update_raid_info_mock):
- self.node.clean_step = {'interface': 'raid',
- 'step': 'create_configuration'}
- command = {'command_result': {'clean_result': 'foo'}}
- create_hook = agent_base._get_post_step_hook(self.node, 'clean')
+ def _test__create_configuration_final_registered(
+ self, update_raid_info_mock, step_type='clean'):
+ step = {'interface': 'raid'}
+ if step_type == 'clean':
+ step['step'] = 'create_configuration'
+ self.node.clean_step = step
+ state = states.CLEANWAIT
+ command = {'command_result': {'clean_result': 'foo'}}
+ create_hook = agent_base._get_post_step_hook(self.node, 'clean')
+ else:
+ step['step'] = 'apply_configuration'
+ self.node.deploy_step = step
+ command = {'command_result': {'deploy_result': 'foo'}}
+ state = states.DEPLOYWAIT
+ create_hook = agent_base._get_post_step_hook(self.node, 'deploy')
+
with task_manager.acquire(self.context, self.node.uuid) as task:
+ task.node.provision_state = state
create_hook(task, command)
update_raid_info_mock.assert_called_once_with(task.node, 'foo')
+ def test__create_configuration_final_registered_clean(self):
+ self._test__create_configuration_final_registered(step_type='clean')
+
+ def test__create_configuration_final_registered_deploy(self):
+ self._test__create_configuration_final_registered(step_type='deploy')
+
@mock.patch.object(raid, 'update_raid_info', autospec=True)
def test__create_configuration_final_bad_command_result(
self, update_raid_info_mock):
@@ -1911,6 +1818,17 @@ class AgentRAIDTestCase(db_base.DbTestCase):
task, command)
self.assertFalse(update_raid_info_mock.called)
+ @mock.patch.object(raid, 'update_raid_info', autospec=True)
+ def test__create_configuration_final_bad_command_result2(
+ self, update_raid_info_mock):
+ command = {'command_result': {'deploy_result': None}}
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ raid_mgmt = agent.AgentRAID
+ self.assertRaises(exception.IronicException,
+ raid_mgmt._create_configuration_final,
+ task, command)
+ self.assertFalse(update_raid_info_mock.called)
+
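The two bad_command_result tests fix the error contract of the post-step hook: a missing clean_result or deploy_result must raise rather than silently skip update_raid_info. A hypothetical condensation consistent with both:

from ironic.common import exception
from ironic.common import raid

@staticmethod
def _create_configuration_final(task, command):
    # Clean and deploy runs report the built config under different keys.
    result = command.get('command_result') or {}
    raid_config = result.get('clean_result') or result.get('deploy_result')
    if not raid_config:
        raise exception.IronicException(
            'Agent RAID step returned no result for node %s' % task.node.uuid)
    raid.update_raid_info(task.node, raid_config)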
@mock.patch.object(agent_base, 'execute_step', autospec=True)
def test_delete_configuration(self, execute_mock):
execute_mock.return_value = states.CLEANING

diff --git a/ironic/tests/unit/drivers/modules/test_agent_base.py b/ironic/tests/unit/drivers/modules/test_agent_base.py
index 4f7ce9a1f..dac8e2fca 100644
--- a/ironic/tests/unit/drivers/modules/test_agent_base.py
+++ b/ironic/tests/unit/drivers/modules/test_agent_base.py
@@ -15,8 +15,8 @@
import time
import types
+from unittest import mock
-import mock
from oslo_config import cfg
from testtools import matchers
@@ -171,6 +171,7 @@ class HeartbeatMixinTest(AgentDeployMixinBaseTest):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.deploy.heartbeat(task, 'url', '3.2.0')
+ self.assertIsNone(task.node.last_error)
self.assertFalse(task.shared)
self.assertEqual(
'url', task.node.driver_internal_info['agent_url'])
@@ -304,6 +305,44 @@ class HeartbeatMixinTest(AgentDeployMixinBaseTest):
self.assertFalse(rti_mock.called)
self.assertFalse(in_resume_deploy_mock.called)
+ @mock.patch.object(agent_base.HeartbeatMixin, 'process_next_step',
+ autospec=True)
+ @mock.patch.object(agent_base.HeartbeatMixin,
+ 'in_core_deploy_step', autospec=True)
+ @mock.patch.object(agent_base.HeartbeatMixin,
+ 'deploy_has_started', autospec=True)
+ @mock.patch.object(agent_base.HeartbeatMixin,
+ 'deploy_is_done', autospec=True)
+ @mock.patch.object(agent_base.HeartbeatMixin, 'continue_deploy',
+ autospec=True)
+ @mock.patch.object(agent_base.HeartbeatMixin,
+ 'reboot_to_instance', autospec=True)
+ def test_heartbeat_decomposed_steps(self, rti_mock, cd_mock,
+ deploy_is_done_mock,
+ deploy_started_mock,
+ in_deploy_mock,
+ next_step_mock):
+ self.deploy.has_decomposed_deploy_steps = True
+ # Check that heartbeats do not trigger deployment actions when the
+ # driver has decomposed deploy steps.
+ self.node.provision_state = states.DEPLOYWAIT
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ self.deploy.heartbeat(task, 'url', '3.2.0')
+ self.assertFalse(task.shared)
+ self.assertEqual(
+ 'url', task.node.driver_internal_info['agent_url'])
+ self.assertEqual(
+ '3.2.0',
+ task.node.driver_internal_info['agent_version'])
+ self.assertFalse(in_deploy_mock.called)
+ self.assertFalse(deploy_started_mock.called)
+ self.assertFalse(deploy_is_done_mock.called)
+ self.assertFalse(cd_mock.called)
+ self.assertFalse(rti_mock.called)
+ self.assertTrue(next_step_mock.called)
+
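The new test encodes the heartbeat dispatch for drivers with decomposed deploy steps. A sketch using the attribute and method names taken from the mocks (the 'deploy' argument to process_next_step is an assumption, since the test only checks that it was called):

from ironic.common import states

def heartbeat(self, task, callback_url, agent_version):
    task.upgrade_lock()  # the test verifies the lock is no longer shared
    node = task.node
    info = node.driver_internal_info
    info.update(agent_url=callback_url, agent_version=agent_version)
    node.driver_internal_info = info
    node.save()
    if node.provision_state == states.DEPLOYWAIT:
        if self.has_decomposed_deploy_steps:
            # New-style drivers resume whichever deploy step is waiting.
            self.process_next_step(task, 'deploy')
        else:
            ...  # legacy continue_deploy / reboot_to_instance flow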
@mock.patch.object(agent_base.HeartbeatMixin, 'continue_deploy',
autospec=True)
@mock.patch.object(agent_base.HeartbeatMixin,
@@ -533,7 +572,7 @@ class HeartbeatMixinTest(AgentDeployMixinBaseTest):
mock_notify.assert_called_once_with(task, 'clean')
mock_set_steps.assert_called_once_with(task)
- @mock.patch.object(manager_utils, 'cleaning_error_handler')
+ @mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(objects.node.Node, 'touch_provisioning', autospec=True)
@mock.patch.object(agent_base.HeartbeatMixin,
'refresh_steps', autospec=True)
@@ -611,7 +650,7 @@ class HeartbeatMixinTest(AgentDeployMixinBaseTest):
mock_touch.assert_called_once_with(mock.ANY)
self.assertFalse(mock_continue.called)
- @mock.patch.object(manager_utils, 'cleaning_error_handler')
+ @mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(agent_base.HeartbeatMixin,
'continue_cleaning', autospec=True)
def test_heartbeat_continue_cleaning_fails(self, mock_continue,
@@ -634,7 +673,7 @@ class HeartbeatMixinTest(AgentDeployMixinBaseTest):
mock_continue.assert_called_once_with(mock.ANY, task)
mock_handler.assert_called_once_with(task, mock.ANY)
- @mock.patch.object(manager_utils, 'rescuing_error_handler')
+ @mock.patch.object(manager_utils, 'rescuing_error_handler', autospec=True)
@mock.patch.object(agent_base.HeartbeatMixin, '_finalize_rescue',
autospec=True)
def test_heartbeat_rescue(self, mock_finalize_rescue,
@@ -648,7 +687,7 @@ class HeartbeatMixinTest(AgentDeployMixinBaseTest):
mock_finalize_rescue.assert_called_once_with(mock.ANY, task)
self.assertFalse(mock_rescue_err_handler.called)
- @mock.patch.object(manager_utils, 'rescuing_error_handler')
+ @mock.patch.object(manager_utils, 'rescuing_error_handler', autospec=True)
@mock.patch.object(agent_base.HeartbeatMixin, '_finalize_rescue',
autospec=True)
def test_heartbeat_rescue_fails(self, mock_finalize,
@@ -848,9 +887,7 @@ class AgentRescueTests(AgentDeployMixinBaseTest):
class AgentDeployMixinTest(AgentDeployMixinBaseTest):
- @mock.patch.object(manager_utils, 'power_on_node_if_needed')
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
+ @mock.patch.object(manager_utils, 'power_on_node_if_needed', autospec=True)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@@ -858,34 +895,27 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
- def test_reboot_and_finish_deploy(
+ def test_tear_down_agent(
self, power_off_mock, get_power_state_mock,
- node_power_action_mock, collect_mock, resume_mock,
+ node_power_action_mock, collect_mock,
power_on_node_if_needed_mock):
cfg.CONF.set_override('deploy_logs_collect', 'always', 'agent')
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
+ with task_manager.acquire(self.context, self.node.uuid) as task:
get_power_state_mock.side_effect = [states.POWER_ON,
states.POWER_OFF]
power_on_node_if_needed_mock.return_value = None
- self.deploy.reboot_and_finish_deploy(task)
+ self.deploy.tear_down_agent(task)
power_off_mock.assert_called_once_with(task.node)
self.assertEqual(2, get_power_state_mock.call_count)
- node_power_action_mock.assert_called_once_with(
- task, states.POWER_ON)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertFalse(node_power_action_mock.called)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
collect_mock.assert_called_once_with(task.node)
- resume_mock.assert_called_once_with(task)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
- autospec=True)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@@ -893,70 +923,47 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
- @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
- 'remove_provisioning_network', spec_set=True, autospec=True)
- @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
- 'configure_tenant_networks', spec_set=True, autospec=True)
- def test_reboot_and_finish_deploy_soft_poweroff_doesnt_complete(
- self, configure_tenant_net_mock, remove_provisioning_net_mock,
- power_off_mock, get_power_state_mock,
- node_power_action_mock, mock_collect, resume_mock,
- power_on_node_if_needed_mock):
+ def test_tear_down_agent_soft_poweroff_doesnt_complete(
+ self, power_off_mock, get_power_state_mock,
+ node_power_action_mock, mock_collect):
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- power_on_node_if_needed_mock.return_value = None
+ with task_manager.acquire(self.context, self.node.uuid) as task:
get_power_state_mock.return_value = states.POWER_ON
- self.deploy.reboot_and_finish_deploy(task)
+ self.deploy.tear_down_agent(task)
power_off_mock.assert_called_once_with(task.node)
self.assertEqual(7, get_power_state_mock.call_count)
- node_power_action_mock.assert_has_calls([
- mock.call(task, states.POWER_OFF),
- mock.call(task, states.POWER_ON)])
- remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
- task)
- configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ node_power_action_mock.assert_called_once_with(task,
+ states.POWER_OFF)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertFalse(mock_collect.called)
- resume_mock.assert_called_once_with(task)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
+ @mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ @mock.patch.object(fake.FakePower, 'get_power_state',
+ spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
- @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
- 'remove_provisioning_network', spec_set=True, autospec=True)
- @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
- 'configure_tenant_networks', spec_set=True, autospec=True)
- def test_reboot_and_finish_deploy_soft_poweroff_fails(
- self, configure_tenant_net_mock, remove_provisioning_net_mock,
- power_off_mock, node_power_action_mock, mock_collect, resume_mock):
+ def test_tear_down_agent_soft_poweroff_fails(
+ self, power_off_mock, get_power_state_mock, node_power_action_mock,
+ mock_collect):
power_off_mock.side_effect = RuntimeError("boom")
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- self.deploy.reboot_and_finish_deploy(task)
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ get_power_state_mock.return_value = states.POWER_ON
+ self.deploy.tear_down_agent(task)
power_off_mock.assert_called_once_with(task.node)
- node_power_action_mock.assert_has_calls([
- mock.call(task, states.POWER_OFF),
- mock.call(task, states.POWER_ON)])
- remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
- task)
- configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ node_power_action_mock.assert_called_once_with(task,
+ states.POWER_OFF)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertFalse(mock_collect.called)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed')
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@@ -964,36 +971,26 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
- @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
- 'remove_provisioning_network', spec_set=True, autospec=True)
- @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
- 'configure_tenant_networks', spec_set=True, autospec=True)
- def test_reboot_and_finish_deploy_get_power_state_fails(
- self, configure_tenant_net_mock, remove_provisioning_net_mock,
- power_off_mock, get_power_state_mock, node_power_action_mock,
- mock_collect, resume_mock, power_on_node_if_needed_mock):
+ def test_tear_down_agent_soft_poweroff_race(
+ self, power_off_mock, get_power_state_mock, node_power_action_mock,
+ mock_collect):
+ # Test the situation when soft power off works, but ironic doesn't
+ # learn about it.
+ power_off_mock.side_effect = RuntimeError("boom")
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- get_power_state_mock.side_effect = RuntimeError("boom")
- power_on_node_if_needed_mock.return_value = None
- self.deploy.reboot_and_finish_deploy(task)
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ get_power_state_mock.side_effect = [states.POWER_ON,
+ states.POWER_OFF]
+ self.deploy.tear_down_agent(task)
power_off_mock.assert_called_once_with(task.node)
- self.assertEqual(7, get_power_state_mock.call_count)
- node_power_action_mock.assert_has_calls([
- mock.call(task, states.POWER_OFF),
- mock.call(task, states.POWER_ON)])
- remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
- task)
- configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertFalse(node_power_action_mock.called)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertFalse(mock_collect.called)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
- autospec=True)
+ @mock.patch.object(manager_utils, 'power_on_node_if_needed', autospec=True)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(time, 'sleep', lambda seconds: None)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@@ -1001,30 +998,21 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
- @mock.patch('ironic.drivers.modules.network.neutron.NeutronNetwork.'
- 'remove_provisioning_network', spec_set=True, autospec=True)
- @mock.patch('ironic.drivers.modules.network.neutron.NeutronNetwork.'
- 'configure_tenant_networks', spec_set=True, autospec=True)
- def test_reboot_and_finish_deploy_configure_tenant_network_exception(
- self, configure_tenant_net_mock, remove_provisioning_net_mock,
- power_off_mock, get_power_state_mock, node_power_action_mock,
+ def test_tear_down_agent_get_power_state_fails(
+ self, power_off_mock, get_power_state_mock, node_power_action_mock,
mock_collect, power_on_node_if_needed_mock):
- self.node.network_interface = 'neutron'
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
- power_on_node_if_needed_mock.return_value = None
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- configure_tenant_net_mock.side_effect = exception.NetworkError(
- "boom")
- self.assertRaises(exception.InstanceDeployFailure,
- self.deploy.reboot_and_finish_deploy, task)
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ get_power_state_mock.side_effect = RuntimeError("boom")
+ power_on_node_if_needed_mock.return_value = None
+ self.deploy.tear_down_agent(task)
+ power_off_mock.assert_called_once_with(task.node)
self.assertEqual(7, get_power_state_mock.call_count)
- remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
- task)
- configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
- self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
+ node_power_action_mock.assert_called_once_with(task,
+ states.POWER_OFF)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertFalse(mock_collect.called)
@@ -1035,78 +1023,31 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
spec=types.FunctionType)
@mock.patch.object(agent_client.AgentClient, 'power_off',
spec=types.FunctionType)
- def test_reboot_and_finish_deploy_power_off_fails(
+ def test_tear_down_agent_power_off_fails(
self, power_off_mock, get_power_state_mock,
node_power_action_mock, mock_collect):
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
+ with task_manager.acquire(self.context, self.node.uuid) as task:
get_power_state_mock.return_value = states.POWER_ON
node_power_action_mock.side_effect = RuntimeError("boom")
self.assertRaises(exception.InstanceDeployFailure,
- self.deploy.reboot_and_finish_deploy,
+ self.deploy.tear_down_agent,
task)
power_off_mock.assert_called_once_with(task.node)
self.assertEqual(7, get_power_state_mock.call_count)
- node_power_action_mock.assert_has_calls([
- mock.call(task, states.POWER_OFF)])
+ node_power_action_mock.assert_called_with(task, states.POWER_OFF)
self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
mock_collect.assert_called_once_with(task.node)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed',
- autospec=True)
- @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
- @mock.patch.object(time, 'sleep', lambda seconds: None)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- spec=types.FunctionType)
- @mock.patch.object(agent_client.AgentClient, 'power_off',
- spec=types.FunctionType)
- @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
- 'remove_provisioning_network', spec_set=True, autospec=True)
- @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
- 'configure_tenant_networks', spec_set=True, autospec=True)
- def test_reboot_and_finish_deploy_power_on_fails(
- self, configure_tenant_net_mock, remove_provisioning_net_mock,
- power_off_mock, get_power_state_mock,
- node_power_action_mock, mock_collect,
- power_on_node_if_needed_mock):
- self.node.provision_state = states.DEPLOYING
- self.node.target_provision_state = states.ACTIVE
- self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- power_on_node_if_needed_mock.return_value = None
- get_power_state_mock.return_value = states.POWER_ON
- node_power_action_mock.side_effect = [None,
- RuntimeError("boom")]
- self.assertRaises(exception.InstanceDeployFailure,
- self.deploy.reboot_and_finish_deploy,
- task)
- power_off_mock.assert_called_once_with(task.node)
- self.assertEqual(7, get_power_state_mock.call_count)
- node_power_action_mock.assert_has_calls([
- mock.call(task, states.POWER_OFF),
- mock.call(task, states.POWER_ON)])
- remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
- task)
- configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
- self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
- self.assertEqual(states.ACTIVE, task.node.target_provision_state)
- self.assertFalse(mock_collect.called)
-
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'sync',
spec=types.FunctionType)
- def test_reboot_and_finish_deploy_power_action_oob_power_off(
- self, sync_mock, node_power_action_mock, mock_collect,
- resume_mock):
+ def test_tear_down_agent_power_action_oob_power_off(
+ self, sync_mock, node_power_action_mock, mock_collect):
# Enable force power off
driver_info = self.node.driver_info
driver_info['deploy_forces_oob_reboot'] = True
@@ -1115,30 +1056,23 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- self.deploy.reboot_and_finish_deploy(task)
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ self.deploy.tear_down_agent(task)
sync_mock.assert_called_once_with(task.node)
- node_power_action_mock.assert_has_calls([
- mock.call(task, states.POWER_OFF),
- mock.call(task, states.POWER_ON),
- ])
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ node_power_action_mock.assert_called_once_with(task,
+ states.POWER_OFF)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertFalse(mock_collect.called)
- resume_mock.assert_called_once_with(task)
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
@mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(agent_base.LOG, 'warning', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'sync',
spec=types.FunctionType)
- def test_reboot_and_finish_deploy_power_action_oob_power_off_failed(
- self, sync_mock, node_power_action_mock, log_mock, mock_collect,
- resume_mock):
+ def test_tear_down_agent_power_action_oob_power_off_failed(
+ self, sync_mock, node_power_action_mock, log_mock, mock_collect):
# Enable force power off
driver_info = self.node.driver_info
driver_info['deploy_forces_oob_reboot'] = True
@@ -1147,17 +1081,16 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ log_mock.reset_mock()
+
sync_mock.return_value = {'faultstring': 'Unknown command: blah'}
- self.deploy.reboot_and_finish_deploy(task)
+ self.deploy.tear_down_agent(task)
sync_mock.assert_called_once_with(task.node)
- node_power_action_mock.assert_has_calls([
- mock.call(task, states.POWER_OFF),
- mock.call(task, states.POWER_ON),
- ])
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ node_power_action_mock.assert_called_once_with(task,
+ states.POWER_OFF)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
log_error = ('The version of the IPA ramdisk used in the '
'deployment do not support the command "sync"')
@@ -1167,6 +1100,95 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
{'node': task.node.uuid, 'error': log_error})
self.assertFalse(mock_collect.called)
+ @mock.patch.object(manager_utils, 'power_on_node_if_needed', autospec=True)
+ @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
+ @mock.patch.object(time, 'sleep', lambda seconds: None)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ @mock.patch.object(fake.FakePower, 'get_supported_power_states',
+ lambda self, task: [states.REBOOT])
+ @mock.patch.object(agent_client.AgentClient, 'sync', autospec=True)
+ def test_tear_down_agent_no_power_on_support(
+ self, sync_mock, node_power_action_mock, collect_mock,
+ power_on_node_if_needed_mock):
+ cfg.CONF.set_override('deploy_logs_collect', 'always', 'agent')
+ self.node.provision_state = states.DEPLOYING
+ self.node.target_provision_state = states.ACTIVE
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ self.deploy.tear_down_agent(task)
+ node_power_action_mock.assert_called_once_with(task, states.REBOOT)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
+ self.assertEqual(states.ACTIVE, task.node.target_provision_state)
+ collect_mock.assert_called_once_with(task.node)
+ self.assertFalse(power_on_node_if_needed_mock.called)
+ sync_mock.assert_called_once_with(self.deploy._client, task.node)
+
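Taken together, the tear_down_agent tests describe its power-off strategy: a soft power-off through the agent where possible, a polled status check, a hard POWER_OFF fallback, and a plain REBOOT when the power interface cannot power the node back on later. A simplified sketch (_wait_for_power_off is a hypothetical helper standing in for the polling loop the call counts above imply; the deploy_forces_oob_reboot branch is omitted):

from ironic.common import states
from ironic.conductor import utils as manager_utils

def tear_down_agent(self, task):
    if states.POWER_ON not in (
            task.driver.power.get_supported_power_states(task)):
        # No way to power back on later: flush the agent's buffers and
        # reboot straight into the instance.
        self._client.sync(task.node)
        manager_utils.node_power_action(task, states.REBOOT)
        return
    try:
        self._client.power_off(task.node)  # soft power off via the agent
    except Exception:
        pass  # the agent may die mid-shutdown; trust the power status below
    if not _wait_for_power_off(task):  # polls get_power_state (~7 attempts)
        manager_utils.node_power_action(task, states.POWER_OFF)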
+ @mock.patch.object(manager_utils, 'restore_power_state_if_needed',
+ autospec=True)
+ @mock.patch.object(manager_utils, 'power_on_node_if_needed', autospec=True)
+ @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
+ 'remove_provisioning_network', spec_set=True, autospec=True)
+ @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
+ 'configure_tenant_networks', spec_set=True, autospec=True)
+ def test_switch_to_tenant_network(self, configure_tenant_net_mock,
+ remove_provisioning_net_mock,
+ power_on_node_if_needed_mock,
+ restore_power_state_mock):
+ power_on_node_if_needed_mock.return_value = states.POWER_OFF
+ self.node.provision_state = states.DEPLOYING
+ self.node.target_provision_state = states.ACTIVE
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ self.deploy.switch_to_tenant_network(task)
+ remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
+ task)
+ configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
+ power_on_node_if_needed_mock.assert_called_once_with(task)
+ restore_power_state_mock.assert_called_once_with(
+ task, states.POWER_OFF)
+
+ @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
+ @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
+ 'remove_provisioning_network', spec_set=True, autospec=True)
+ @mock.patch('ironic.drivers.modules.network.noop.NoopNetwork.'
+ 'configure_tenant_networks', spec_set=True, autospec=True)
+ def test_switch_to_tenant_network_fails(self, configure_tenant_net_mock,
+ remove_provisioning_net_mock,
+ mock_collect):
+ self.node.provision_state = states.DEPLOYING
+ self.node.target_provision_state = states.ACTIVE
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ configure_tenant_net_mock.side_effect = exception.NetworkError(
+ "boom")
+ self.assertRaises(exception.InstanceDeployFailure,
+ self.deploy.switch_to_tenant_network, task)
+ remove_provisioning_net_mock.assert_called_once_with(mock.ANY,
+ task)
+ configure_tenant_net_mock.assert_called_once_with(mock.ANY, task)
+ self.assertFalse(mock_collect.called)
+
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ def test_boot_instance(self, node_power_action_mock):
+ self.node.provision_state = states.DEPLOYING
+ self.node.target_provision_state = states.ACTIVE
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ self.deploy.boot_instance(task)
+ node_power_action_mock.assert_called_once_with(task,
+ states.POWER_ON)
+
+ @mock.patch.object(fake.FakePower, 'get_supported_power_states',
+ lambda self, task: [states.REBOOT])
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
+ def test_boot_instance_no_power_on(self, node_power_action_mock):
+ self.node.provision_state = states.DEPLOYING
+ self.node.target_provision_state = states.ACTIVE
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ self.deploy.boot_instance(task)
+ self.assertFalse(node_power_action_mock.called)
+
@mock.patch.object(agent_client.AgentClient, 'install_bootloader',
autospec=True)
@mock.patch.object(deploy_utils, 'try_set_boot_device', autospec=True)
@@ -1815,6 +1837,7 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
hook_mock.assert_called_once_with(task, command_status)
notify_mock.assert_called_once_with(task, 'clean')
+ @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(manager_utils, 'notify_conductor_resume_operation',
autospec=True)
@mock.patch.object(agent_base,
@@ -1824,7 +1847,7 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
autospec=True)
def test_continue_cleaning_with_hook_fails(
self, status_mock, error_handler_mock, get_hook_mock,
- notify_mock):
+ notify_mock, collect_logs_mock):
self.node.clean_step = {
'priority': 10,
'interface': 'raid',
@@ -1847,6 +1870,8 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
hook_mock.assert_called_once_with(task, command_status)
error_handler_mock.assert_called_once_with(task, mock.ANY)
self.assertFalse(notify_mock.called)
+ collect_logs_mock.assert_called_once_with(task.node,
+ label='cleaning')
@mock.patch.object(manager_utils, 'notify_conductor_resume_operation',
autospec=True)
@@ -1908,10 +1933,12 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
self.deploy.continue_cleaning(task)
notify_mock.assert_called_once_with(task, 'clean')
+ @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
@mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(agent_client.AgentClient, 'get_commands_status',
autospec=True)
- def test_continue_cleaning_fail(self, status_mock, error_mock):
+ def test_continue_cleaning_fail(self, status_mock, error_mock,
+ collect_logs_mock):
# Test that a failure puts the node in CLEANFAIL
status_mock.return_value = [{
'command_status': 'FAILED',
@@ -1922,6 +1949,8 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
shared=False) as task:
self.deploy.continue_cleaning(task)
error_mock.assert_called_once_with(task, mock.ANY)
+ collect_logs_mock.assert_called_once_with(task.node,
+ label='cleaning')
@mock.patch.object(conductor_steps, 'set_node_cleaning_steps',
autospec=True)
@@ -2057,46 +2086,6 @@ class AgentDeployMixinTest(AgentDeployMixinBaseTest):
hook_returned = agent_base._get_post_step_hook(self.node, 'clean')
self.assertIsNone(hook_returned)
- @mock.patch.object(manager_utils, 'restore_power_state_if_needed',
- autospec=True)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed')
- @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy',
- autospec=True)
- @mock.patch.object(driver_utils, 'collect_ramdisk_logs', autospec=True)
- @mock.patch.object(time, 'sleep', lambda seconds: None)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(fake.FakePower, 'get_power_state',
- spec=types.FunctionType)
- @mock.patch.object(agent_client.AgentClient, 'power_off',
- spec=types.FunctionType)
- def test_reboot_and_finish_deploy_with_smartnic_port(
- self, power_off_mock, get_power_state_mock,
- node_power_action_mock, collect_mock, resume_mock,
- power_on_node_if_needed_mock, restore_power_state_mock):
- cfg.CONF.set_override('deploy_logs_collect', 'always', 'agent')
- self.node.provision_state = states.DEPLOYING
- self.node.target_provision_state = states.ACTIVE
- self.node.deploy_step = {
- 'step': 'deploy', 'priority': 50, 'interface': 'deploy'}
- self.node.save()
- with task_manager.acquire(self.context, self.node.uuid,
- shared=True) as task:
- get_power_state_mock.side_effect = [states.POWER_ON,
- states.POWER_OFF]
- power_on_node_if_needed_mock.return_value = states.POWER_OFF
- self.deploy.reboot_and_finish_deploy(task)
- power_off_mock.assert_called_once_with(task.node)
- self.assertEqual(2, get_power_state_mock.call_count)
- node_power_action_mock.assert_called_once_with(
- task, states.POWER_ON)
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
- self.assertEqual(states.ACTIVE, task.node.target_provision_state)
- collect_mock.assert_called_once_with(task.node)
- resume_mock.assert_called_once_with(task)
- power_on_node_if_needed_mock.assert_called_once_with(task)
- restore_power_state_mock.assert_called_once_with(
- task, states.POWER_OFF)
-

class TestRefreshCleanSteps(AgentDeployMixinBaseTest):
@@ -2185,6 +2174,26 @@ class TestRefreshCleanSteps(AgentDeployMixinBaseTest):
self.assertEqual([self.clean_steps['clean_steps'][
'SpecificHardwareManager'][1]], steps['raid'])
+ @mock.patch.object(agent_base.LOG, 'warning', autospec=True)
+ @mock.patch.object(agent_client.AgentClient, 'get_deploy_steps',
+ autospec=True)
+ def test_refresh_steps_busy(self, client_mock, log_mock):
+ client_mock.side_effect = exception.AgentAPIError(
+ node="node", status="500", error='agent is busy')
+
+ with task_manager.acquire(
+ self.context, self.node.uuid, shared=False) as task:
+ log_mock.reset_mock()
+ self.deploy.refresh_steps(task, 'deploy')
+
+ client_mock.assert_called_once_with(mock.ANY, task.node,
+ task.ports)
+ self.assertNotIn('agent_cached_deploy_steps_refreshed',
+ task.node.driver_internal_info)
+ self.assertIsNone(task.node.driver_internal_info.get(
+ 'agent_cached_deploy_steps'))
+ log_mock.assert_not_called()
+
@mock.patch.object(agent_client.AgentClient, 'get_clean_steps',
autospec=True)
def test_refresh_steps_missing_steps(self, client_mock):
@@ -2314,6 +2323,23 @@ class StepMethodsTestCase(db_base.DbTestCase):
self.context, self.node.uuid, shared=False) as task:
self.assertEqual([], agent_base.get_steps(task, 'clean'))
+ def test_find_step(self):
+ with task_manager.acquire(
+ self.context, self.node.uuid, shared=False) as task:
+ step = agent_base.find_step(task, 'clean', 'deploy',
+ 'erase_devices')
+ self.assertEqual(self.clean_steps['deploy'][0], step)
+
+ def test_find_step_not_found(self):
+ with task_manager.acquire(
+ self.context, self.node.uuid, shared=False) as task:
+ self.assertIsNone(agent_base.find_step(
+ task, 'clean', 'non-deploy', 'erase_devices'))
+ self.assertIsNone(agent_base.find_step(
+ task, 'clean', 'deploy', 'something_else'))
+ self.assertIsNone(agent_base.find_step(
+ task, 'deploy', 'deploy', 'erase_devices'))
+
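The find_step tests above only require a scan over the cached steps that returns None on any miss; a sketch of such a helper:

from ironic.drivers.modules import agent_base

def find_step(task, step_type, interface, name):
    # Steps cached from the ramdisk carry their interface and step name.
    for step in agent_base.get_steps(task, step_type):
        if step['interface'] == interface and step['step'] == name:
            return step
    return None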
def test_get_deploy_steps(self):
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
@@ -2321,18 +2347,35 @@ class StepMethodsTestCase(db_base.DbTestCase):
'agent_cached_deploy_steps': self.clean_steps
}
steps = self.deploy.get_deploy_steps(task)
- # 2 in-band steps + one out-of-band
- self.assertEqual(3, len(steps))
- self.assertIn(self.clean_steps['deploy'][0], steps)
- self.assertIn(self.clean_steps['deploy'][1], steps)
- self.assertNotIn(self.clean_steps['raid'][0], steps)
+ # 2 in-band steps + 3 out-of-band
+ expected = [
+ {'step': 'deploy', 'priority': 100, 'argsinfo': None,
+ 'interface': 'deploy'},
+ {'step': 'tear_down_agent', 'priority': 40, 'argsinfo': None,
+ 'interface': 'deploy'},
+ {'step': 'switch_to_tenant_network', 'priority': 30,
+ 'argsinfo': None, 'interface': 'deploy'},
+ {'step': 'boot_instance', 'priority': 20, 'argsinfo': None,
+ 'interface': 'deploy'},
+ ] + self.clean_steps['deploy']
+ self.assertCountEqual(expected, steps)
def test_get_deploy_steps_only_oob(self):
with task_manager.acquire(
self.context, self.node.uuid, shared=False) as task:
steps = self.deploy.get_deploy_steps(task)
- # one out-of-band step
- self.assertEqual(1, len(steps))
+ # three base out-of-band steps
+ expected = [
+ {'step': 'deploy', 'priority': 100, 'argsinfo': None,
+ 'interface': 'deploy'},
+ {'step': 'tear_down_agent', 'priority': 40, 'argsinfo': None,
+ 'interface': 'deploy'},
+ {'step': 'switch_to_tenant_network', 'priority': 30,
+ 'argsinfo': None, 'interface': 'deploy'},
+ {'step': 'boot_instance', 'priority': 20, 'argsinfo': None,
+ 'interface': 'deploy'},
+ ]
+ self.assertCountEqual(expected, steps)
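The four out-of-band steps and priorities expected above (deploy 100, tear_down_agent 40, switch_to_tenant_network 30, boot_instance 20) correspond to decorator-based step registration; an illustrative sketch, not the literal driver source:

from ironic.drivers import base
from ironic.drivers.modules import agent_base

class AgentDeploy(agent_base.AgentDeployMixin, base.DeployInterface):

    @base.deploy_step(priority=100)
    def deploy(self, task):
        """Start the image transfer in-band."""

    @base.deploy_step(priority=40)
    def tear_down_agent(self, task):
        """Soft power off once the agent has finished."""

    @base.deploy_step(priority=30)
    def switch_to_tenant_network(self, task):
        """Move the node from provisioning to tenant networks."""

    @base.deploy_step(priority=20)
    def boot_instance(self, task):
        """Power the node on into the deployed instance."""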
@mock.patch('ironic.objects.Port.list_by_node_id',
spec_set=types.FunctionType)

diff --git a/ironic/tests/unit/drivers/modules/test_agent_client.py b/ironic/tests/unit/drivers/modules/test_agent_client.py
index 1bea74188..1f8cc1f6a 100644
--- a/ironic/tests/unit/drivers/modules/test_agent_client.py
+++ b/ironic/tests/unit/drivers/modules/test_agent_client.py
@@ -14,8 +14,8 @@
from http import client as http_client
import json
+from unittest import mock
-import mock
import requests
import retrying
@@ -29,13 +29,29 @@ CONF = conf.CONF
class MockResponse(object):
- def __init__(self, text, status_code=http_client.OK):
- assert isinstance(text, str)
+ def __init__(self, data=None, status_code=http_client.OK, text=None):
+ assert not (data and text)
self.text = text
+ self.data = data
self.status_code = status_code
def json(self):
- return json.loads(self.text)
+ if self.text:
+ return json.loads(self.text)
+ else:
+ return self.data
+
+
+class MockCommandStatus(MockResponse):
+ def __init__(self, status, name='fake', error=None):
+ super().__init__({
+ 'commands': [
+ {'command_name': name,
+ 'command_status': status,
+ 'command_result': 'I did something',
+ 'command_error': error}
+ ]
+ })
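One MockCommandStatus line now replaces the hand-built payloads the deleted deploy_has_started/deploy_is_done tests assembled, e.g. with the 'SUCCESS' status used elsewhere in this file:

response = MockCommandStatus('SUCCESS', name='prepare_image')
assert response.json()['commands'][0]['command_status'] == 'SUCCESS'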

class MockNode(object):
@@ -46,13 +62,15 @@ class MockNode(object):
'hardware_manager_version': {'generic': '1'}
}
self.instance_info = {}
+ self.driver_info = {}
def as_dict(self, secure=False):
assert secure, 'agent_client must pass secure=True'
return {
'uuid': self.uuid,
'driver_internal_info': self.driver_internal_info,
- 'instance_info': self.instance_info
+ 'instance_info': self.instance_info,
+ 'driver_info': self.driver_info,
}
@@ -76,7 +94,7 @@ class TestAgentClient(base.TestCase):
def test__get_command_url_fail(self):
del self.node.driver_internal_info['agent_url']
- self.assertRaises(exception.IronicException,
+ self.assertRaises(exception.AgentConnectionFailed,
self.client._get_command_url,
self.node)
@@ -87,8 +105,7 @@ class TestAgentClient(base.TestCase):
def test__command(self):
response_data = {'status': 'ok'}
- response_text = json.dumps(response_data)
- self.client.session.post.return_value = MockResponse(response_text)
+ self.client.session.post.return_value = MockResponse(response_data)
method = 'standby.run_image'
image_info = {'image_id': 'test_image'}
params = {'image_info': image_info}
@@ -102,11 +119,13 @@ class TestAgentClient(base.TestCase):
url,
data=body,
params={'wait': 'false'},
- timeout=60)
+ timeout=60,
+ verify=True)
def test__command_fail_json(self):
response_text = 'this be not json matey!'
- self.client.session.post.return_value = MockResponse(response_text)
+ self.client.session.post.return_value = MockResponse(
+ text=response_text)
method = 'standby.run_image'
image_info = {'image_id': 'test_image'}
params = {'image_info': image_info}
@@ -121,7 +140,8 @@ class TestAgentClient(base.TestCase):
url,
data=body,
params={'wait': 'false'},
- timeout=60)
+ timeout=60,
+ verify=True)
def test__command_fail_post(self):
error = 'Boom'
@@ -146,7 +166,7 @@ class TestAgentClient(base.TestCase):
method = 'foo.bar'
params = {}
- self.client._get_command_url(self.node)
+ url = self.client._get_command_url(self.node)
self.client._get_command_body(method, params)
e = self.assertRaises(exception.AgentConnectionFailed,
@@ -157,9 +177,16 @@ class TestAgentClient(base.TestCase):
'command %(method)s. Error: %(error)s' %
{'method': method, 'node': self.node.uuid,
'error': error}, str(e))
+ self.client.session.post.assert_called_with(
+ url,
+ data=mock.ANY,
+ params={'wait': 'false'},
+ timeout=60,
+ verify=True)
+ self.assertEqual(3, self.client.session.post.call_count)
def test__command_error_code(self):
- response_text = '{"faultstring": "you dun goofd"}'
+ response_text = {"faultstring": "you dun goofd"}
self.client.session.post.return_value = MockResponse(
response_text, status_code=http_client.BAD_REQUEST)
method = 'standby.run_image'
@@ -176,13 +203,13 @@ class TestAgentClient(base.TestCase):
url,
data=body,
params={'wait': 'false'},
- timeout=60)
+ timeout=60,
+ verify=True)
def test__command_error_code_okay_error_typeerror_embedded(self):
- response_text = ('{"faultstring": "you dun goofd", '
- '"command_error": {"type": "TypeError"}}')
- self.client.session.post.return_value = MockResponse(
- response_text)
+ response_data = {"faultstring": "you dun goofd",
+ "command_error": {"type": "TypeError"}}
+ self.client.session.post.return_value = MockResponse(response_data)
method = 'standby.run_image'
image_info = {'image_id': 'test_image'}
params = {'image_info': image_info}
@@ -197,7 +224,61 @@ class TestAgentClient(base.TestCase):
url,
data=body,
params={'wait': 'false'},
- timeout=60)
+ timeout=60,
+ verify=True)
+
+ def test__command_verify(self):
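+ # A configured agent_verify_ca must be passed straight through as
+ # the requests 'verify' argument instead of the default True.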
+ response_data = {'status': 'ok'}
+ self.client.session.post.return_value = MockResponse(response_data)
+ method = 'standby.run_image'
+ image_info = {'image_id': 'test_image'}
+ params = {'image_info': image_info}
+
+ self.node.driver_info['agent_verify_ca'] = '/path/to/agent.crt'
+
+ url = self.client._get_command_url(self.node)
+ body = self.client._get_command_body(method, params)
+
+ response = self.client._command(self.node, method, params)
+ self.assertEqual(response, response_data)
+ self.client.session.post.assert_called_once_with(
+ url,
+ data=body,
+ params={'wait': 'false'},
+ timeout=60,
+ verify='/path/to/agent.crt')
+
+ @mock.patch('time.sleep', lambda seconds: None)
+ def test__command_poll(self):
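+ # With poll=True, _command keeps querying the commands endpoint
+ # until the command leaves RUNNING, then returns the final record.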
+ response_data = {'status': 'ok'}
+ final_status = MockCommandStatus('SUCCEEDED', name='run_image')
+ self.client.session.post.return_value = MockResponse(response_data)
+ self.client.session.get.side_effect = [
+ MockCommandStatus('RUNNING', name='run_image'),
+ final_status,
+ ]
+
+ method = 'standby.run_image'
+ image_info = {'image_id': 'test_image'}
+ params = {'image_info': image_info}
+ expected = {'command_error': None,
+ 'command_name': 'run_image',
+ 'command_result': 'I did something',
+ 'command_status': 'SUCCEEDED'}
+
+ url = self.client._get_command_url(self.node)
+ body = self.client._get_command_body(method, params)
+
+ response = self.client._command(self.node, method, params, poll=True)
+ self.assertEqual(expected, response)
+ self.client.session.post.assert_called_once_with(
+ url,
+ data=body,
+ params={'wait': 'false'},
+ timeout=60,
+ verify=True)
+ self.client.session.get.assert_called_with(url, timeout=60,
+ verify=True)
def test_get_commands_status(self):
with mock.patch.object(self.client.session, 'get',
@@ -211,18 +292,42 @@ class TestAgentClient(base.TestCase):
'%(agent_url)s/%(api_version)s/commands' % {
'agent_url': agent_url,
'api_version': CONF.agent.agent_api_version},
- timeout=CONF.agent.command_timeout)
+ timeout=CONF.agent.command_timeout,
+ verify=True)
def test_get_commands_status_retries(self):
+ res = mock.MagicMock(spec_set=['json'])
+ res.json.return_value = {'commands': []}
+ self.client.session.get.side_effect = [
+ requests.ConnectionError('boom'),
+ res
+ ]
+ self.assertEqual([], self.client.get_commands_status(self.node))
+ self.assertEqual(2, self.client.session.get.call_count)
+
+ def test_get_commands_status_no_retries(self):
+ self.client.session.get.side_effect = requests.ConnectionError('boom')
+ self.assertRaises(exception.AgentConnectionFailed,
+ self.client.get_commands_status, self.node,
+ retry_connection=False)
+ self.assertEqual(1, self.client.session.get.call_count)
+
+ def test_get_commands_status_verify(self):
+ self.node.driver_info['agent_verify_ca'] = '/path/to/agent.crt'
+
with mock.patch.object(self.client.session, 'get',
autospec=True) as mock_get:
res = mock.MagicMock(spec_set=['json'])
res.json.return_value = {'commands': []}
- mock_get.side_effect = [
- requests.ConnectionError('boom'),
- res]
+ mock_get.return_value = res
self.assertEqual([], self.client.get_commands_status(self.node))
- self.assertEqual(2, mock_get.call_count)
+ agent_url = self.node.driver_internal_info.get('agent_url')
+ mock_get.assert_called_once_with(
+ '%(agent_url)s/%(api_version)s/commands' % {
+ 'agent_url': agent_url,
+ 'api_version': CONF.agent.agent_api_version},
+ timeout=CONF.agent.command_timeout,
+ verify='/path/to/agent.crt')
def test_prepare_image(self):
self.client._command = mock.MagicMock(spec_set=[])
@@ -234,7 +339,7 @@ class TestAgentClient(base.TestCase):
wait=False)
self.client._command.assert_called_once_with(
node=self.node, method='standby.prepare_image',
- params=params, wait=False)
+ params=params, poll=False)
def test_prepare_image_with_configdrive(self):
self.client._command = mock.MagicMock(spec_set=[])
@@ -251,7 +356,19 @@ class TestAgentClient(base.TestCase):
wait=False)
self.client._command.assert_called_once_with(
node=self.node, method='standby.prepare_image',
- params=params, wait=False)
+ params=params, poll=False)
+
+ def test_prepare_image_with_wait(self):
+ self.client._command = mock.MagicMock(spec_set=[])
+ image_info = {'image_id': 'image'}
+ params = {'image_info': image_info}
+
+ self.client.prepare_image(self.node,
+ image_info,
+ wait=True)
+ self.client._command.assert_called_once_with(
+ node=self.node, method='standby.prepare_image',
+ params=params, poll=True)
def test_start_iscsi_target(self):
self.client._command = mock.MagicMock(spec_set=[])
@@ -305,9 +422,8 @@ class TestAgentClient(base.TestCase):
self.node, root_uuid, efi_system_part_uuid=efi_system_part_uuid,
prep_boot_part_uuid=prep_boot_part_uuid, target_boot_mode='hello')
self.client._command.assert_called_once_with(
- command_timeout_factor=2, node=self.node,
- method='image.install_bootloader', params=params,
- wait=True)
+ node=self.node, method='image.install_bootloader', params=params,
+ poll=True)
def test_install_bootloader(self):
self._test_install_bootloader(root_uuid='fake-root-uuid',
@@ -415,8 +531,7 @@ class TestAgentClient(base.TestCase):
def test__command_agent_client(self):
response_data = {'status': 'ok'}
- response_text = json.dumps(response_data)
- self.client.session.post.return_value = MockResponse(response_text)
+ self.client.session.post.return_value = MockResponse(response_data)
method = 'standby.run_image'
image_info = {'image_id': 'test_image'}
params = {'image_info': image_info}
@@ -433,7 +548,8 @@ class TestAgentClient(base.TestCase):
data=body,
params={'wait': 'false',
'agent_token': 'magical'},
- timeout=60)
+ timeout=60,
+ verify=True)
class TestAgentClientAttempts(base.TestCase):
@@ -472,13 +588,12 @@ class TestAgentClientAttempts(base.TestCase):
mock_sleep.return_value = None
error = 'Connection Timeout'
response_data = {'status': 'ok'}
- response_text = json.dumps(response_data)
method = 'standby.run_image'
image_info = {'image_id': 'test_image'}
params = {'image_info': image_info}
self.client.session.post.side_effect = [requests.Timeout(error),
requests.Timeout(error),
- MockResponse(response_text)]
+ MockResponse(response_data)]
response = self.client._command(self.node, method, params)
self.assertEqual(3, self.client.session.post.call_count)
@@ -487,19 +602,19 @@ class TestAgentClientAttempts(base.TestCase):
self.client._get_command_url(self.node),
data=self.client._get_command_body(method, params),
params={'wait': 'false'},
- timeout=60)
+ timeout=60,
+ verify=True)
@mock.patch.object(retrying.time, 'sleep', autospec=True)
def test__command_succeed_after_one_timeout(self, mock_sleep):
mock_sleep.return_value = None
error = 'Connection Timeout'
response_data = {'status': 'ok'}
- response_text = json.dumps(response_data)
method = 'standby.run_image'
image_info = {'image_id': 'test_image'}
params = {'image_info': image_info}
self.client.session.post.side_effect = [requests.Timeout(error),
- MockResponse(response_text),
+ MockResponse(response_data),
requests.Timeout(error)]
response = self.client._command(self.node, method, params)
@@ -509,4 +624,5 @@ class TestAgentClientAttempts(base.TestCase):
self.client._get_command_url(self.node),
data=self.client._get_command_body(method, params),
params={'wait': 'false'},
- timeout=60)
+ timeout=60,
+ verify=True)
diff --git a/ironic/tests/unit/drivers/modules/test_agent_power.py b/ironic/tests/unit/drivers/modules/test_agent_power.py
new file mode 100644
index 000000000..0d4004c66
--- /dev/null
+++ b/ironic/tests/unit/drivers/modules/test_agent_power.py
@@ -0,0 +1,127 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+from unittest import mock
+
+from ironic.common import exception
+from ironic.common import states
+from ironic.conductor import task_manager
+from ironic.drivers.modules import agent_client
+from ironic.drivers.modules import agent_power
+from ironic.tests.unit.db import base as db_base
+from ironic.tests.unit.objects import utils as object_utils
+
+
+@mock.patch('time.sleep', lambda _sec: None)
+class AgentPowerTest(db_base.DbTestCase):
+
+ def setUp(self):
+ super(AgentPowerTest, self).setUp()
+ self.config(fast_track=True, group='deploy')
+ self.power = agent_power.AgentPower()
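+ # Fast track treats a node with a recent heartbeat as powered on
+ # and reachable, so seed one along with the agent connection details.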
+ dii = {
+ 'agent_last_heartbeat': datetime.datetime.now().strftime(
+ "%Y-%m-%dT%H:%M:%S.%f"),
+ 'deployment_reboot': True,
+ 'agent_url': 'http://url',
+ 'agent_secret_token': 'very secret',
+ }
+ self.node = object_utils.create_test_node(
+ self.context, driver_internal_info=dii,
+ provision_state=states.DEPLOYING)
+ self.task = mock.Mock(spec=task_manager.TaskManager, node=self.node)
+
+ def test_basics(self):
+ self.assertEqual({}, self.power.get_properties())
+ self.assertFalse(self.power.supports_power_sync(self.task))
+ self.assertEqual([states.REBOOT, states.SOFT_REBOOT],
+ self.power.get_supported_power_states(self.task))
+
+ def test_validate(self):
+ self.power.validate(self.task)
+
+ def test_validate_fails(self):
+ self.node.driver_internal_info['agent_last_heartbeat'] = \
+ datetime.datetime(2010, 7, 19).strftime(
+ "%Y-%m-%dT%H:%M:%S.%f")
+ self.assertRaises(exception.InvalidParameterValue,
+ self.power.validate, self.task)
+
+ del self.node.driver_internal_info['agent_last_heartbeat']
+ self.assertRaises(exception.InvalidParameterValue,
+ self.power.validate, self.task)
+
+ def test_get_power_state(self):
+ self.assertEqual(states.POWER_ON,
+ self.power.get_power_state(self.task))
+
+ def test_get_power_state_unknown(self):
+ self.node.driver_internal_info['agent_last_heartbeat'] = \
+ datetime.datetime(2010, 7, 19).strftime(
+ "%Y-%m-%dT%H:%M:%S.%f")
+ self.assertIsNone(self.power.get_power_state(self.task))
+
+ del self.node.driver_internal_info['agent_last_heartbeat']
+ self.assertIsNone(self.power.get_power_state(self.task))
+
+ @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
+ autospec=True)
+ @mock.patch.object(agent_client.AgentClient, 'reboot', autospec=True)
+ def test_reboot(self, mock_reboot, mock_commands):
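+ # Simulate the old agent still running, the reboot window where the
+ # agent is unreachable, and finally a fresh agent coming back up.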
+ mock_commands.side_effect = [
+ [{'command_name': 'run_image', 'command_status': 'RUNNING'}],
+ exception.AgentConnectionFailed,
+ exception.AgentConnectionFailed,
+ [{'command_name': 'get_deploy_steps', 'command_status': 'RUNNING'}]
+ ]
+ with task_manager.acquire(self.context, self.node.id) as task:
+ # Save the node since the upgrade_lock call changes it
+ node = task.node
+ self.power.reboot(task)
+ mock_reboot.assert_called_once_with(self.power._client, node)
+ mock_commands.assert_called_with(self.power._client, node,
+ retry_connection=False,
+ expect_errors=True)
+ self.assertEqual(4, mock_commands.call_count)
+
+ node.refresh()
+ self.assertNotIn('agent_secret_token', node.driver_internal_info)
+ self.assertNotIn('agent_url', node.driver_internal_info)
+
+ @mock.patch.object(agent_client.AgentClient, 'get_commands_status',
+ autospec=True)
+ @mock.patch.object(agent_client.AgentClient, 'reboot', autospec=True)
+ def test_reboot_timeout(self, mock_reboot, mock_commands):
+ mock_commands.side_effect = exception.AgentConnectionFailed
+ with task_manager.acquire(self.context, self.node.id) as task:
+ node = task.node
+ self.assertRaisesRegex(exception.PowerStateFailure,
+ 'Agent failed to come back',
+ self.power.reboot, task, timeout=0.001)
+ mock_commands.assert_called_with(self.power._client, node,
+ retry_connection=False,
+ expect_errors=True)
+
+ @mock.patch.object(agent_client.AgentClient, 'reboot', autospec=True)
+ def test_reboot_another_state(self, mock_reboot):
+ with task_manager.acquire(self.context, self.node.id) as task:
+ task.node.provision_state = states.DEPLOYWAIT
+ self.power.reboot(task)
+ mock_reboot.assert_called_once_with(self.power._client, task.node)
+
+ @mock.patch.object(agent_client.AgentClient, 'reboot', autospec=True)
+ def test_reboot_into_instance(self, mock_reboot):
+ with task_manager.acquire(self.context, self.node.id) as task:
+ del task.node.driver_internal_info['deployment_reboot']
+ self.power.reboot(task)
+ mock_reboot.assert_called_once_with(self.power._client, task.node)
diff --git a/ironic/tests/unit/drivers/modules/test_boot_mode_utils.py b/ironic/tests/unit/drivers/modules/test_boot_mode_utils.py
index 11b5b5f7b..f191eec3e 100644
--- a/ironic/tests/unit/drivers/modules/test_boot_mode_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_boot_mode_utils.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from ironic.common import boot_modes
from ironic.drivers.modules import boot_mode_utils
@@ -29,7 +29,7 @@ class GetBootModeTestCase(tests_base.TestCase):
driver='fake-hardware')
@mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
- autospect=True)
+ autospec=True)
def test_get_boot_mode_bios(self, mock_for_deploy):
mock_for_deploy.return_value = boot_modes.LEGACY_BIOS
boot_mode = boot_mode_utils.get_boot_mode(self.node)
@@ -42,9 +42,9 @@ class GetBootModeTestCase(tests_base.TestCase):
boot_mode = boot_mode_utils.get_boot_mode(self.node)
self.assertEqual(boot_modes.UEFI, boot_mode)
- @mock.patch.object(boot_mode_utils, 'LOG', autospect=True)
+ @mock.patch.object(boot_mode_utils, 'LOG', autospec=True)
@mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
- autospect=True)
+ autospec=True)
def test_get_boot_mode_default(self, mock_for_deploy, mock_log):
boot_mode_utils.warn_about_default_boot_mode = False
mock_for_deploy.return_value = None
diff --git a/ironic/tests/unit/drivers/modules/test_console_utils.py b/ironic/tests/unit/drivers/modules/test_console_utils.py
index a3cd142ad..3419abb4a 100644
--- a/ironic/tests/unit/drivers/modules/test_console_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_console_utils.py
@@ -19,19 +19,20 @@
import errno
import fcntl
+import ipaddress
import os
import random
import signal
+import socket
import string
import subprocess
import tempfile
import time
+from unittest import mock
from ironic_lib import utils as ironic_utils
-import mock
from oslo_config import cfg
from oslo_service import loopingcall
-from oslo_utils import netutils
import psutil
from ironic.common import exception
@@ -223,7 +224,7 @@ class ConsoleUtilsTestCase(db_base.DbTestCase):
generated_url = (
console_utils.get_shellinabox_console_url(self.info['port']))
console_host = CONF.my_ip
- if netutils.is_valid_ipv6(console_host):
+ if ipaddress.ip_address(console_host).version == 6:
console_host = '[%s]' % console_host
http_url = "%s://%s:%s" % (scheme, console_host, self.info['port'])
self.assertEqual(http_url, generated_url)
@@ -668,7 +669,7 @@ class ConsoleUtilsTestCase(db_base.DbTestCase):
def test_allocate_port_success(self, mock_verify, mock_ports):
self.config(port_range='10000:10001', group='console')
port = console_utils.acquire_port()
- mock_verify.assert_called_once_with(10000)
+ mock_verify.assert_called_once_with(10000, host=None)
self.assertEqual(port, 10000)
mock_ports.add.assert_called_once_with(10000)
@@ -679,7 +680,9 @@ class ConsoleUtilsTestCase(db_base.DbTestCase):
mock_verify.side_effect = (exception.Conflict, exception.Conflict,
None)
port = console_utils.acquire_port()
- verify_calls = [mock.call(10000), mock.call(10001), mock.call(10002)]
+ verify_calls = [mock.call(10000, host=None),
+ mock.call(10001, host=None),
+ mock.call(10002, host=None)]
mock_verify.assert_has_calls(verify_calls)
self.assertEqual(port, 10002)
mock_ports.add.assert_called_once_with(10002)
@@ -691,5 +694,39 @@ class ConsoleUtilsTestCase(db_base.DbTestCase):
mock_verify.side_effect = exception.Conflict
self.assertRaises(exception.NoFreeIPMITerminalPorts,
console_utils.acquire_port)
- verify_calls = [mock.call(p) for p in range(10000, 10005)]
+ verify_calls = [mock.call(p, host=None) for p in range(10000, 10005)]
mock_verify.assert_has_calls(verify_calls)
+
+ @mock.patch.object(socket, 'socket', autospec=True)
+ def test__verify_port_default(self, mock_socket):
+ self.config(host='localhost.localdomain')
+ mock_sock = mock.MagicMock()
+ mock_socket.return_value = mock_sock
+ console_utils._verify_port(10000)
+ mock_sock.bind.assert_called_once_with(('localhost.localdomain',
+ 10000))
+
+ @mock.patch.object(socket, 'socket', autospec=True)
+ def test__verify_port_hostname(self, mock_socket):
+ mock_sock = mock.MagicMock()
+ mock_socket.return_value = mock_sock
+ console_utils._verify_port(10000, host='localhost.localdomain')
+ mock_socket.assert_called_once_with()
+ mock_sock.bind.assert_called_once_with(('localhost.localdomain',
+ 10000))
+
+ @mock.patch.object(socket, 'socket', autospec=True)
+ def test__verify_port_ipv4(self, mock_socket):
+ mock_sock = mock.MagicMock()
+ mock_socket.return_value = mock_sock
+ console_utils._verify_port(10000, host='1.2.3.4')
+ mock_socket.assert_called_once_with()
+ mock_sock.bind.assert_called_once_with(('1.2.3.4', 10000))
+
+ @mock.patch.object(socket, 'socket', autospec=True)
+ def test__verify_port_ipv6(self, mock_socket):
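+ # An IPv6 literal requires an AF_INET6 socket; hostnames and IPv4
+ # addresses use the default socket family, as the tests above show.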
+ mock_sock = mock.MagicMock()
+ mock_socket.return_value = mock_sock
+ console_utils._verify_port(10000, host='2001:dead:beef::1')
+ mock_socket.assert_called_once_with(socket.AF_INET6)
+ mock_sock.bind.assert_called_once_with(('2001:dead:beef::1', 10000))
diff --git a/ironic/tests/unit/drivers/modules/test_deploy_utils.py b/ironic/tests/unit/drivers/modules/test_deploy_utils.py
index a39b33a35..0307fd8d2 100644
--- a/ironic/tests/unit/drivers/modules/test_deploy_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_deploy_utils.py
@@ -16,9 +16,9 @@
import os
import tempfile
+from unittest import mock
import fixtures
-import mock
from oslo_config import cfg
from oslo_utils import fileutils
from oslo_utils import uuidutils
@@ -582,6 +582,34 @@ class GetPxeBootConfigTestCase(db_base.DbTestCase):
result = utils.get_pxe_boot_file(self.node)
self.assertEqual('bios-bootfile', result)
+ def test_get_ipxe_boot_file(self):
+ self.config(ipxe_bootfile_name='meow', group='pxe')
+ result = utils.get_ipxe_boot_file(self.node)
+ self.assertEqual('meow', result)
+
+ def test_get_ipxe_boot_file_uefi(self):
+ self.config(uefi_ipxe_bootfile_name='ipxe-uefi-bootfile', group='pxe')
+ properties = {'capabilities': 'boot_mode:uefi'}
+ self.node.properties = properties
+ result = utils.get_ipxe_boot_file(self.node)
+ self.assertEqual('ipxe-uefi-bootfile', result)
+
+ def test_get_ipxe_boot_file_other_arch(self):
+ arch_names = {'aarch64': 'ipxe-aa64.efi',
+ 'x86_64': 'ipxe.kpxe'}
+ self.config(ipxe_bootfile_name_by_arch=arch_names, group='pxe')
+ properties = {'cpu_arch': 'aarch64', 'capabilities': 'boot_mode:uefi'}
+ self.node.properties = properties
+ result = utils.get_ipxe_boot_file(self.node)
+ self.assertEqual('ipxe-aa64.efi', result)
+
+ def test_get_ipxe_boot_file_fallback(self):
+ self.config(ipxe_bootfile_name=None, group='pxe')
+ self.config(uefi_ipxe_bootfile_name=None, group='pxe')
+ self.config(pxe_bootfile_name='lolcat', group='pxe')
+ result = utils.get_ipxe_boot_file(self.node)
+ self.assertEqual('lolcat', result)
+
def test_get_pxe_config_template_emtpy_property(self):
self.node.properties = {}
self.config(pxe_config_template_by_arch=self.template_by_arch,
@@ -597,6 +625,28 @@ class GetPxeBootConfigTestCase(db_base.DbTestCase):
result = utils.get_pxe_config_template(node)
self.assertEqual('fake-template', result)
+ def test_get_ipxe_config_template(self):
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware')
+ self.assertIn('ipxe_config.template',
+ utils.get_ipxe_config_template(node))
+
+ def test_get_ipxe_config_template_none(self):
+ self.config(ipxe_config_template=None, group='pxe')
+ self.config(pxe_config_template='magical_bootloader',
+ group='pxe')
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware')
+ self.assertEqual('magical_bootloader',
+ utils.get_ipxe_config_template(node))
+
+ def test_get_ipxe_config_template_override_pxe_fallback(self):
+ node = obj_utils.create_test_node(
+ self.context, driver='fake-hardware',
+ driver_info={'pxe_template': 'magical'})
+ self.assertEqual('magical',
+ utils.get_ipxe_config_template(node))
+
@mock.patch('time.sleep', lambda sec: None)
class OtherFunctionTestCase(db_base.DbTestCase):
@@ -771,19 +821,9 @@ class OtherFunctionTestCase(db_base.DbTestCase):
mock_clean_up_caches.assert_called_once_with(None, 'master_dir',
[('uuid', 'path')])
- @mock.patch('ironic.common.keystone.get_auth')
- @mock.patch.object(utils, '_get_ironic_session')
- def test_get_ironic_api_url_from_config(self, mock_ks, mock_auth):
- mock_sess = mock.Mock()
- mock_ks.return_value = mock_sess
- fake_api_url = 'http://foo/'
- self.config(api_url=fake_api_url, group='conductor')
- # also checking for stripped trailing slash
- self.assertEqual(fake_api_url[:-1], utils.get_ironic_api_url())
-
- @mock.patch('ironic.common.keystone.get_auth')
- @mock.patch.object(utils, '_get_ironic_session')
- @mock.patch('ironic.common.keystone.get_adapter')
+ @mock.patch('ironic.common.keystone.get_auth', autospec=True)
+ @mock.patch.object(utils, '_get_ironic_session', autospec=True)
+ @mock.patch('ironic.common.keystone.get_adapter', autospec=True)
def test_get_ironic_api_url_from_keystone(self, mock_ka, mock_ks,
mock_auth):
mock_sess = mock.Mock()
@@ -791,7 +831,6 @@ class OtherFunctionTestCase(db_base.DbTestCase):
fake_api_url = 'http://foo/'
mock_ka.return_value.get_endpoint.return_value = fake_api_url
# NOTE(pas-ha) endpoint_override is None by default
- self.config(api_url=None, group='conductor')
url = utils.get_ironic_api_url()
# also checking for stripped trailing slash
self.assertEqual(fake_api_url[:-1], url)
@@ -799,26 +838,24 @@ class OtherFunctionTestCase(db_base.DbTestCase):
auth=mock_auth.return_value)
mock_ka.return_value.get_endpoint.assert_called_once_with()
- @mock.patch('ironic.common.keystone.get_auth')
- @mock.patch.object(utils, '_get_ironic_session')
- @mock.patch('ironic.common.keystone.get_adapter')
+ @mock.patch('ironic.common.keystone.get_auth', autospec=True)
+ @mock.patch.object(utils, '_get_ironic_session', autospec=True)
+ @mock.patch('ironic.common.keystone.get_adapter', autospec=True)
def test_get_ironic_api_url_fail(self, mock_ka, mock_ks, mock_auth):
mock_sess = mock.Mock()
mock_ks.return_value = mock_sess
mock_ka.return_value.get_endpoint.side_effect = (
exception.KeystoneFailure())
- self.config(api_url=None, group='conductor')
self.assertRaises(exception.InvalidParameterValue,
utils.get_ironic_api_url)
- @mock.patch('ironic.common.keystone.get_auth')
- @mock.patch.object(utils, '_get_ironic_session')
- @mock.patch('ironic.common.keystone.get_adapter')
+ @mock.patch('ironic.common.keystone.get_auth', autospec=True)
+ @mock.patch.object(utils, '_get_ironic_session', autospec=True)
+ @mock.patch('ironic.common.keystone.get_adapter', autospec=True)
def test_get_ironic_api_url_none(self, mock_ka, mock_ks, mock_auth):
mock_sess = mock.Mock()
mock_ks.return_value = mock_sess
mock_ka.return_value.get_endpoint.return_value = None
- self.config(api_url=None, group='conductor')
self.assertRaises(exception.InvalidParameterValue,
utils.get_ironic_api_url)
@@ -1115,7 +1152,7 @@ class AgentMethodsTestCase(db_base.DbTestCase):
@mock.patch('ironic.conductor.utils.node_power_action', autospec=True)
@mock.patch.object(utils, 'build_agent_options', autospec=True)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.'
- 'add_cleaning_network')
+ 'add_cleaning_network', autospec=True)
def _test_prepare_inband_cleaning(
self, add_cleaning_network_mock,
build_options_mock, power_mock, prepare_ramdisk_mock,
@@ -1127,7 +1164,8 @@ class AgentMethodsTestCase(db_base.DbTestCase):
self.assertEqual(
states.CLEANWAIT,
utils.prepare_inband_cleaning(task, manage_boot=manage_boot))
- add_cleaning_network_mock.assert_called_once_with(task)
+ add_cleaning_network_mock.assert_called_once_with(
+ task.driver.network, task)
if not fast_track:
power_mock.assert_called_once_with(task, states.REBOOT)
else:
@@ -1156,7 +1194,7 @@ class AgentMethodsTestCase(db_base.DbTestCase):
@mock.patch('ironic.conductor.utils.is_fast_track', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
@mock.patch('ironic.drivers.modules.network.flat.FlatNetwork.'
- 'remove_cleaning_network')
+ 'remove_cleaning_network', autospec=True)
@mock.patch('ironic.conductor.utils.node_power_action', autospec=True)
def _test_tear_down_inband_cleaning(
self, power_mock, remove_cleaning_network_mock,
@@ -1172,7 +1210,8 @@ class AgentMethodsTestCase(db_base.DbTestCase):
power_mock.assert_called_once_with(task, states.POWER_OFF)
else:
self.assertFalse(power_mock.called)
- remove_cleaning_network_mock.assert_called_once_with(task)
+ remove_cleaning_network_mock.assert_called_once_with(
+ task.driver.network, task)
if manage_boot:
clean_up_ramdisk_mock.assert_called_once_with(
task.driver.boot, task)
@@ -1192,13 +1231,13 @@ class AgentMethodsTestCase(db_base.DbTestCase):
self._test_tear_down_inband_cleaning(cleaning_error=True)
def test_build_agent_options_conf(self):
- self.config(api_url='https://api-url', group='conductor')
+ self.config(endpoint_override='https://api-url',
+ group='service_catalog')
options = utils.build_agent_options(self.node)
self.assertEqual('https://api-url', options['ipa-api-url'])
- @mock.patch.object(utils, '_get_ironic_session')
+ @mock.patch.object(utils, '_get_ironic_session', autospec=True)
def test_build_agent_options_keystone(self, session_mock):
- self.config(api_url=None, group='conductor')
sess = mock.Mock()
sess.get_endpoint.return_value = 'https://api-url'
session_mock.return_value = sess
@@ -1358,6 +1397,20 @@ class ValidateImagePropertiesTestCase(db_base.DbTestCase):
inst_info, ['kernel', 'ramdisk'])
self.assertEqual(expected_error, str(error))
+ def test_validate_image_properties_boot_iso_conflict(self):
+ instance_info = {
+ 'image_source': 'http://ubuntu',
+ 'boot_iso': 'http://ubuntu.iso',
+ }
+ expected_error = ("An 'image_source' and 'boot_iso' "
+ "parameter may not be specified at "
+ "the same time.")
+ error = self.assertRaises(exception.InvalidParameterValue,
+ utils.validate_image_properties,
+ self.context,
+ instance_info, [])
+ self.assertEqual(expected_error, str(error))
+
class ValidateParametersTestCase(db_base.DbTestCase):
@@ -2176,7 +2229,7 @@ class TestStorageInterfaceUtils(db_base.DbTestCase):
class InstanceImageCacheTestCase(db_base.DbTestCase):
- @mock.patch.object(fileutils, 'ensure_tree')
+ @mock.patch.object(fileutils, 'ensure_tree', autospec=True)
def test_with_master_path(self, mock_ensure_tree):
self.config(instance_master_path='/fake/path', group='pxe')
self.config(image_cache_size=500, group='pxe')
@@ -2188,7 +2241,7 @@ class InstanceImageCacheTestCase(db_base.DbTestCase):
self.assertEqual(500 * 1024 * 1024, cache._cache_size)
self.assertEqual(30 * 60, cache._cache_ttl)
- @mock.patch.object(fileutils, 'ensure_tree')
+ @mock.patch.object(fileutils, 'ensure_tree', autospec=True)
def test_without_master_path(self, mock_ensure_tree):
self.config(instance_master_path='', group='pxe')
self.config(image_cache_size=500, group='pxe')
diff --git a/ironic/tests/unit/drivers/modules/test_image_cache.py b/ironic/tests/unit/drivers/modules/test_image_cache.py
index bd57332fb..896a1900b 100644
--- a/ironic/tests/unit/drivers/modules/test_image_cache.py
+++ b/ironic/tests/unit/drivers/modules/test_image_cache.py
@@ -20,9 +20,9 @@ import datetime
import os
import tempfile
import time
+from unittest import mock
import uuid
-import mock
from oslo_utils import uuidutils
from ironic.common import exception
@@ -733,8 +733,12 @@ class TestFetchCleanup(base.TestCase):
@mock.patch.object(images, 'converted_size', autospec=True)
@mock.patch.object(images, 'fetch', autospec=True)
@mock.patch.object(images, 'image_to_raw', autospec=True)
+ @mock.patch.object(images, 'force_raw_will_convert', autospec=True,
+ return_value=True)
@mock.patch.object(image_cache, '_clean_up_caches', autospec=True)
- def test__fetch(self, mock_clean, mock_raw, mock_fetch, mock_size):
+ def test__fetch(
+ self, mock_clean, mock_will_convert, mock_raw, mock_fetch,
+ mock_size):
mock_size.return_value = 100
image_cache._fetch('fake', 'fake-uuid', '/foo/bar', force_raw=True)
mock_fetch.assert_called_once_with('fake', 'fake-uuid',
@@ -742,3 +746,22 @@ class TestFetchCleanup(base.TestCase):
mock_clean.assert_called_once_with('/foo', 100)
mock_raw.assert_called_once_with('fake-uuid', '/foo/bar',
'/foo/bar.part')
+ mock_will_convert.assert_called_once_with('fake-uuid', '/foo/bar.part')
+
+ @mock.patch.object(images, 'converted_size', autospec=True)
+ @mock.patch.object(images, 'fetch', autospec=True)
+ @mock.patch.object(images, 'image_to_raw', autospec=True)
+ @mock.patch.object(images, 'force_raw_will_convert', autospec=True,
+ return_value=False)
+ @mock.patch.object(image_cache, '_clean_up_caches', autospec=True)
+ def test__fetch_already_raw(
+ self, mock_clean, mock_will_convert, mock_raw, mock_fetch,
+ mock_size):
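+ # An already-raw image needs no conversion headroom, so neither the
+ # size estimation nor the cache cleanup should be invoked.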
+ image_cache._fetch('fake', 'fake-uuid', '/foo/bar', force_raw=True)
+ mock_fetch.assert_called_once_with('fake', 'fake-uuid',
+ '/foo/bar.part', force_raw=False)
+ mock_clean.assert_not_called()
+ mock_size.assert_not_called()
+ mock_raw.assert_called_once_with('fake-uuid', '/foo/bar',
+ '/foo/bar.part')
+ mock_will_convert.assert_called_once_with('fake-uuid', '/foo/bar.part')
diff --git a/ironic/tests/unit/drivers/modules/test_inspect_utils.py b/ironic/tests/unit/drivers/modules/test_inspect_utils.py
index c43e996ba..8b44dde5a 100644
--- a/ironic/tests/unit/drivers/modules/test_inspect_utils.py
+++ b/ironic/tests/unit/drivers/modules/test_inspect_utils.py
@@ -14,7 +14,8 @@
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import importutils
from ironic.common import exception
diff --git a/ironic/tests/unit/drivers/modules/test_inspector.py b/ironic/tests/unit/drivers/modules/test_inspector.py
index 857fd5765..6b2313dab 100644
--- a/ironic/tests/unit/drivers/modules/test_inspector.py
+++ b/ironic/tests/unit/drivers/modules/test_inspector.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import eventlet
-import mock
import openstack
from ironic.common import context
@@ -227,6 +228,39 @@ class InspectHardwareTestCase(BaseTestCase):
self.assertFalse(self.driver.network.remove_inspection_network.called)
self.assertFalse(self.driver.boot.clean_up_ramdisk.called)
+ @mock.patch('ironic.drivers.modules.deploy_utils.get_ironic_api_url',
+ autospec=True)
+ def test_managed_fast_track(self, mock_ironic_url, mock_client):
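+ # With fast track enabled ironic keeps managing the ramdisk itself,
+ # so inspector is told not to manage boot, and the extra kernel
+ # parameters are merged into the ramdisk options.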
+ CONF.set_override('fast_track', True, group='deploy')
+ CONF.set_override('extra_kernel_params',
+ 'ipa-inspection-collectors=default,logs '
+ 'ipa-collect-dhcp=1',
+ group='inspector')
+ endpoint = 'http://192.169.0.42:5050/v1'
+ mock_ironic_url.return_value = 'http://192.169.0.42:6385'
+ mock_client.return_value.get_endpoint.return_value = endpoint
+ mock_introspect = mock_client.return_value.start_introspection
+ self.iface.validate(self.task)
+ self.assertEqual(states.INSPECTWAIT,
+ self.iface.inspect_hardware(self.task))
+ mock_introspect.assert_called_once_with(self.node.uuid,
+ manage_boot=False)
+ self.driver.boot.prepare_ramdisk.assert_called_once_with(
+ self.task, ramdisk_params={
+ 'ipa-inspection-callback-url': endpoint + '/continue',
+ 'ipa-inspection-collectors': 'default,logs',
+ 'ipa-collect-dhcp': '1',
+ 'ipa-api-url': 'http://192.169.0.42:6385',
+ })
+ self.driver.network.add_inspection_network.assert_called_once_with(
+ self.task)
+ self.driver.power.set_power_state.assert_has_calls([
+ mock.call(self.task, states.POWER_OFF, timeout=None),
+ mock.call(self.task, states.POWER_ON, timeout=None),
+ ])
+ self.assertFalse(self.driver.network.remove_inspection_network.called)
+ self.assertFalse(self.driver.boot.clean_up_ramdisk.called)
+
@mock.patch.object(task_manager, 'acquire', autospec=True)
def test_managed_error(self, mock_acquire, mock_client):
endpoint = 'http://192.169.0.42:5050/v1'
diff --git a/ironic/tests/unit/drivers/modules/test_ipmitool.py b/ironic/tests/unit/drivers/modules/test_ipmitool.py
index 6f7b2c513..c13aae62a 100644
--- a/ironic/tests/unit/drivers/modules/test_ipmitool.py
+++ b/ironic/tests/unit/drivers/modules/test_ipmitool.py
@@ -28,10 +28,10 @@ import subprocess
import tempfile
import time
import types
+from unittest import mock
import fixtures
from ironic_lib import utils as ironic_utils
-import mock
from oslo_concurrency import processutils
from oslo_utils import uuidutils
@@ -172,7 +172,7 @@ class IPMIToolCheckInitTestCase(base.TestCase):
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_console_init_calls_for_socat(self, mock_check_dir, mock_support):
- with mock.patch.object(ipmi, 'TMP_DIR_CHECKED'):
+ with mock.patch.object(ipmi, 'TMP_DIR_CHECKED', autospec=True):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = None
ipmi.IPMISocatConsole()
@@ -184,7 +184,7 @@ class IPMIToolCheckInitTestCase(base.TestCase):
def test_console_init_calls_for_socat_already_checked(self,
mock_check_dir,
mock_support):
- with mock.patch.object(ipmi, 'TMP_DIR_CHECKED'):
+ with mock.patch.object(ipmi, 'TMP_DIR_CHECKED', autospec=True):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = True
ipmi.IPMISocatConsole()
@@ -498,7 +498,8 @@ class IPMIToolPrivateMethodTestCase(
# BackoffLoopingCall, it multiplies default interval (equals to 1) by
# 2 * return_value, so if you want BackoffLoopingCall to "sleep" for
# 1 second, return_value should be 0.5.
- m = mock.patch.object(random.SystemRandom, 'gauss', return_value=0.5)
+ m = mock.patch.object(random.SystemRandom, 'gauss', return_value=0.5,
+ autospec=True)
m.start()
self.addCleanup(m.stop)
@@ -1036,18 +1037,96 @@ class IPMIToolPrivateMethodTestCase(
mock_support.return_value = True
mock_exec.return_value = (None, None)
+ self.config(use_ipmitool_retries=True, group='ipmi')
+
ipmi._exec_ipmitool(self.info, 'A B C')
mock_support.assert_called_once_with('timing')
mock_exec.assert_called_once_with(*args)
- def test__exec_ipmitool_wait(self):
- mock_popen = mock.MagicMock()
- mock_popen.poll.side_effect = [1, 1, 1, 1, 1]
- ipmi._exec_ipmitool_wait(1, {'uuid': ''}, mock_popen)
+ @mock.patch.object(ipmi, '_is_option_supported', autospec=True)
+ @mock.patch.object(ipmi, '_make_password_file', _make_password_file_stub)
+ @mock.patch.object(utils, 'execute', autospec=True)
+ def test__exec_ipmitool_with_ironic_retries(
+ self, mock_exec, mock_support):
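+ # When ironic drives the retries itself, ipmitool should be invoked
+ # with a single attempt (-R 1) rather than its own retry loop.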
+ args = [
+ 'ipmitool',
+ '-I', 'lanplus',
+ '-H', self.info['address'],
+ '-L', self.info['priv_level'],
+ '-U', self.info['username'],
+ '-v',
+ '-R', '1',
+ '-N', '5',
+ '-f', awesome_password_filename,
+ 'A', 'B', 'C',
+ ]
- self.assertTrue(mock_popen.terminate.called)
- self.assertTrue(mock_popen.kill.called)
+ mock_support.return_value = True
+ mock_exec.return_value = (None, None)
+
+ self.config(use_ipmitool_retries=False, group='ipmi')
+
+ ipmi._exec_ipmitool(self.info, 'A B C')
+
+ mock_support.assert_called_once_with('timing')
+ mock_exec.assert_called_once_with(*args)
+
+ @mock.patch.object(ipmi, '_is_option_supported', autospec=True)
+ @mock.patch.object(ipmi, '_make_password_file', _make_password_file_stub)
+ @mock.patch.object(utils, 'execute', autospec=True)
+ def test__exec_ipmitool_with_timeout(
+ self, mock_exec, mock_support):
+ args = [
+ 'ipmitool',
+ '-I', 'lanplus',
+ '-H', self.info['address'],
+ '-L', self.info['priv_level'],
+ '-U', self.info['username'],
+ '-v',
+ '-R', '12',
+ '-N', '5',
+ '-f', awesome_password_filename,
+ 'A', 'B', 'C',
+ ]
+
+ mock_support.return_value = True
+ mock_exec.return_value = (None, None)
+
+ self.config(use_ipmitool_retries=True, group='ipmi')
+ ipmi._exec_ipmitool(self.info, 'A B C', kill_on_timeout=True)
+
+ mock_support.assert_called_once_with('timing')
+ mock_exec.assert_called_once_with(*args, timeout=60)
+
+ @mock.patch.object(ipmi, '_is_option_supported', autospec=True)
+ @mock.patch.object(ipmi, '_make_password_file', _make_password_file_stub)
+ @mock.patch.object(utils, 'execute', autospec=True)
+ def test__exec_ipmitool_with_ironic_retries_multiple(
+ self, mock_exec, mock_support):
+
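+ # Three attempts fit in the 3s retry window at a 1s minimum
+ # interval, after which the last error is re-raised.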
+ mock_exec.side_effect = [
+ processutils.ProcessExecutionError(
+ stderr="Unknown"
+ ),
+ processutils.ProcessExecutionError(
+ stderr="Unknown"
+ ),
+ processutils.ProcessExecutionError(
+ stderr="Unknown"
+ ),
+ ]
+
+ self.config(min_command_interval=1, group='ipmi')
+ self.config(command_retry_timeout=3, group='ipmi')
+ self.config(use_ipmitool_retries=False, group='ipmi')
+
+ self.assertRaises(processutils.ProcessExecutionError,
+ ipmi._exec_ipmitool,
+ self.info, 'A B C')
+
+ mock_support.assert_called_once_with('timing')
+ self.assertEqual(3, mock_exec.call_count)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(ipmi, '_make_password_file', _make_password_file_stub)
@@ -1243,6 +1322,8 @@ class IPMIToolPrivateMethodTestCase(
'A', 'B', 'C',
]
+ self.config(use_ipmitool_retries=True, group='ipmi')
+
mock_support.return_value = False
mock_exec.side_effect = processutils.ProcessExecutionError("x")
self.assertRaises(processutils.ProcessExecutionError,
@@ -2087,7 +2168,8 @@ class IPMIToolDriverTestCase(Base):
self.management.set_boot_device,
task, boot_devices.PXE)
- @mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy')
+ @mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
+ autospec=True)
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test_management_interface_set_boot_device_uefi(self, mock_exec,
mock_boot_mode):
@@ -2103,7 +2185,8 @@ class IPMIToolDriverTestCase(Base):
]
mock_exec.assert_has_calls(mock_calls)
- @mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy')
+ @mock.patch.object(boot_mode_utils, 'get_boot_mode_for_deploy',
+ autospec=True)
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test_management_interface_set_boot_device_uefi_and_persistent(
self, mock_exec, mock_boot_mode):
@@ -2546,7 +2629,7 @@ class IPMIToolDriverTestCase(Base):
with task_manager.acquire(self.context,
self.node.uuid) as task:
port = ipmi._allocate_port(task)
- mock_acquire.assert_called_once_with()
+ mock_acquire.assert_called_once_with(host=None)
self.assertEqual(port, 1234)
info = task.node.driver_internal_info
self.assertEqual(info['allocated_ipmi_terminal_port'], 1234)
@@ -2876,6 +2959,7 @@ class IPMIToolSocatDriverTestCase(IPMIToolShellinaboxTestCase):
autospec=True)
def test_start_console_alloc_port(self, mock_stop, mock_start, mock_info,
mock_alloc):
+ self.config(socat_address='2001:dead:beef::1', group='console')
mock_start.return_value = None
mock_info.return_value = {'port': None}
mock_alloc.return_value = 1234
@@ -2887,7 +2971,7 @@ class IPMIToolSocatDriverTestCase(IPMIToolShellinaboxTestCase):
mock_start.assert_called_once_with(
self.console, {'port': 1234},
console_utils.start_socat_console)
- mock_alloc.assert_called_once_with(mock.ANY)
+ mock_alloc.assert_called_once_with(mock.ANY, host='2001:dead:beef::1')
@mock.patch.object(ipmi.IPMISocatConsole, '_get_ipmi_cmd', autospec=True)
@mock.patch.object(console_utils, 'start_socat_console',
diff --git a/ironic/tests/unit/drivers/modules/test_ipxe.py b/ironic/tests/unit/drivers/modules/test_ipxe.py
index cbec1bb0d..d0bb625ff 100644
--- a/ironic/tests/unit/drivers/modules/test_ipxe.py
+++ b/ironic/tests/unit/drivers/modules/test_ipxe.py
@@ -16,8 +16,8 @@
"""Test class for iPXE driver."""
import os
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_serialization import jsonutils as json
from oslo_utils import uuidutils
@@ -83,7 +83,6 @@ class iPXEBootTestCase(db_base.DbTestCase):
driver_internal_info=self.driver_internal_info)
self.port = obj_utils.create_test_port(self.context,
node_id=self.node.id)
- self.config(group='conductor', api_url='http://127.0.0.1:1234/')
def test_get_properties(self):
expected = pxe_base.COMMON_PROPERTIES
@@ -132,6 +131,30 @@ class iPXEBootTestCase(db_base.DbTestCase):
self.assertRaises(exception.MissingParameterValue,
task.driver.boot.validate, task)
+ @mock.patch.object(image_service.GlanceImageService, 'show', autospec=True)
+ @mock.patch('ironic.drivers.modules.deploy_utils.get_boot_option',
+ return_value='ramdisk', autospec=True)
+ def test_validate_with_boot_iso(self, mock_boot_option, mock_glance):
+ i_info = self.node.driver_info
+ i_info['boot_iso'] = "http://localhost:1234/boot.iso"
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ task.driver.boot.validate(task)
+ self.assertTrue(mock_boot_option.called)
+ self.assertTrue(mock_glance.called)
+
+ def test_validate_with_boot_iso_and_image_source(self):
+ i_info = self.node.instance_info
+ i_info['image_source'] = "http://localhost:1234/image"
+ i_info['boot_iso'] = "http://localhost:1234/boot.iso"
+ self.node.instance_info = i_info
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid,
+ shared=True) as task:
+ self.assertRaises(exception.InvalidParameterValue,
+ task.driver.boot.validate,
+ task)
+
def test_validate_fail_missing_image_source(self):
info = dict(INST_INFO_DICT)
del info['image_source']
@@ -228,7 +251,7 @@ class iPXEBootTestCase(db_base.DbTestCase):
# and refactored as we begin to separate PXE and iPXE interfaces.
@mock.patch.object(manager_utils, 'node_get_boot_mode', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
- @mock.patch.object(dhcp_factory, 'DHCPFactory')
+ @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
@mock.patch.object(pxe_utils, 'get_image_info', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@@ -310,14 +333,9 @@ class iPXEBootTestCase(db_base.DbTestCase):
mock_cache_r_k.assert_called_once_with(
task, {'rescue_kernel': 'a', 'rescue_ramdisk': 'r'},
ipxe_enabled=True)
- if uefi:
- mock_pxe_config.assert_called_once_with(
- task, {}, CONF.pxe.uefi_pxe_config_template,
- ipxe_enabled=True)
- else:
- mock_pxe_config.assert_called_once_with(
- task, {}, CONF.pxe.pxe_config_template,
- ipxe_enabled=True)
+ mock_pxe_config.assert_called_once_with(
+ task, {}, CONF.pxe.ipxe_config_template,
+ ipxe_enabled=True)
def test_prepare_ramdisk(self):
self.node.provision_state = states.DEPLOYING
@@ -660,7 +678,7 @@ class iPXEBootTestCase(db_base.DbTestCase):
boot_devices.PXE,
persistent=True)
- @mock.patch('os.path.isfile', return_value=False)
+ @mock.patch('os.path.isfile', return_value=False, autospec=True)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@@ -700,7 +718,7 @@ class iPXEBootTestCase(db_base.DbTestCase):
ipxe_enabled=True)
provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
create_pxe_config_mock.assert_called_once_with(
- task, mock.ANY, CONF.pxe.pxe_config_template,
+ task, mock.ANY, CONF.pxe.ipxe_config_template,
ipxe_enabled=True)
switch_pxe_config_mock.assert_called_once_with(
pxe_config_path, "30212642-09d3-467f-8e09-21685826ab50",
@@ -709,7 +727,7 @@ class iPXEBootTestCase(db_base.DbTestCase):
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
- @mock.patch.object(dhcp_factory, 'DHCPFactory')
+ @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot_missing_root_uuid(
@@ -746,7 +764,7 @@ class iPXEBootTestCase(db_base.DbTestCase):
@mock.patch.object(pxe_base.LOG, 'warning', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
- @mock.patch.object(dhcp_factory, 'DHCPFactory')
+ @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_whole_disk_image_missing_root_uuid(
@@ -817,7 +835,7 @@ class iPXEBootTestCase(db_base.DbTestCase):
self.assertFalse(cache_mock.called)
provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
create_pxe_config_mock.assert_called_once_with(
- task, mock.ANY, CONF.pxe.pxe_config_template,
+ task, mock.ANY, CONF.pxe.ipxe_config_template,
ipxe_enabled=True)
switch_pxe_config_mock.assert_called_once_with(
pxe_config_path, None, boot_modes.LEGACY_BIOS, False,
@@ -826,6 +844,52 @@ class iPXEBootTestCase(db_base.DbTestCase):
boot_devices.PXE,
persistent=True)
+ @mock.patch('os.path.isfile', lambda filename: False)
+ @mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
+ @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
+ @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
+ @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
+ @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
+ @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
+ def test_prepare_instance_netboot_ramdisk(
+ self, get_image_info_mock, cache_mock,
+ dhcp_factory_mock, switch_pxe_config_mock,
+ set_boot_device_mock, create_pxe_config_mock):
+ http_url = 'http://192.1.2.3:1234'
+ self.config(http_url=http_url, group='deploy')
+ provider_mock = mock.MagicMock()
+ dhcp_factory_mock.return_value = provider_mock
+ self.node.instance_info = {'boot_iso': 'http://1.2.3.4:1234/boot.iso',
+ 'capabilities': {'boot_option': 'ramdisk'}}
+ image_info = {'kernel': ('', '/path/to/kernel'),
+ 'deploy_kernel': ('', '/path/to/kernel'),
+ 'ramdisk': ('', '/path/to/ramdisk'),
+ 'deploy_ramdisk': ('', '/path/to/ramdisk')}
+ get_image_info_mock.return_value = image_info
+ self.node.provision_state = states.DEPLOYING
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ dhcp_opts = pxe_utils.dhcp_options_for_instance(task,
+ ipxe_enabled=True)
+ dhcp_opts += pxe_utils.dhcp_options_for_instance(
+ task, ipxe_enabled=True, ip_version=6)
+ pxe_config_path = pxe_utils.get_pxe_config_file_path(
+ task.node.uuid, ipxe_enabled=True)
+ task.driver.boot.prepare_instance(task)
+ self.assertTrue(get_image_info_mock.called)
+ self.assertTrue(cache_mock.called)
+ provider_mock.update_dhcp.assert_called_once_with(task, dhcp_opts)
+ create_pxe_config_mock.assert_called_once_with(
+ task, mock.ANY, CONF.pxe.ipxe_config_template,
+ ipxe_enabled=True)
+ switch_pxe_config_mock.assert_called_once_with(
+ pxe_config_path, None, boot_modes.LEGACY_BIOS, False,
+ ipxe_enabled=True, iscsi_boot=False, ramdisk_boot=True)
+ set_boot_device_mock.assert_called_once_with(task,
+ boot_devices.PXE,
+ persistent=True)
+
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
def test_prepare_instance_localboot(self, clean_up_pxe_config_mock,
@@ -858,6 +922,41 @@ class iPXEBootTestCase(db_base.DbTestCase):
task, ipxe_enabled=True)
self.assertFalse(set_boot_device_mock.called)
+ @mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
+ @mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
+ @mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
+ @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
+ @mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
+ @mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
+ def test_prepare_instance_localboot_with_fallback(
+ self, get_image_info_mock, cache_mock,
+ dhcp_factory_mock, switch_pxe_config_mock,
+ clean_up_pxe_config_mock, set_boot_device_mock):
+ self.config(enable_netboot_fallback=True, group='pxe')
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ task.node.instance_info = task.node.instance_info
+ task.node.instance_info['capabilities'] = {'boot_option': 'local'}
+ task.node.driver_internal_info['root_uuid_or_disk_id'] = (
+ "30212642-09d3-467f-8e09-21685826ab50")
+ task.node.driver_internal_info['is_whole_disk_image'] = False
+ pxe_config_path = pxe_utils.get_pxe_config_file_path(
+ task.node.uuid, ipxe_enabled=True)
+
+ task.driver.boot.prepare_instance(task)
+
+ set_boot_device_mock.assert_called_once_with(task,
+ boot_devices.DISK,
+ persistent=True)
+ switch_pxe_config_mock.assert_called_once_with(
+ pxe_config_path, "30212642-09d3-467f-8e09-21685826ab50",
+ 'bios', True, False, False, False, ipxe_enabled=True)
+ # No clean up
+ self.assertFalse(clean_up_pxe_config_mock.called)
+ # No netboot configuration beyond the PXE files
+ self.assertFalse(get_image_info_mock.called)
+ self.assertFalse(cache_mock.called)
+ self.assertFalse(dhcp_factory_mock.return_value.update_dhcp.called)
+
@mock.patch.object(pxe_utils, 'clean_up_pxe_env', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_clean_up_instance(self, get_image_info_mock,
diff --git a/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py b/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py
index e723b3518..b6eb12771 100644
--- a/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py
+++ b/ironic/tests/unit/drivers/modules/test_iscsi_deploy.py
@@ -19,10 +19,10 @@ import os
import tempfile
import time
import types
+from unittest import mock
from ironic_lib import disk_utils
from ironic_lib import utils as ironic_utils
-import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import fileutils
@@ -348,8 +348,8 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
'node': self.node.uuid,
'params': log_params,
}
- uuid_dict_returned = {'root uuid': '12345678-87654321'}
- deploy_mock.return_value = uuid_dict_returned
+ deployment_uuids = {'root uuid': '12345678-87654321'}
+ deploy_mock.return_value = deployment_uuids
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
@@ -362,7 +362,7 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
self.assertIsNone(task.node.last_error)
mock_image_cache.assert_called_once_with()
mock_image_cache.return_value.clean_up.assert_called_once_with()
- self.assertEqual(uuid_dict_returned, retval)
+ self.assertEqual(deployment_uuids, retval)
mock_disk_layout.assert_called_once_with(task.node, mock.ANY)
@mock.patch.object(iscsi_deploy, 'LOG', autospec=True)
@@ -392,8 +392,8 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
'node': self.node.uuid,
'params': log_params,
}
- uuid_dict_returned = {'disk identifier': '87654321'}
- deploy_mock.return_value = uuid_dict_returned
+ deployment_uuids = {'disk identifier': '87654321'}
+ deploy_mock.return_value = deployment_uuids
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_internal_info['is_whole_disk_image'] = True
@@ -406,7 +406,7 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
self.assertIsNone(task.node.last_error)
mock_image_cache.assert_called_once_with()
mock_image_cache.return_value.clean_up.assert_called_once_with()
- self.assertEqual(uuid_dict_returned, retval)
+ self.assertEqual(deployment_uuids, retval)
def _test_get_deploy_info(self, extra_instance_info=None):
if extra_instance_info is None:
@@ -489,8 +489,8 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
driver_internal_info = {'agent_url': 'http://1.2.3.4:1234'}
self.node.driver_internal_info = driver_internal_info
self.node.save()
- uuid_dict_returned = {'root uuid': 'some-root-uuid'}
- continue_deploy_mock.return_value = uuid_dict_returned
+ deployment_uuids = {'root uuid': 'some-root-uuid'}
+ continue_deploy_mock.return_value = deployment_uuids
expected_iqn = 'iqn.2008-10.org.openstack:%s' % self.node.uuid
with task_manager.acquire(self.context, self.node.uuid,
@@ -504,7 +504,7 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
self.assertEqual(
'some-root-uuid',
task.node.driver_internal_info['root_uuid_or_disk_id'])
- self.assertEqual(ret_val, uuid_dict_returned)
+ self.assertEqual(ret_val, deployment_uuids)
@mock.patch.object(iscsi_deploy, 'continue_deploy', autospec=True)
def test_do_agent_iscsi_deploy_preserve_ephemeral(self,
@@ -517,8 +517,8 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
'agent_url': 'http://1.2.3.4:1234'}
self.node.driver_internal_info = driver_internal_info
self.node.save()
- uuid_dict_returned = {'root uuid': 'some-root-uuid'}
- continue_deploy_mock.return_value = uuid_dict_returned
+ deployment_uuids = {'root uuid': 'some-root-uuid'}
+ continue_deploy_mock.return_value = deployment_uuids
expected_iqn = 'iqn.2008-10.org.openstack:%s' % self.node.uuid
with task_manager.acquire(self.context, self.node.uuid,
@@ -553,7 +553,8 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
self.assertIsNotNone(self.node.last_error)
mock_collect_logs.assert_called_once_with(task.node)
- @mock.patch('ironic.drivers.modules.deploy_utils.get_ironic_api_url')
+ @mock.patch('ironic.drivers.modules.deploy_utils.get_ironic_api_url',
+ autospec=True)
def test_validate_good_api_url(self, mock_get_url):
mock_get_url.return_value = 'http://127.0.0.1:1234'
with task_manager.acquire(self.context, self.node.uuid,
@@ -561,7 +562,8 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
iscsi_deploy.validate(task)
mock_get_url.assert_called_once_with()
- @mock.patch('ironic.drivers.modules.deploy_utils.get_ironic_api_url')
+ @mock.patch('ironic.drivers.modules.deploy_utils.get_ironic_api_url',
+ autospec=True)
def test_validate_fail_no_api_url(self, mock_get_url):
mock_get_url.side_effect = exception.InvalidParameterValue('Ham!')
with task_manager.acquire(self.context, self.node.uuid,
@@ -570,7 +572,8 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
iscsi_deploy.validate, task)
mock_get_url.assert_called_once_with()
- @mock.patch('ironic.drivers.modules.deploy_utils.get_ironic_api_url')
+ @mock.patch('ironic.drivers.modules.deploy_utils.get_ironic_api_url',
+ autospec=True)
def test_validate_invalid_root_device_hints(self, mock_get_url):
mock_get_url.return_value = 'http://spam.ham/baremetal'
with task_manager.acquire(self.context, self.node.uuid,
@@ -579,7 +582,8 @@ class IscsiDeployMethodsTestCase(db_base.DbTestCase):
self.assertRaises(exception.InvalidParameterValue,
iscsi_deploy.validate, task)
- @mock.patch('ironic.drivers.modules.deploy_utils.get_ironic_api_url')
+ @mock.patch('ironic.drivers.modules.deploy_utils.get_ironic_api_url',
+ autospec=True)
def test_validate_invalid_root_device_hints_iinfo(self, mock_get_url):
mock_get_url.return_value = 'http://spam.ham/baremetal'
with task_manager.acquire(self.context, self.node.uuid,
@@ -612,7 +616,8 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
props = task.driver.deploy.get_properties()
- self.assertEqual(['deploy_forces_oob_reboot'], list(props))
+ self.assertEqual({'agent_verify_ca', 'deploy_forces_oob_reboot'},
+ set(props))
@mock.patch.object(iscsi_deploy, 'validate', autospec=True)
@mock.patch.object(deploy_utils, 'validate_capabilities', autospec=True)
@@ -752,10 +757,13 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
@mock.patch('ironic.conductor.utils.is_fast_track', autospec=True)
@mock.patch.object(noop_storage.NoopStorage, 'attach_volumes',
autospec=True)
- @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info')
- @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk')
- @mock.patch.object(deploy_utils, 'build_agent_options')
- @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy')
+ @mock.patch.object(deploy_utils, 'populate_storage_driver_internal_info',
+ autospec=True)
+ @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
+ @mock.patch.object(deploy_utils, 'build_agent_options',
+ autospec=True)
+ @mock.patch.object(deploy_utils, 'build_instance_info_for_deploy',
+ autospec=True)
@mock.patch.object(flat_network.FlatNetwork, 'add_provisioning_network',
spec_set=True, autospec=True)
@mock.patch.object(flat_network.FlatNetwork,
@@ -831,54 +839,31 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
self.assertNotIn(
'deployment_reboot', task.node.driver_internal_info)
+ @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
autospec=True)
- @mock.patch.object(flat_network.FlatNetwork,
- 'configure_tenant_networks',
- spec_set=True, autospec=True)
- @mock.patch.object(flat_network.FlatNetwork,
- 'remove_provisioning_network',
- spec_set=True, autospec=True)
- @mock.patch.object(pxe.PXEBoot,
- 'prepare_instance',
- spec_set=True, autospec=True)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(iscsi_deploy, 'check_image_size', autospec=True)
- @mock.patch.object(deploy_utils, 'cache_instance_image', autospec=True)
- def test_deploy_storage_check_write_image_false(self,
- mock_cache_instance_image,
- mock_check_image_size,
- mock_node_power_action,
- mock_prepare_instance,
- mock_remove_network,
- mock_tenant_network,
- mock_write):
+ def test_deploy_storage_should_write_image_false(
+ self, mock_write, mock_node_power_action):
mock_write.return_value = False
self.node.provision_state = states.DEPLOYING
self.node.deploy_step = {
'step': 'deploy', 'priority': 50, 'interface': 'deploy'}
self.node.save()
- with task_manager.acquire(self.context,
- self.node.uuid, shared=False) as task:
+ with task_manager.acquire(
+ self.context, self.node.uuid, shared=False) as task:
ret = task.driver.deploy.deploy(task)
self.assertIsNone(ret)
- self.assertFalse(mock_cache_instance_image.called)
- self.assertFalse(mock_check_image_size.called)
- mock_remove_network.assert_called_once_with(mock.ANY, task)
- mock_tenant_network.assert_called_once_with(mock.ANY, task)
- mock_prepare_instance.assert_called_once_with(mock.ANY, task)
- self.assertEqual(2, mock_node_power_action.call_count)
- self.assertEqual(states.DEPLOYING, task.node.provision_state)
+ self.assertFalse(mock_node_power_action.called)
@mock.patch.object(iscsi_deploy, 'check_image_size', autospec=True)
@mock.patch.object(deploy_utils, 'cache_instance_image', autospec=True)
- @mock.patch.object(iscsi_deploy.ISCSIDeploy, 'continue_deploy',
+ @mock.patch.object(iscsi_deploy.ISCSIDeploy, 'write_image',
autospec=True)
@mock.patch('ironic.conductor.utils.is_fast_track', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
@mock.patch('ironic.conductor.utils.node_power_action', autospec=True)
def test_deploy_fast_track(self, power_mock, mock_pxe_instance,
- mock_is_fast_track, continue_deploy_mock,
+ mock_is_fast_track, write_image_mock,
cache_image_mock, check_image_size_mock):
mock_is_fast_track.return_value = True
self.node.target_provision_state = states.ACTIVE
@@ -889,16 +874,17 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
self.node.save()
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
- task.driver.deploy.deploy(task)
+ result = task.driver.deploy.deploy(task)
+ self.assertIsNone(result)
self.assertFalse(power_mock.called)
self.assertFalse(mock_pxe_instance.called)
task.node.refresh()
- self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
+ self.assertEqual(states.DEPLOYING, task.node.provision_state)
self.assertEqual(states.ACTIVE,
task.node.target_provision_state)
cache_image_mock.assert_called_with(mock.ANY, task.node)
check_image_size_mock.assert_called_with(task)
- continue_deploy_mock.assert_called_with(mock.ANY, task)
+ self.assertFalse(write_image_mock.called)
@mock.patch.object(noop_storage.NoopStorage, 'detach_volumes',
autospec=True)
@@ -932,8 +918,10 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
self.node.uuid, shared=False) as task:
self.assertEqual(0, len(task.volume_targets))
- @mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider')
- @mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp')
+ @mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider',
+ autospec=True)
+ @mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp',
+ autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_instance', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
@mock.patch.object(deploy_utils, 'destroy_images', autospec=True)
@@ -949,7 +937,7 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
clean_up_instance_mock.assert_called_once_with(
task.driver.boot, task)
set_dhcp_provider_mock.assert_called_once_with()
- clean_dhcp_mock.assert_called_once_with(task)
+ clean_dhcp_mock.assert_called_once_with(mock.ANY, task)
@mock.patch.object(deploy_utils, 'prepare_inband_cleaning', autospec=True)
def test_prepare_cleaning(self, prepare_inband_cleaning_mock):
@@ -995,90 +983,96 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
agent_execute_clean_step_mock.assert_called_once_with(
task, {'some-step': 'step-info'}, 'clean')
- @mock.patch.object(agent_base.AgentDeployMixin,
- 'reboot_and_finish_deploy', autospec=True)
@mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', autospec=True)
- def test_continue_deploy_netboot(self, do_agent_iscsi_deploy_mock,
- reboot_and_finish_deploy_mock):
+ def test_write_image(self, do_agent_iscsi_deploy_mock):
self.node.instance_info = {
'capabilities': {'boot_option': 'netboot'}}
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
- uuid_dict_returned = {'root uuid': 'some-root-uuid'}
- do_agent_iscsi_deploy_mock.return_value = uuid_dict_returned
+ deployment_uuids = {'root uuid': 'some-root-uuid'}
+ do_agent_iscsi_deploy_mock.return_value = deployment_uuids
+ self.node.save()
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ task.driver.deploy.write_image(task)
+ do_agent_iscsi_deploy_mock.assert_called_once_with(
+ task, task.driver.deploy._client)
+ self.assertEqual(
+ task.node.driver_internal_info['deployment_uuids'],
+ deployment_uuids)
+
+ @mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
+ autospec=True)
+ @mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', autospec=True)
+ def test_write_image_bfv(self, do_agent_iscsi_deploy_mock,
+ should_write_image_mock):
+ should_write_image_mock.return_value = False
+ with task_manager.acquire(self.context, self.node.uuid) as task:
+ task.driver.deploy.write_image(task)
+ self.assertFalse(do_agent_iscsi_deploy_mock.called)
+
+ def test_prepare_instance_boot_netboot(self):
+ deployment_uuids = {'root uuid': 'some-root-uuid'}
+ self.node.instance_info = {
+ 'capabilities': {'boot_option': 'netboot'}}
+ self.node.provision_state = states.DEPLOYWAIT
+ self.node.target_provision_state = states.ACTIVE
+ info = self.node.driver_internal_info
+ info['deployment_uuids'] = deployment_uuids
+ self.node.driver_internal_info = info
self.node.save()
+
with task_manager.acquire(self.context, self.node.uuid) as task:
- with mock.patch.object(
- task.driver.boot, 'prepare_instance') as m_prep_instance:
- task.driver.deploy.continue_deploy(task)
- do_agent_iscsi_deploy_mock.assert_called_once_with(
- task, task.driver.deploy._client)
- reboot_and_finish_deploy_mock.assert_called_once_with(
- mock.ANY, task)
+ with mock.patch.object(task.driver.boot,
+ 'prepare_instance',
+ autospec=True) as m_prep_instance:
+ task.driver.deploy.prepare_instance_boot(task)
m_prep_instance.assert_called_once_with(task)
@mock.patch.object(fake.FakeManagement, 'set_boot_device', autospec=True)
@mock.patch.object(agent_base.AgentDeployMixin,
- 'reboot_and_finish_deploy', autospec=True)
- @mock.patch.object(agent_base.AgentDeployMixin,
'configure_local_boot', autospec=True)
- @mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', autospec=True)
- def test_continue_deploy_localboot(self, do_agent_iscsi_deploy_mock,
- configure_local_boot_mock,
- reboot_and_finish_deploy_mock,
- set_boot_device_mock):
+ def test_prepare_instance_boot_localboot(self, configure_local_boot_mock,
+ set_boot_device_mock):
- self.node.instance_info = {
- 'capabilities': {'boot_option': 'local'}}
+ deployment_uuids = {'root uuid': 'some-root-uuid'}
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
+ info = self.node.driver_internal_info
+ info['deployment_uuids'] = deployment_uuids
+ self.node.driver_internal_info = info
self.node.save()
- uuid_dict_returned = {'root uuid': 'some-root-uuid'}
- do_agent_iscsi_deploy_mock.return_value = uuid_dict_returned
with task_manager.acquire(self.context, self.node.uuid) as task:
- task.driver.deploy.continue_deploy(task)
- do_agent_iscsi_deploy_mock.assert_called_once_with(
- task, task.driver.deploy._client)
+ task.driver.deploy.prepare_instance_boot(task)
configure_local_boot_mock.assert_called_once_with(
task.driver.deploy, task, root_uuid='some-root-uuid',
efi_system_part_uuid=None, prep_boot_part_uuid=None)
- reboot_and_finish_deploy_mock.assert_called_once_with(
- task.driver.deploy, task)
set_boot_device_mock.assert_called_once_with(
mock.ANY, task, device=boot_devices.DISK, persistent=True)
@mock.patch.object(fake.FakeManagement, 'set_boot_device', autospec=True)
@mock.patch.object(agent_base.AgentDeployMixin,
- 'reboot_and_finish_deploy', autospec=True)
- @mock.patch.object(agent_base.AgentDeployMixin,
'configure_local_boot', autospec=True)
- @mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', autospec=True)
- def test_continue_deploy_localboot_uefi(self, do_agent_iscsi_deploy_mock,
- configure_local_boot_mock,
- reboot_and_finish_deploy_mock,
- set_boot_device_mock):
-
+ def test_prepare_instance_boot_localboot_uefi(
+ self, configure_local_boot_mock, set_boot_device_mock):
+ deployment_uuids = {'root uuid': 'some-root-uuid',
+ 'efi system partition uuid': 'efi-part-uuid'}
self.node.instance_info = {
'capabilities': {'boot_option': 'local'}}
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
+ info = self.node.driver_internal_info
+ info['deployment_uuids'] = deployment_uuids
+ self.node.driver_internal_info = info
self.node.save()
- uuid_dict_returned = {'root uuid': 'some-root-uuid',
- 'efi system partition uuid': 'efi-part-uuid'}
- do_agent_iscsi_deploy_mock.return_value = uuid_dict_returned
with task_manager.acquire(self.context, self.node.uuid) as task:
- task.driver.deploy.continue_deploy(task)
- do_agent_iscsi_deploy_mock.assert_called_once_with(
- task, task.driver.deploy._client)
+ task.driver.deploy.prepare_instance_boot(task)
configure_local_boot_mock.assert_called_once_with(
task.driver.deploy, task, root_uuid='some-root-uuid',
efi_system_part_uuid='efi-part-uuid', prep_boot_part_uuid=None)
- reboot_and_finish_deploy_mock.assert_called_once_with(
- task.driver.deploy, task)
set_boot_device_mock.assert_called_once_with(
mock.ANY, task, device=boot_devices.DISK, persistent=True)
@@ -1157,49 +1151,6 @@ class ISCSIDeployTestCase(db_base.DbTestCase):
self.node.uuid, shared=False) as task:
self.assertEqual(0, len(task.volume_targets))
- @mock.patch.object(manager_utils, 'restore_power_state_if_needed',
- autospec=True)
- @mock.patch.object(manager_utils, 'power_on_node_if_needed', autospec=True)
- @mock.patch.object(noop_storage.NoopStorage, 'should_write_image',
- autospec=True)
- @mock.patch.object(flat_network.FlatNetwork,
- 'configure_tenant_networks',
- spec_set=True, autospec=True)
- @mock.patch.object(flat_network.FlatNetwork,
- 'remove_provisioning_network',
- spec_set=True, autospec=True)
- @mock.patch.object(pxe.PXEBoot,
- 'prepare_instance',
- spec_set=True, autospec=True)
- @mock.patch.object(manager_utils, 'node_power_action', autospec=True)
- @mock.patch.object(iscsi_deploy, 'check_image_size', autospec=True)
- @mock.patch.object(deploy_utils, 'cache_instance_image', autospec=True)
- def test_deploy_storage_check_write_image_false_with_smartnic_port(
- self, mock_cache_instance_image, mock_check_image_size,
- mock_node_power_action, mock_prepare_instance,
- mock_remove_network, mock_tenant_network, mock_write,
- power_on_node_if_needed_mock, restore_power_state_mock):
- mock_write.return_value = False
- self.node.provision_state = states.DEPLOYING
- self.node.deploy_step = {
- 'step': 'deploy', 'priority': 50, 'interface': 'deploy'}
- self.node.save()
- with task_manager.acquire(
- self.context, self.node.uuid, shared=False) as task:
- power_on_node_if_needed_mock.return_value = states.POWER_OFF
- ret = task.driver.deploy.deploy(task)
- self.assertIsNone(ret)
- self.assertFalse(mock_cache_instance_image.called)
- self.assertFalse(mock_check_image_size.called)
- mock_remove_network.assert_called_once_with(mock.ANY, task)
- mock_tenant_network.assert_called_once_with(mock.ANY, task)
- mock_prepare_instance.assert_called_once_with(mock.ANY, task)
- self.assertEqual(2, mock_node_power_action.call_count)
- self.assertEqual(states.DEPLOYING, task.node.provision_state)
- power_on_node_if_needed_mock.assert_called_once_with(task)
- restore_power_state_mock.assert_called_once_with(
- task, states.POWER_OFF)
-
# Cleanup of iscsi_deploy with pxe boot interface
class CleanUpFullFlowTestCase(db_base.DbTestCase):
@@ -1267,8 +1218,10 @@ class CleanUpFullFlowTestCase(db_base.DbTestCase):
os.link(self.master_instance_path, self.image_path)
dhcp_factory.DHCPFactory._dhcp_provider = None
- @mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider')
- @mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp')
+ @mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider',
+ autospec=True)
+ @mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp',
+ autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
@mock.patch.object(pxe_utils, 'get_image_info', autospec=True)
def test_clean_up_with_master(self, mock_get_deploy_image_info,
@@ -1287,7 +1240,7 @@ class CleanUpFullFlowTestCase(db_base.DbTestCase):
mock_get_deploy_image_info.assert_called_with(
task.node, mode='deploy', ipxe_enabled=False)
set_dhcp_provider_mock.assert_called_once_with()
- clean_dhcp_mock.assert_called_once_with(task)
+ clean_dhcp_mock.assert_called_once_with(mock.ANY, task)
for path in ([self.kernel_path, self.image_path, self.config_path]
+ self.files):
self.assertFalse(os.path.exists(path),
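
The bulk of the hunks above apply one mechanical pattern: adding `autospec=True` to `mock.patch` and `mock.patch.object`. With autospec the patched attribute keeps the real callable's signature, so calling it with the wrong arguments raises `TypeError`, and for instance methods the bound instance is recorded as the first call argument. That is why assertions such as `clean_dhcp_mock.assert_called_once_with(task)` become `assert_called_once_with(mock.ANY, task)`. A minimal self-contained sketch of the behaviour (the `Greeter` class is illustrative, not from ironic):

    from unittest import mock

    class Greeter:
        def greet(self, name):
            return 'hello ' + name

    with mock.patch.object(Greeter, 'greet', autospec=True) as greet_mock:
        Greeter().greet('world')
        # The instance is recorded as the first call argument; match it
        # with mock.ANY, exactly as the updated assertions above do.
        greet_mock.assert_called_once_with(mock.ANY, 'world')

Without autospec, a bare MagicMock accepts any call signature, so a test can keep passing after the production code changes its arguments; autospec closes that gap.
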
diff --git a/ironic/tests/unit/drivers/modules/test_noop.py b/ironic/tests/unit/drivers/modules/test_noop.py
index f1db0bdf7..692b5aa04 100644
--- a/ironic/tests/unit/drivers/modules/test_noop.py
+++ b/ironic/tests/unit/drivers/modules/test_noop.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import stevedore
from ironic.common import exception
diff --git a/ironic/tests/unit/drivers/modules/test_noop_mgmt.py b/ironic/tests/unit/drivers/modules/test_noop_mgmt.py
index c34b05fe3..78731e40e 100644
--- a/ironic/tests/unit/drivers/modules/test_noop_mgmt.py
+++ b/ironic/tests/unit/drivers/modules/test_noop_mgmt.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from ironic.common import boot_devices
from ironic.common import exception
diff --git a/ironic/tests/unit/drivers/modules/test_pxe.py b/ironic/tests/unit/drivers/modules/test_pxe.py
index ed4fa2b63..79fb2952f 100644
--- a/ironic/tests/unit/drivers/modules/test_pxe.py
+++ b/ironic/tests/unit/drivers/modules/test_pxe.py
@@ -17,8 +17,8 @@
import os
import tempfile
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_serialization import jsonutils as json
from oslo_utils import timeutils
@@ -85,7 +85,6 @@ class PXEBootTestCase(db_base.DbTestCase):
driver_internal_info=self.driver_internal_info)
self.port = obj_utils.create_test_port(self.context,
node_id=self.node.id)
- self.config(group='conductor', api_url='http://127.0.0.1:1234/')
self.config(my_ipv6='2001:db8::1')
def test_get_properties(self):
@@ -225,7 +224,7 @@ class PXEBootTestCase(db_base.DbTestCase):
@mock.patch.object(manager_utils, 'node_get_boot_mode', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
- @mock.patch.object(dhcp_factory, 'DHCPFactory')
+ @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
@mock.patch.object(pxe_utils, 'get_image_info', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@@ -582,7 +581,7 @@ class PXEBootTestCase(db_base.DbTestCase):
boot_devices.PXE,
persistent=True)
- @mock.patch('os.path.isfile', return_value=False)
+ @mock.patch('os.path.isfile', return_value=False, autospec=True)
@mock.patch.object(pxe_utils, 'create_pxe_config', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
@@ -630,7 +629,7 @@ class PXEBootTestCase(db_base.DbTestCase):
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(deploy_utils, 'switch_pxe_config', autospec=True)
- @mock.patch.object(dhcp_factory, 'DHCPFactory')
+ @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_netboot_missing_root_uuid(
@@ -665,7 +664,7 @@ class PXEBootTestCase(db_base.DbTestCase):
@mock.patch.object(pxe_base.LOG, 'warning', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
- @mock.patch.object(dhcp_factory, 'DHCPFactory')
+ @mock.patch.object(dhcp_factory, 'DHCPFactory', autospec=True)
@mock.patch.object(pxe_utils, 'cache_ramdisk_kernel', autospec=True)
@mock.patch.object(pxe_utils, 'get_instance_image_info', autospec=True)
def test_prepare_instance_whole_disk_image_missing_root_uuid(
diff --git a/ironic/tests/unit/drivers/modules/test_snmp.py b/ironic/tests/unit/drivers/modules/test_snmp.py
index 5563cfe91..ba6fd6bc4 100644
--- a/ironic/tests/unit/drivers/modules/test_snmp.py
+++ b/ironic/tests/unit/drivers/modules/test_snmp.py
@@ -17,8 +17,8 @@
"""Test class for SNMP power driver module."""
import time
+from unittest import mock
-import mock
from oslo_config import cfg
from pysnmp import error as snmp_error
from pysnmp import hlapi as pysnmp
@@ -1603,7 +1603,8 @@ class SNMPDeviceDriverTestCase(db_base.DbTestCase):
mock_client.get.assert_called_once_with(SNMPDriverAuto.SYS_OBJ_OID)
- @mock.patch.object(snmp.SNMPDriverAPCRackPDU, "_snmp_power_on")
+ @mock.patch.object(snmp.SNMPDriverAPCRackPDU, "_snmp_power_on",
+ autospec=True)
def test_snmp_auto_cache_supports_pdu_replacement(
self, broken_pdu_power_on_mock, mock_get_client):
@@ -1695,7 +1696,7 @@ class SNMPDriverTestCase(db_base.DbTestCase):
task.driver.power.get_power_state, task)
mock_driver.power_state.assert_called_once_with()
- @mock.patch.object(snmp.LOG, 'warning')
+ @mock.patch.object(snmp.LOG, 'warning', autospec=True)
def test_set_power_state_on(self, mock_log, mock_get_driver):
mock_driver = mock_get_driver.return_value
mock_driver.power_on.return_value = states.POWER_ON
@@ -1704,7 +1705,7 @@ class SNMPDriverTestCase(db_base.DbTestCase):
mock_driver.power_on.assert_called_once_with()
self.assertFalse(mock_log.called)
- @mock.patch.object(snmp.LOG, 'warning')
+ @mock.patch.object(snmp.LOG, 'warning', autospec=True)
def test_set_power_state_on_timeout(self, mock_log, mock_get_driver):
mock_driver = mock_get_driver.return_value
mock_driver.power_on.return_value = states.POWER_ON
@@ -1763,7 +1764,7 @@ class SNMPDriverTestCase(db_base.DbTestCase):
task, states.POWER_OFF)
mock_driver.power_off.assert_called_once_with()
- @mock.patch.object(snmp.LOG, 'warning')
+ @mock.patch.object(snmp.LOG, 'warning', autospec=True)
def test_reboot(self, mock_log, mock_get_driver):
mock_driver = mock_get_driver.return_value
mock_driver.power_reset.return_value = states.POWER_ON
@@ -1772,7 +1773,7 @@ class SNMPDriverTestCase(db_base.DbTestCase):
mock_driver.power_reset.assert_called_once_with()
self.assertFalse(mock_log.called)
- @mock.patch.object(snmp.LOG, 'warning')
+ @mock.patch.object(snmp.LOG, 'warning', autospec=True)
def test_reboot_timeout(self, mock_log, mock_get_driver):
mock_driver = mock_get_driver.return_value
mock_driver.power_reset.return_value = states.POWER_ON
diff --git a/ironic/tests/unit/drivers/modules/xclarity/test_common.py b/ironic/tests/unit/drivers/modules/xclarity/test_common.py
index a7253bf8d..b4667534d 100644
--- a/ironic/tests/unit/drivers/modules/xclarity/test_common.py
+++ b/ironic/tests/unit/drivers/modules/xclarity/test_common.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import importutils
from ironic.common import exception
diff --git a/ironic/tests/unit/drivers/modules/xclarity/test_management.py b/ironic/tests/unit/drivers/modules/xclarity/test_management.py
index d66da9b8f..f63933c90 100644
--- a/ironic/tests/unit/drivers/modules/xclarity/test_management.py
+++ b/ironic/tests/unit/drivers/modules/xclarity/test_management.py
@@ -15,8 +15,8 @@
import importlib
import sys
+from unittest import mock
-import mock
from oslo_utils import importutils
from ironic.common import boot_devices
@@ -61,7 +61,7 @@ class XClarityManagementDriverTestCase(db_base.DbTestCase):
self.assertEqual(expected, driver.get_properties())
@mock.patch.object(management.XClarityManagement, 'get_boot_device',
- return_value='pxe')
+ return_value='pxe', autospec=True)
def test_set_boot_device(self, mock_get_boot_device,
mock_get_xc_client):
with task_manager.acquire(self.context, self.node.uuid) as task:
@@ -88,14 +88,15 @@ class XClarityManagementDriverTestCase(db_base.DbTestCase):
with task_manager.acquire(self.context, self.node.uuid) as task:
expected = [boot_devices.PXE, boot_devices.BIOS,
boot_devices.DISK, boot_devices.CDROM]
- self.assertItemsEqual(
+ self.assertCountEqual(
expected,
task.driver.management.get_supported_boot_devices(task))
@mock.patch.object(
management.XClarityManagement,
'get_boot_device',
- return_value={'boot_device': 'pxe', 'persistent': False})
+ return_value={'boot_device': 'pxe', 'persistent': False},
+ autospec=True)
def test_get_boot_device(self, mock_get_boot_device, mock_get_xc_client):
reference = {'boot_device': 'pxe', 'persistent': False}
with task_manager.acquire(self.context, self.node.uuid) as task:
diff --git a/ironic/tests/unit/drivers/modules/xclarity/test_power.py b/ironic/tests/unit/drivers/modules/xclarity/test_power.py
index 86f1e3318..e12ee837a 100644
--- a/ironic/tests/unit/drivers/modules/xclarity/test_power.py
+++ b/ironic/tests/unit/drivers/modules/xclarity/test_power.py
@@ -15,8 +15,8 @@
import importlib
import sys
+from unittest import mock
-import mock
from oslo_utils import importutils
from ironic.common import exception
@@ -66,10 +66,10 @@ class XClarityPowerDriverTestCase(db_base.DbTestCase):
mock_validate_driver_info.assert_called_with(task.node)
@mock.patch.object(power.XClarityPower, 'get_power_state',
- return_value=STATE_POWER_ON)
+ return_value=STATE_POWER_ON, autospec=True)
def test_get_power_state(self, mock_get_power_state, mock_get_xc_client):
with task_manager.acquire(self.context, self.node.uuid) as task:
- result = power.XClarityPower.get_power_state(task)
+ result = power.XClarityPower.get_power_state(self, task)
self.assertEqual(STATE_POWER_ON, result)
@mock.patch.object(common, 'translate_xclarity_power_state',
@@ -89,9 +89,9 @@ class XClarityPowerDriverTestCase(db_base.DbTestCase):
task)
self.assertFalse(mock_translate_state.called)
- @mock.patch.object(power.LOG, 'warning')
+ @mock.patch.object(power.LOG, 'warning', autospec=True)
@mock.patch.object(power.XClarityPower, 'get_power_state',
- return_value=states.POWER_ON)
+ return_value=states.POWER_ON, autospec=True)
def test_set_power(self, mock_set_power_state, mock_log,
mock_get_xc_client):
with task_manager.acquire(self.context, self.node.uuid) as task:
@@ -100,9 +100,9 @@ class XClarityPowerDriverTestCase(db_base.DbTestCase):
self.assertEqual(expected, states.POWER_ON)
self.assertFalse(mock_log.called)
- @mock.patch.object(power.LOG, 'warning')
+ @mock.patch.object(power.LOG, 'warning', autospec=True)
@mock.patch.object(power.XClarityPower, 'get_power_state',
- return_value=states.POWER_ON)
+ return_value=states.POWER_ON, autospec=True)
def test_set_power_timeout(self, mock_set_power_state, mock_log,
mock_get_xc_client):
with task_manager.acquire(self.context, self.node.uuid) as task:
@@ -126,19 +126,21 @@ class XClarityPowerDriverTestCase(db_base.DbTestCase):
task.driver.power.set_power_state,
task, states.POWER_OFF)
- @mock.patch.object(power.LOG, 'warning')
- @mock.patch.object(power.XClarityPower, 'set_power_state')
+ @mock.patch.object(power.LOG, 'warning', autospec=True)
+ @mock.patch.object(power.XClarityPower, 'set_power_state', autospec=True)
def test_reboot(self, mock_set_power_state, mock_log, mock_get_xc_client):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.power.reboot(task)
- mock_set_power_state.assert_called_with(task, states.REBOOT)
+ mock_set_power_state.assert_called_with(
+ mock.ANY, task, states.REBOOT)
self.assertFalse(mock_log.called)
- @mock.patch.object(power.LOG, 'warning')
- @mock.patch.object(power.XClarityPower, 'set_power_state')
+ @mock.patch.object(power.LOG, 'warning', autospec=True)
+ @mock.patch.object(power.XClarityPower, 'set_power_state', autospec=True)
def test_reboot_timeout(self, mock_set_power_state, mock_log,
mock_get_xc_client):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.power.reboot(task, timeout=55)
- mock_set_power_state.assert_called_with(task, states.REBOOT)
+ mock_set_power_state.assert_called_with(
+ mock.ANY, task, states.REBOOT)
self.assertTrue(mock_log.called)
diff --git a/ironic/tests/unit/drivers/test_base.py b/ironic/tests/unit/drivers/test_base.py
index 19fca2067..d1e33b9b1 100644
--- a/ironic/tests/unit/drivers/test_base.py
+++ b/ironic/tests/unit/drivers/test_base.py
@@ -14,8 +14,7 @@
# under the License.
import json
-
-import mock
+from unittest import mock
from ironic.common import components
from ironic.common import exception
diff --git a/ironic/tests/unit/drivers/test_drac.py b/ironic/tests/unit/drivers/test_drac.py
index 748c5e466..8a551070b 100644
--- a/ironic/tests/unit/drivers/test_drac.py
+++ b/ironic/tests/unit/drivers/test_drac.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from oslo_utils import uuidutils
+
from ironic.conductor import task_manager
from ironic.drivers.modules import agent
from ironic.drivers.modules import drac
@@ -42,7 +44,7 @@ class IDRACHardwareTestCase(db_base.DbTestCase):
'no-inspect'],
enabled_network_interfaces=['flat', 'neutron', 'noop'],
enabled_raid_interfaces=[
- 'idrac', 'idrac-wsman', 'no-raid'],
+ 'idrac', 'idrac-wsman', 'no-raid', 'agent'],
enabled_vendor_interfaces=[
'idrac', 'idrac-wsman', 'no-vendor'],
enabled_bios_interfaces=[
@@ -108,11 +110,14 @@ class IDRACHardwareTestCase(db_base.DbTestCase):
inspect=inspector.Inspector)
def test_override_with_raid(self):
- node = obj_utils.create_test_node(self.context, driver='idrac',
- raid_interface='no-raid')
- with task_manager.acquire(self.context, node.id) as task:
- self._validate_interfaces(task.driver,
- raid=noop.NoRAID)
+ for iface, impl in [('agent', agent.AgentRAID),
+ ('no-raid', noop.NoRAID)]:
+ node = obj_utils.create_test_node(self.context,
+ uuid=uuidutils.generate_uuid(),
+ driver='idrac',
+ raid_interface=iface)
+ with task_manager.acquire(self.context, node.id) as task:
+ self._validate_interfaces(task.driver, raid=impl)
def test_override_no_vendor(self):
node = obj_utils.create_test_node(self.context, driver='idrac',
diff --git a/ironic/tests/unit/drivers/test_generic.py b/ironic/tests/unit/drivers/test_generic.py
index c8475aad6..d1acb59b8 100644
--- a/ironic/tests/unit/drivers/test_generic.py
+++ b/ironic/tests/unit/drivers/test_generic.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from ironic.common import driver_factory
from ironic.common import exception
@@ -68,7 +68,8 @@ class ManualManagementHardwareTestCase(db_base.DbTestCase):
def test_get_properties(self):
# These properties are from vendor (agent) and boot (pxe) interfaces
expected_prop_keys = [
- 'deploy_forces_oob_reboot', 'deploy_kernel', 'deploy_ramdisk',
+ 'agent_verify_ca', 'deploy_forces_oob_reboot',
+ 'deploy_kernel', 'deploy_ramdisk',
'force_persistent_boot_device', 'rescue_kernel', 'rescue_ramdisk']
hardware_type = driver_factory.get_hardware_type("manual-management")
properties = hardware_type.get_properties()
diff --git a/ironic/tests/unit/drivers/test_ibmc.py b/ironic/tests/unit/drivers/test_ibmc.py
index 731311b54..7e1a9fe30 100644
--- a/ironic/tests/unit/drivers/test_ibmc.py
+++ b/ironic/tests/unit/drivers/test_ibmc.py
@@ -16,7 +16,9 @@
from ironic.conductor import task_manager
from ironic.drivers.modules.ibmc import management as ibmc_mgmt
from ironic.drivers.modules.ibmc import power as ibmc_power
+from ironic.drivers.modules.ibmc import raid as ibmc_raid
from ironic.drivers.modules.ibmc import vendor as ibmc_vendor
+from ironic.drivers.modules import inspector
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import noop
from ironic.drivers.modules import pxe
@@ -31,7 +33,9 @@ class IBMCHardwareTestCase(db_base.DbTestCase):
self.config(enabled_hardware_types=['ibmc'],
enabled_power_interfaces=['ibmc'],
enabled_management_interfaces=['ibmc'],
- enabled_vendor_interfaces=['ibmc'])
+ enabled_vendor_interfaces=['ibmc'],
+ enabled_raid_interfaces=['ibmc'],
+ enabled_inspect_interfaces=['inspector', 'no-inspect'])
def test_default_interfaces(self):
node = obj_utils.create_test_node(self.context, driver='ibmc')
@@ -41,7 +45,8 @@ class IBMCHardwareTestCase(db_base.DbTestCase):
self.assertIsInstance(task.driver.power,
ibmc_power.IBMCPower)
self.assertIsInstance(task.driver.boot, pxe.PXEBoot)
- self.assertIsInstance(task.driver.deploy, iscsi_deploy.ISCSIDeploy)
self.assertIsInstance(task.driver.console, noop.NoConsole)
- self.assertIsInstance(task.driver.raid, noop.NoRAID)
+ self.assertIsInstance(task.driver.deploy, iscsi_deploy.ISCSIDeploy)
+ self.assertIsInstance(task.driver.raid, ibmc_raid.IbmcRAID)
self.assertIsInstance(task.driver.vendor, ibmc_vendor.IBMCVendor)
+ self.assertIsInstance(task.driver.inspect, inspector.Inspector)
diff --git a/ironic/tests/unit/drivers/test_ilo.py b/ironic/tests/unit/drivers/test_ilo.py
index ac719b763..3e8526436 100644
--- a/ironic/tests/unit/drivers/test_ilo.py
+++ b/ironic/tests/unit/drivers/test_ilo.py
@@ -16,10 +16,11 @@
Test class for iLO Drivers
"""
+from oslo_utils import uuidutils
+
from ironic.conductor import task_manager
from ironic.drivers import ilo
from ironic.drivers.modules import agent
-from ironic.drivers.modules.ilo import management
from ironic.drivers.modules.ilo import raid
from ironic.drivers.modules import inspector
from ironic.drivers.modules import iscsi_deploy
@@ -187,16 +188,6 @@ class Ilo5HardwareTestCase(db_base.DbTestCase):
def test_default_interfaces(self):
node = obj_utils.create_test_node(self.context, driver='ilo5')
with task_manager.acquire(self.context, node.id) as task:
- self.assertIsInstance(task.driver.raid, raid.Ilo5RAID)
- self.assertIsInstance(task.driver.management,
- management.Ilo5Management)
-
- def test_override_with_no_raid(self):
- self.config(enabled_raid_interfaces=['no-raid', 'ilo5'])
- node = obj_utils.create_test_node(self.context, driver='ilo5',
- raid_interface='no-raid')
- with task_manager.acquire(self.context, node.id) as task:
- self.assertIsInstance(task.driver.raid, noop.NoRAID)
self.assertIsInstance(task.driver.boot,
ilo.boot.IloVirtualMediaBoot)
self.assertIsInstance(task.driver.console,
@@ -209,7 +200,19 @@ class Ilo5HardwareTestCase(db_base.DbTestCase):
ilo.management.IloManagement)
self.assertIsInstance(task.driver.power,
ilo.power.IloPower)
+ self.assertIsInstance(task.driver.raid, raid.Ilo5RAID)
self.assertIsInstance(task.driver.rescue,
noop.NoRescue)
self.assertIsInstance(task.driver.vendor,
ilo.vendor.VendorPassthru)
+
+ def test_override_raid(self):
+ self.config(enabled_raid_interfaces=['agent', 'no-raid', 'ilo5'])
+ for iface, impl in [('agent', agent.AgentRAID),
+ ('no-raid', noop.NoRAID)]:
+ node = obj_utils.create_test_node(self.context,
+ uuid=uuidutils.generate_uuid(),
+ driver='ilo5',
+ raid_interface=iface)
+ with task_manager.acquire(self.context, node.id) as task:
+ self.assertIsInstance(task.driver.raid, impl)
diff --git a/ironic/tests/unit/drivers/test_irmc.py b/ironic/tests/unit/drivers/test_irmc.py
index abfc12b42..b5f15246f 100644
--- a/ironic/tests/unit/drivers/test_irmc.py
+++ b/ironic/tests/unit/drivers/test_irmc.py
@@ -16,6 +16,8 @@
Test class for iRMC Deploy Driver
"""
+from unittest import mock
+
from ironic.conductor import task_manager
from ironic.drivers import irmc
from ironic.drivers.modules import agent
@@ -23,6 +25,7 @@ from ironic.drivers.modules import inspector
from ironic.drivers.modules import ipmitool
from ironic.drivers.modules import ipxe
from ironic.drivers.modules.irmc import bios as irmc_bios
+from ironic.drivers.modules.irmc import boot as irmc_boot
from ironic.drivers.modules.irmc import raid
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import noop
@@ -30,11 +33,11 @@ from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
+@mock.patch.object(irmc_boot, 'check_share_fs_mounted', spec_set=True,
+ autospec=True)
class IRMCHardwareTestCase(db_base.DbTestCase):
def setUp(self):
- irmc.boot.check_share_fs_mounted_patcher.start()
- self.addCleanup(irmc.boot.check_share_fs_mounted_patcher.stop)
super(IRMCHardwareTestCase, self).setUp()
self.config_temp_dir('http_root', group='deploy')
self.config(enabled_hardware_types=['irmc'],
@@ -48,7 +51,7 @@ class IRMCHardwareTestCase(db_base.DbTestCase):
enabled_rescue_interfaces=['no-rescue', 'agent'],
enabled_bios_interfaces=['irmc', 'no-bios', 'fake'])
- def test_default_interfaces(self):
+ def test_default_interfaces(self, check_share_fs_mounted_mock):
node = obj_utils.create_test_node(self.context, driver='irmc')
with task_manager.acquire(self.context, node.id) as task:
self.assertIsInstance(task.driver.boot,
@@ -70,7 +73,7 @@ class IRMCHardwareTestCase(db_base.DbTestCase):
self.assertIsInstance(task.driver.bios,
irmc_bios.IRMCBIOS)
- def test_override_with_inspector(self):
+ def test_override_with_inspector(self, check_share_fs_mounted_mock):
self.config(enabled_inspect_interfaces=['inspector', 'irmc'])
node = obj_utils.create_test_node(
self.context, driver='irmc',
@@ -95,7 +98,7 @@ class IRMCHardwareTestCase(db_base.DbTestCase):
self.assertIsInstance(task.driver.rescue,
noop.NoRescue)
- def test_override_with_agent_rescue(self):
+ def test_override_with_agent_rescue(self, check_share_fs_mounted_mock):
node = obj_utils.create_test_node(
self.context, driver='irmc',
deploy_interface='direct',
@@ -119,7 +122,7 @@ class IRMCHardwareTestCase(db_base.DbTestCase):
self.assertIsInstance(task.driver.rescue,
agent.AgentRescue)
- def test_override_with_ipmitool_power(self):
+ def test_override_with_ipmitool_power(self, check_share_fs_mounted_mock):
node = obj_utils.create_test_node(
self.context, driver='irmc', power_interface='ipmitool')
with task_manager.acquire(self.context, node.id) as task:
@@ -140,7 +143,8 @@ class IRMCHardwareTestCase(db_base.DbTestCase):
self.assertIsInstance(task.driver.rescue,
noop.NoRescue)
- def test_override_with_raid_configuration(self):
+ def test_override_with_raid_configuration(self,
+ check_share_fs_mounted_mock):
node = obj_utils.create_test_node(
self.context, driver='irmc',
deploy_interface='direct',
@@ -164,7 +168,8 @@ class IRMCHardwareTestCase(db_base.DbTestCase):
self.assertIsInstance(task.driver.rescue,
agent.AgentRescue)
- def test_override_with_bios_configuration(self):
+ def test_override_with_bios_configuration(self,
+ check_share_fs_mounted_mock):
node = obj_utils.create_test_node(
self.context, driver='irmc',
deploy_interface='direct',
@@ -188,7 +193,8 @@ class IRMCHardwareTestCase(db_base.DbTestCase):
self.assertIsInstance(task.driver.rescue,
agent.AgentRescue)
- def test_override_with_boot_configuration(self):
+ def test_override_with_boot_configuration(self,
+ check_share_fs_mounted_mock):
node = obj_utils.create_test_node(
self.context, driver='irmc',
boot_interface='ipxe')
diff --git a/ironic/tests/unit/drivers/test_snmp.py b/ironic/tests/unit/drivers/test_snmp.py
index e2b941e48..10692383c 100644
--- a/ironic/tests/unit/drivers/test_snmp.py
+++ b/ironic/tests/unit/drivers/test_snmp.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from ironic.conductor import task_manager
from ironic.drivers.modules import fake
diff --git a/ironic/tests/unit/drivers/test_utils.py b/ironic/tests/unit/drivers/test_utils.py
index b09915704..44e173c6b 100644
--- a/ironic/tests/unit/drivers/test_utils.py
+++ b/ironic/tests/unit/drivers/test_utils.py
@@ -15,8 +15,8 @@
import datetime
import os
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_utils import timeutils
@@ -252,7 +252,16 @@ class UtilsRamdiskLogsTestCase(tests_base.TestCase):
logs = 'Gary the Snail'
mock_collect.return_value = {'command_result': {'system_logs': logs}}
driver_utils.collect_ramdisk_logs(self.node)
- mock_store.assert_called_once_with(self.node, logs)
+ mock_store.assert_called_once_with(self.node, logs, label=None)
+
+ @mock.patch.object(driver_utils, 'store_ramdisk_logs', autospec=True)
+ @mock.patch.object(agent_client.AgentClient,
+ 'collect_system_logs', autospec=True)
+ def test_collect_ramdisk_logs_with_label(self, mock_collect, mock_store):
+ logs = 'Gary the Snail'
+ mock_collect.return_value = {'command_result': {'system_logs': logs}}
+ driver_utils.collect_ramdisk_logs(self.node, label='logs')
+ mock_store.assert_called_once_with(self.node, logs, label='logs')
@mock.patch.object(driver_utils.LOG, 'error', autospec=True)
@mock.patch.object(driver_utils, 'store_ramdisk_logs', autospec=True)
@@ -286,7 +295,7 @@ class UtilsRamdiskLogsTestCase(tests_base.TestCase):
logs = 'Gary the Snail'
mock_collect.return_value = {'command_result': {'system_logs': logs}}
driver_utils.collect_ramdisk_logs(self.node)
- mock_store.assert_called_once_with(self.node, logs)
+ mock_store.assert_called_once_with(self.node, logs, label=None)
@mock.patch.object(driver_utils.LOG, 'exception', autospec=True)
def test_collect_ramdisk_logs_storage_fail_fs(self, mock_log):
diff --git a/ironic/tests/unit/drivers/third_party_driver_mocks.py b/ironic/tests/unit/drivers/third_party_driver_mocks.py
index 4dcd45e51..547b41c4a 100644
--- a/ironic/tests/unit/drivers/third_party_driver_mocks.py
+++ b/ironic/tests/unit/drivers/third_party_driver_mocks.py
@@ -31,8 +31,8 @@ Current list of mocked libraries:
import importlib
import sys
+from unittest import mock
-import mock
from oslo_utils import importutils
from ironic.drivers.modules import ipmitool
@@ -173,9 +173,6 @@ if 'ironic.drivers.modules.irmc' in sys.modules:
irmc_boot = importutils.import_module(
'ironic.drivers.modules.irmc.boot')
irmc_boot.check_share_fs_mounted_orig = irmc_boot.check_share_fs_mounted
-irmc_boot.check_share_fs_mounted_patcher = mock.patch(
- 'ironic.drivers.modules.irmc.boot.check_share_fs_mounted')
-irmc_boot.check_share_fs_mounted_patcher.return_value = None
class MockKwargsException(Exception):
@@ -267,8 +264,8 @@ if not ibmc_client:
# Mock iBMC client exceptions
exceptions = mock.MagicMock()
- exceptions.ConnectionError = (
- type('ConnectionError', (MockKwargsException,), {}))
+ exceptions.IBMCConnectionError = (
+ type('IBMCConnectionError', (MockKwargsException,), {}))
exceptions.IBMCClientError = (
type('IBMCClientError', (MockKwargsException,), {}))
sys.modules['ibmc_client.exceptions'] = exceptions
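
The removal of `check_share_fs_mounted_patcher` here pairs with the test_irmc.py change above: instead of a module-level patcher started in `setUp` and stopped in a cleanup, the whole `IRMCHardwareTestCase` class is decorated with `mock.patch.object`. When `mock.patch` decorates a class it wraps every method whose name starts with `test`, passing the mock in as an extra trailing argument, which is why each `test_*` method there gained a `check_share_fs_mounted_mock` parameter. A minimal sketch of the idiom (names illustrative, not from ironic):

    import unittest
    from unittest import mock

    class Share:
        @staticmethod
        def check_mounted():
            raise RuntimeError('would touch a real NFS share')

    # Decorating the class patches every test_* method; each method
    # receives the mock as an extra trailing argument.
    @mock.patch.object(Share, 'check_mounted', autospec=True)
    class ShareTestCase(unittest.TestCase):
        def test_skips_real_check(self, check_mounted_mock):
            Share.check_mounted()  # hits the mock, not the RuntimeError
            check_mounted_mock.assert_called_once_with()
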
diff --git a/ironic/tests/unit/objects/test_allocation.py b/ironic/tests/unit/objects/test_allocation.py
index 33fffddb5..84ec56b81 100644
--- a/ironic/tests/unit/objects/test_allocation.py
+++ b/ironic/tests/unit/objects/test_allocation.py
@@ -11,8 +11,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from testtools import matchers
from ironic.common import exception
diff --git a/ironic/tests/unit/objects/test_bios.py b/ironic/tests/unit/objects/test_bios.py
index 0d31ae4ec..57538fbbf 100644
--- a/ironic/tests/unit/objects/test_bios.py
+++ b/ironic/tests/unit/objects/test_bios.py
@@ -11,8 +11,7 @@
# under the License.
import types
-
-import mock
+from unittest import mock
from ironic.common import context
from ironic.db import api as dbapi
diff --git a/ironic/tests/unit/objects/test_chassis.py b/ironic/tests/unit/objects/test_chassis.py
index 1e3b26d31..aaa64b71d 100644
--- a/ironic/tests/unit/objects/test_chassis.py
+++ b/ironic/tests/unit/objects/test_chassis.py
@@ -14,8 +14,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from oslo_utils import uuidutils
from testtools import matchers
diff --git a/ironic/tests/unit/objects/test_conductor.py b/ironic/tests/unit/objects/test_conductor.py
index 109a89edf..42010da0c 100644
--- a/ironic/tests/unit/objects/test_conductor.py
+++ b/ironic/tests/unit/objects/test_conductor.py
@@ -16,8 +16,8 @@
import datetime
import types
+from unittest import mock
-import mock
from oslo_utils import timeutils
from ironic.common import exception
diff --git a/ironic/tests/unit/objects/test_deploy_template.py b/ironic/tests/unit/objects/test_deploy_template.py
index 7a871ef4b..11863bcda 100644
--- a/ironic/tests/unit/objects/test_deploy_template.py
+++ b/ironic/tests/unit/objects/test_deploy_template.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from ironic.common import context
from ironic.db import api as dbapi
diff --git a/ironic/tests/unit/objects/test_fields.py b/ironic/tests/unit/objects/test_fields.py
index 02ce1222d..35cc050e6 100644
--- a/ironic/tests/unit/objects/test_fields.py
+++ b/ironic/tests/unit/objects/test_fields.py
@@ -13,9 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import hashlib
-import inspect
-
from ironic.common import exception
from ironic.objects import fields
from ironic.tests import base as test_base
@@ -73,8 +70,6 @@ class TestStringFieldThatAcceptsCallable(test_base.TestCase):
def test_default_function():
return "default value"
- self.test_default_function_hash = hashlib.md5(
- inspect.getsource(test_default_function).encode()).hexdigest()
self.field = fields.StringFieldThatAcceptsCallable(
default=test_default_function)
@@ -102,8 +97,8 @@ class TestStringFieldThatAcceptsCallable(test_base.TestCase):
self.field.coerce('obj', 'attr', None))
def test__repr__includes_default_function_name_and_source_hash(self):
- expected = ('StringAcceptsCallable(default=test_default_function-%s,'
- 'nullable=False)' % self.test_default_function_hash)
+ expected = ('StringAcceptsCallable(default=<function '
+ 'test_default_function>,nullable=False)')
self.assertEqual(expected, repr(self.field))
diff --git a/ironic/tests/unit/objects/test_node.py b/ironic/tests/unit/objects/test_node.py
index b027cff10..707d09e8d 100644
--- a/ironic/tests/unit/objects/test_node.py
+++ b/ironic/tests/unit/objects/test_node.py
@@ -14,8 +14,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from testtools import matchers
@@ -171,7 +171,6 @@ class TestNodeObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
mock_update_node.assert_called_once_with(
uuid, {'properties': {"fake": "property"},
'driver': 'fake-driver',
- 'driver_internal_info': {},
'version': objects.Node.VERSION})
self.assertEqual(self.context, n._context)
res_updated_at = (n.updated_at).replace(tzinfo=None)
@@ -221,7 +220,6 @@ class TestNodeObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
{
'properties': {'fake': 'property'},
'driver': 'fake-driver',
- 'driver_internal_info': {},
'version': objects.Node.VERSION,
'maintenance_reason':
maintenance_reason[
diff --git a/ironic/tests/unit/objects/test_notification.py b/ironic/tests/unit/objects/test_notification.py
index e6e40ba82..82c2a8dd9 100644
--- a/ironic/tests/unit/objects/test_notification.py
+++ b/ironic/tests/unit/objects/test_notification.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from ironic.common import exception
from ironic.objects import base
diff --git a/ironic/tests/unit/objects/test_objects.py b/ironic/tests/unit/objects/test_objects.py
index 601915d87..ffdf375fd 100644
--- a/ironic/tests/unit/objects/test_objects.py
+++ b/ironic/tests/unit/objects/test_objects.py
@@ -15,9 +15,9 @@
import contextlib
import datetime
import types
+from unittest import mock
import iso8601
-import mock
from oslo_utils import timeutils
from oslo_versionedobjects import base as object_base
from oslo_versionedobjects import exception as object_exception
@@ -676,7 +676,7 @@ class TestObject(_LocalTest, _TestObject):
# version bump. It is an MD5 hash of the object fields and remotable methods.
# The fingerprint values should only be changed if there is a version bump.
expected_object_fingerprints = {
- 'Node': '1.34-ae873e627cf30bf28fe9f98a807b6200',
+ 'Node': '1.35-aee8ecf5c4d0ed590eb484762aee7fca',
'MyObj': '1.5-9459d30d6954bffc7a9afd347a807ca6',
'Chassis': '1.3-d656e039fd8ae9f34efc232ab3980905',
'Port': '1.9-0cb9202a4ec442e8c0d87a324155eaaf',
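
The `Node` entry above moves from 1.34 to 1.35 together with its fingerprint, which, per the comment in this file, is an MD5 hash of the object's fields and remotable methods. A short sketch of how such a fingerprint can be recomputed, assuming ironic and oslo.versionedobjects are importable (the helper names follow oslo.versionedobjects' `ObjectVersionChecker` API):

    from oslo_versionedobjects import base as object_base

    from ironic.objects import base as ironic_base

    # get_hashes() returns a {name: 'version-md5'} map for every
    # registered object; a changed hash without a version bump means
    # the object's schema changed without being versioned.
    checker = object_base.ObjectVersionChecker(
        obj_classes=ironic_base.IronicObjectRegistry.obj_classes())
    print(checker.get_hashes()['Node'])  # e.g. '1.35-aee8ecf5...'
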
diff --git a/ironic/tests/unit/objects/test_port.py b/ironic/tests/unit/objects/test_port.py
index 32df1e52a..43c58876e 100644
--- a/ironic/tests/unit/objects/test_port.py
+++ b/ironic/tests/unit/objects/test_port.py
@@ -15,8 +15,8 @@
import datetime
import types
+from unittest import mock
-import mock
from oslo_config import cfg
from testtools import matchers
@@ -66,7 +66,7 @@ class TestPortObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
port = objects.Port.get(self.context, address)
- mock_get_port.assert_called_once_with(address, owner=None)
+ mock_get_port.assert_called_once_with(address, project=None)
self.assertEqual(self.context, port._context)
def test_get_bad_id_and_uuid_and_address(self):
@@ -146,6 +146,22 @@ class TestPortObject(db_base.DbTestCase, obj_utils.SchemasTestMixIn):
self.assertThat(ports, matchers.HasLength(1))
self.assertIsInstance(ports[0], objects.Port)
self.assertEqual(self.context, ports[0]._context)
+ mock_get_list.assert_called_once_with(
+ limit=None, marker=None, project=None, sort_dir=None,
+ sort_key=None)
+
+ def test_list_deprecated_owner(self):
+ with mock.patch.object(self.dbapi, 'get_port_list',
+ autospec=True) as mock_get_list:
+ mock_get_list.return_value = [self.fake_port]
+ ports = objects.Port.list(self.context,
+ owner='12345')
+ self.assertThat(ports, matchers.HasLength(1))
+ self.assertIsInstance(ports[0], objects.Port)
+ self.assertEqual(self.context, ports[0]._context)
+ mock_get_list.assert_called_once_with(
+ limit=None, marker=None, project='12345', sort_dir=None,
+ sort_key=None)
@mock.patch.object(obj_base.IronicObject, 'supports_version',
spec_set=types.FunctionType)
diff --git a/ironic/tests/unit/objects/test_portgroup.py b/ironic/tests/unit/objects/test_portgroup.py
index d69ac13e0..29bab20d0 100644
--- a/ironic/tests/unit/objects/test_portgroup.py
+++ b/ironic/tests/unit/objects/test_portgroup.py
@@ -11,8 +11,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from testtools import matchers
from ironic.common import exception
diff --git a/ironic/tests/unit/objects/test_trait.py b/ironic/tests/unit/objects/test_trait.py
index b64248af6..ba18f6ace 100644
--- a/ironic/tests/unit/objects/test_trait.py
+++ b/ironic/tests/unit/objects/test_trait.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from ironic.common import context
from ironic.db import api as dbapi
diff --git a/ironic/tests/unit/objects/test_volume_connector.py b/ironic/tests/unit/objects/test_volume_connector.py
index fad9b4a06..7030f4766 100644
--- a/ironic/tests/unit/objects/test_volume_connector.py
+++ b/ironic/tests/unit/objects/test_volume_connector.py
@@ -14,8 +14,8 @@
import datetime
import types
+from unittest import mock
-import mock
from testtools.matchers import HasLength
from ironic.common import exception
diff --git a/ironic/tests/unit/objects/test_volume_target.py b/ironic/tests/unit/objects/test_volume_target.py
index 2734d7d35..3882a368c 100644
--- a/ironic/tests/unit/objects/test_volume_target.py
+++ b/ironic/tests/unit/objects/test_volume_target.py
@@ -14,8 +14,8 @@
import datetime
import types
+from unittest import mock
-import mock
from testtools.matchers import HasLength
from ironic.common import exception
diff --git a/ironic/tests/unit/test_base.py b/ironic/tests/unit/test_base.py
index 338983516..922e25e75 100644
--- a/ironic/tests/unit/test_base.py
+++ b/ironic/tests/unit/test_base.py
@@ -12,9 +12,9 @@
# limitations under the License.
import subprocess
+from unittest import mock
from ironic_lib import utils
-import mock
from oslo_concurrency import processutils
from ironic.tests import base
@@ -29,10 +29,7 @@ class BlockExecuteTestCase(base.TestCase):
subprocess.check_output, utils.execute)
for function_name in execute_functions:
- exc = self.assertRaises(
- Exception,
- function_name,
- ["echo", "%s" % function_name]) # noqa
+ exc = self.assertRaises(Exception, function_name, ["echo", "%s" % function_name]) # noqa
# Have to use 'noqa' as we are raising plain Exception and we will
# get H202 error in 'pep8' check.
@@ -58,10 +55,7 @@ class BlockExecuteTestCase(base.TestCase):
# still get an exception for a child. So in this case
# ironic_lib.utils.execute() calls processutils.execute(). Make sure an
# exception is raised even though we mocked processutils.execute()
- exc = self.assertRaises(
- Exception,
- utils.execute,
- "ls") # noqa
+ exc = self.assertRaises(Exception, utils.execute, "ls") # noqa
# Have to use 'noqa' as we are raising plain Exception and we will get
# H202 error in 'pep8' check.
diff --git a/lower-constraints.txt b/lower-constraints.txt
index bac7eabaf..182fdf50c 100644
--- a/lower-constraints.txt
+++ b/lower-constraints.txt
@@ -1,29 +1,67 @@
alembic==0.9.6
+amqp==2.5.2
+appdirs==1.4.3
automaton==1.9.0
Babel==2.3.4
bandit==1.1.0
bashate==0.5.1
+beautifulsoup4==4.9.0
+cachetools==4.1.0
+cffi==1.14.0
+chardet==3.0.4
+cliff==3.1.0
+cmd2==0.8.9
+contextlib2==0.6.0.post1
coverage==4.0
+cryptography==2.9.2
ddt==1.0.1
+debtcollector==2.0.1
+decorator==4.4.2
doc8==0.6.0
+docutils==0.16
+dogpile.cache==0.9.2
+entrypoints==0.3
eventlet==0.18.2
+extras==1.0.0
+fasteners==0.15
fixtures==3.0.0
+flake8==3.7.0
flake8-import-order==0.17.1
+future==0.18.2
futurist==1.2.0
-hacking==3.0.0
-ironic-lib==2.17.1
+gitdb==4.0.5
+GitPython==3.1.2
+greenlet==0.4.15
+hacking==3.0.1
+ifaddr==0.1.6
+importlib-metadata==1.6.0
+ironic-lib==4.3.0
iso8601==0.1.11
Jinja2==2.10
+jmespath==0.9.5
jsonpatch==1.16
-jsonschema==2.6.0
-keystoneauth1==3.18.0
+jsonpointer==2.0
+jsonschema==3.2.0
+keystoneauth1==4.2.0
keystonemiddleware==4.17.0
-mock==3.0.0
-openstackdocstheme==1.31.2
+kombu==4.6.8
+linecache2==1.0.0
+logutils==0.3.5
+Mako==1.1.2
+MarkupSafe==1.1.1
+mccabe==0.6.1
+monotonic==1.5
+mox3==1.0.0
+msgpack-python==0.5.6
+munch==2.5.0
+netaddr==0.7.19
+netifaces==0.10.9
openstacksdk==0.37.0
-os-api-ref==1.4.0
+os-client-config==2.1.0
+os-service-types==1.7.0
os-traits==0.4.0
-oslo.concurrency==3.26.0
+osc-lib==2.0.0
+oslo.concurrency==4.2.0
oslo.config==5.2.0
oslo.context==2.19.2
oslo.db==4.40.0
@@ -41,38 +79,74 @@ oslo.utils==3.38.0
oslo.versionedobjects==1.31.2
oslotest==3.2.0
osprofiler==1.5.0
+Paste==3.4.0
+PasteDeploy==2.1.0
pbr==2.0.0
pecan==1.0.0
pika==0.10.0
+pika-pool==0.1.3
+positional==1.2.1
+prettytable==0.7.2
psutil==3.2.2
psycopg2==2.7.3
+pycadf==3.0.0
+pycodestyle==2.5.0
+pycparser==2.20
+pyflakes==2.1.1
Pygments==2.2.0
+pyinotify==0.9.6
PyMySQL==0.7.6
+pyOpenSSL==19.1.0
+pyparsing==2.4.7
+pyperclip==1.8.0
pysendfile==2.0.0
python-cinderclient==3.3.0
+python-dateutil==2.8.1
+python-editor==1.0.4
python-glanceclient==2.8.0
+python-keystoneclient==4.0.0
+python-mimeparse==1.6.0
python-neutronclient==6.7.0
+python-subunit==1.4.0
python-swiftclient==3.2.0
pytz==2013.6
-reno==2.5.0
+PyYAML==5.3.1
+repoze.lru==0.7
requests==2.14.2
requestsexceptions==1.4.0
+restructuredtext-lint==1.3.0
retrying==1.2.3
rfc3986==0.3.1
-Sphinx==1.6.2
-sphinxcontrib-httpdomain==1.6.1
-sphinxcontrib-pecanwsme==0.10.0
-sphinxcontrib-seqdiag==0.8.4
-sphinxcontrib-svg2pdfconverter==0.1.0
-sphinxcontrib-websupport==1.0.1
+Routes==2.4.1
+simplegeneric==0.8.1
+simplejson==3.17.0
+six==1.14.0
+smmap==3.0.4
+soupsieve==2.0
SQLAlchemy==1.0.10
sqlalchemy-migrate==0.11.0
+sqlparse==0.3.1
+statsd==3.3.0
stestr==1.0.0
stevedore==1.20.0
+Tempita==0.5.2
+tenacity==6.2.0
+testrepository==0.0.20
testresources==2.0.0
testscenarios==0.4
testtools==2.2.0
-tooz==1.58.0
+tooz==2.7.0
+traceback2==1.4.0
+unittest2==1.1.0
+vine==1.3.0
+virtualbmc==1.4.0
+voluptuous==0.11.7
+waitress==1.4.3
+warlock==1.3.3
+wcwidth==0.1.9
WebOb==1.7.1
WebTest==2.0.27
+wrapt==1.12.1
WSME==0.9.3
+zeroconf==0.26.1
+zipp==3.1.0
diff --git a/playbooks/ci-workarounds/get_tftpd.yaml b/playbooks/ci-workarounds/get_tftpd.yaml
new file mode 100644
index 000000000..8a0e1f3b4
--- /dev/null
+++ b/playbooks/ci-workarounds/get_tftpd.yaml
@@ -0,0 +1,5 @@
+- hosts: all
+ tasks:
+ - name: Get tftpd info from journald
+ shell: "journalctl -t in.tftpd > {{ zuul_output_dir }}/logs/tftpd-journal.txt"
+ become: yes \ No newline at end of file
diff --git a/playbooks/legacy/grenade-dsvm-ironic/run.yaml b/playbooks/legacy/grenade-dsvm-ironic/run.yaml
deleted file mode 100644
index 080287a12..000000000
--- a/playbooks/legacy/grenade-dsvm-ironic/run.yaml
+++ /dev/null
@@ -1,121 +0,0 @@
-# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-# NOTE(sambetts) DO NOT UPDATE this job when you update the other jobs with
-# changes related to the current branch. The devstack local config defined in
-# this job is run against the last (old) version of the devstack plugin in the
-# grenade steps.
-# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-- hosts: all
- name: Autoconverted job legacy-grenade-dsvm-ironic from old job gate-grenade-dsvm-ironic-ubuntu-xenial-nv
- tasks:
-
- - name: Show the environment
- shell:
- cmd: |
- env
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- cat << 'EOF' >> ironic-vars-early
- # Set this early so that we do not have to be as careful with builder ordering in jobs.
- export GRENADE_PLUGINRC="enable_grenade_plugin ironic https://opendev.org/openstack/ironic"
-
- EOF
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- cat << 'EOF' >> ironic-extra-vars
- export PROJECTS="openstack/grenade $PROJECTS"
- export DEVSTACK_GATE_GRENADE=pullup
- export DEVSTACK_GATE_OS_TEST_TIMEOUT=2600
- export DEVSTACK_GATE_TEMPEST_BAREMETAL_BUILD_TIMEOUT=1200
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_BUILD_DEPLOY_RAMDISK=False"
- export DEVSTACK_GATE_TLSPROXY=0
- export DEVSTACK_GATE_USE_PYTHON3=True
- export BUILD_TIMEOUT
-
- # Standardize VM size for each supported ramdisk
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SPECS_RAM=384"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_RAMDISK_TYPE=tinyipa"
-
- EOF
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- cat << 'EOF' >> ironic-vars-early
- # use tempest plugin
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"TEMPEST_PLUGINS+=' /opt/stack/new/ironic-tempest-plugin'"
- export TEMPEST_CONCURRENCY=1
- EOF
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- set -e
- set -x
- export PROJECTS="openstack/ironic $PROJECTS"
- export PROJECTS="openstack/ironic-lib $PROJECTS"
- export PROJECTS="openstack/ironic-python-agent $PROJECTS"
- export PROJECTS="openstack/ironic-python-agent-builder $PROJECTS"
- export PROJECTS="openstack/ironic-tempest-plugin $PROJECTS"
- export PROJECTS="openstack/python-ironicclient $PROJECTS"
- export PROJECTS="openstack/virtualbmc $PROJECTS"
- export PYTHONUNBUFFERED=true
- export DEVSTACK_GATE_TEMPEST=1
- export DEVSTACK_GATE_IRONIC=1
- export DEVSTACK_GATE_NEUTRON=1
- export DEVSTACK_GATE_VIRT_DRIVER=ironic
- export DEVSTACK_GATE_CONFIGDRIVE=1
- export DEVSTACK_GATE_IRONIC_DRIVER=ipmi
- export BRANCH_OVERRIDE="{{ zuul.override_checkout | default('default') }}"
- if [ "$BRANCH_OVERRIDE" != "default" ] ; then
- export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
- fi
-
- if [[ "$ZUUL_BRANCH" != "stable/ocata" && "$BRANCH_OVERRIDE" != "stable/ocata" ]]; then
- export DEVSTACK_GATE_TLSPROXY=1
- fi
-
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_TEMPEST_WHOLE_DISK_IMAGE=False"
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_EPHEMERAL_DISK=1"
-
- export DEVSTACK_GATE_IRONIC_BUILD_RAMDISK=0
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_INSPECTOR_BUILD_RAMDISK=False"
-
- # NOTE(TheJulia): Keep the runtime down by disabling cleaning
- # the nodes and focus on the server related tests as opposed
- # to network scenario testing
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_AUTOMATED_CLEAN_ENABLED=False"
- export DEVSTACK_GATE_TEMPEST_REGEX=test_server_basic_ops
-
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_COUNT=7"
-
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_REQUIRE_AGENT_TOKEN=False"
-
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_DEFAULT_BOOT_OPTION=netboot"
-
- # Ensure the ironic-vars-EARLY file exists
- touch ironic-vars-early
- # Pull in the EARLY variables injected by the optional builders
- source ironic-vars-early
-
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ironic https://opendev.org/openstack/ironic"
-
- # Ensure the ironic-EXTRA-vars file exists
- touch ironic-extra-vars
- # Pull in the EXTRA variables injected by the optional builders
- source ironic-extra-vars
-
- cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
- ./safe-devstack-vm-gate-wrap.sh
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
diff --git a/playbooks/legacy/ironic-dsvm-base/post.yaml b/playbooks/legacy/ironic-dsvm-base/post.yaml
deleted file mode 100644
index e07f5510a..000000000
--- a/playbooks/legacy/ironic-dsvm-base/post.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-- hosts: primary
- tasks:
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/logs/**
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
diff --git a/playbooks/legacy/ironic-dsvm-base/pre.yaml b/playbooks/legacy/ironic-dsvm-base/pre.yaml
deleted file mode 100644
index c2530b2bf..000000000
--- a/playbooks/legacy/ironic-dsvm-base/pre.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-- hosts: primary
- name: Clone devstack-gate to /opt/git
- tasks:
- - name: Ensure legacy workspace directory
- file:
- path: '{{ ansible_user_dir }}/workspace'
- state: directory
- - shell:
- cmd: |
- set -e
- set -x
- cat > clonemap.yaml << EOF
- clonemap:
- - name: openstack/devstack-gate
- dest: devstack-gate
- EOF
- /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
- https://opendev.org \
- openstack/devstack-gate
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
diff --git a/releasenotes/notes/add-ibmc-raid-interface-0c13826e134fb4ce.yaml b/releasenotes/notes/add-ibmc-raid-interface-0c13826e134fb4ce.yaml
new file mode 100644
index 000000000..2bac1e5f3
--- /dev/null
+++ b/releasenotes/notes/add-ibmc-raid-interface-0c13826e134fb4ce.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Adds a RAID interface for the ``ibmc`` driver, which includes the
+ ``delete_configuration`` and ``create_configuration`` steps.
diff --git a/releasenotes/notes/add-ilo-inband-deploy-step-update-firmware-using-sum-cfee84a19120dd3c.yaml b/releasenotes/notes/add-ilo-inband-deploy-step-update-firmware-using-sum-cfee84a19120dd3c.yaml
new file mode 100644
index 000000000..fe1956e92
--- /dev/null
+++ b/releasenotes/notes/add-ilo-inband-deploy-step-update-firmware-using-sum-cfee84a19120dd3c.yaml
@@ -0,0 +1,11 @@
+---
+features:
+ - |
+ Adds in-band deploy step ``flash_firmware_sum`` to the ``management``
+ interface of the ``ilo`` and ``ilo5`` hardware types. The required
+ minimum version for the proliantutils library is 2.9.5.
+other:
+ - |
+ The proliantutils library version 2.9.5 enables ``ssacli`` based
+ in-band deploy step ``apply_configuration`` of ``agent`` RAID
+ interface for ``ilo`` and ``ilo5`` hardware types.
diff --git a/releasenotes/notes/add-ipxe-boot-iso-support-6ae2f5cc2250be3e.yaml b/releasenotes/notes/add-ipxe-boot-iso-support-6ae2f5cc2250be3e.yaml
new file mode 100644
index 000000000..a9139eb44
--- /dev/null
+++ b/releasenotes/notes/add-ipxe-boot-iso-support-6ae2f5cc2250be3e.yaml
@@ -0,0 +1,12 @@
+---
+features:
+ - |
+ Adds functionality to the ``ipxe`` boot interface to support use of an
+ ``instance_info/boot_iso`` value with the ``ramdisk`` deployment interface.
+other:
+ - |
+ Support for iPXE booting an ISO medium will only work if the ramdisk loaded
+ by the bootloader contains all artifacts required for the booting operating
+ system to load. This is a limitation of iPXE and the x86 system
+ architecture, as the memory holding the rest of the ISO disk image is
+ freed by the booting kernel.
diff --git a/releasenotes/notes/add-redfish-boot_iso-pass-through-8a6f4d0c98ada1d5.yaml b/releasenotes/notes/add-redfish-boot_iso-pass-through-8a6f4d0c98ada1d5.yaml
new file mode 100644
index 000000000..823991020
--- /dev/null
+++ b/releasenotes/notes/add-redfish-boot_iso-pass-through-8a6f4d0c98ada1d5.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Adds functionality to allow a user to supply a node
+ ``instance_info/boot_iso`` parameter on machines utilizing the
+ ``redfish-virtual-media`` boot interface. When combined with the
+ ``ramdisk`` deployment interface, this allows an instance to boot
+ into a user-supplied ISO image.
diff --git a/releasenotes/notes/agent-client-poll-ce16fd589e88c95a.yaml b/releasenotes/notes/agent-client-poll-ce16fd589e88c95a.yaml
new file mode 100644
index 000000000..693adebe8
--- /dev/null
+++ b/releasenotes/notes/agent-client-poll-ce16fd589e88c95a.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Instead of increasing timeout when running long synchronous tasks on
+ ironic-python-agent, ironic now runs them asynchronously and polls
+ the agent until completion. It is no longer necessary to account for
+ long-running tasks when setting ``[agent]command_timeout``.
diff --git a/releasenotes/notes/agent-power-a000fdf37cb870e4.yaml b/releasenotes/notes/agent-power-a000fdf37cb870e4.yaml
new file mode 100644
index 000000000..549a78b21
--- /dev/null
+++ b/releasenotes/notes/agent-power-a000fdf37cb870e4.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ The new **experimental** ``agent`` power interface allows limited
+ provisioning operations on nodes without BMC credentials. See `story
+ 2007771 <https://storyboard.openstack.org/#!/story/2007771>`_ for details.
diff --git a/releasenotes/notes/agent-power-off-2115fcfaac030bd0.yaml b/releasenotes/notes/agent-power-off-2115fcfaac030bd0.yaml
new file mode 100644
index 000000000..f6fca6117
--- /dev/null
+++ b/releasenotes/notes/agent-power-off-2115fcfaac030bd0.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes a rare issue where the agent successfully powers off a node after
+ deployment, but ironic never learns about it and issues another reboot.
diff --git a/releasenotes/notes/agent-raid-647acfd599e83476.yaml b/releasenotes/notes/agent-raid-647acfd599e83476.yaml
new file mode 100644
index 000000000..84248fa10
--- /dev/null
+++ b/releasenotes/notes/agent-raid-647acfd599e83476.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ The ``agent`` RAID interface now supports building RAID as a deploy step
+ ``apply_configuration``.
diff --git a/releasenotes/notes/agent-raid-validate-f7348ac034606b83.yaml b/releasenotes/notes/agent-raid-validate-f7348ac034606b83.yaml
new file mode 100644
index 000000000..54a3f0a10
--- /dev/null
+++ b/releasenotes/notes/agent-raid-validate-f7348ac034606b83.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Adds RAID configuration validation to the ``apply_configuration`` deploy
+ step of the ``agent`` RAID interface. Also, a post-deploy hook has been
+ added to this deploy step to update the root device hint.
diff --git a/releasenotes/notes/agent-token-817a03776bd46d5b.yaml b/releasenotes/notes/agent-token-817a03776bd46d5b.yaml
new file mode 100644
index 000000000..88071d67c
--- /dev/null
+++ b/releasenotes/notes/agent-token-817a03776bd46d5b.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes deployment in fast-track mode by keeping the required internal fields
+ (``agent_url`` and ``agent_secret_token``) intact when starting and
+ finishing deployment and cleaning.
diff --git a/releasenotes/notes/agent-verify-ca-ddbfbb0f27198d82.yaml b/releasenotes/notes/agent-verify-ca-ddbfbb0f27198d82.yaml
new file mode 100644
index 000000000..3f945f22d
--- /dev/null
+++ b/releasenotes/notes/agent-verify-ca-ddbfbb0f27198d82.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Adds a new ``driver_info`` parameter ``agent_verify_ca`` that allows
+ specifying a file with certificates to use when accessing IPA. Set
+ to ``False`` to disable certificate validation.
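As a rough illustration of wiring this up (the node name and CA path below
are placeholders), the parameter could be set with the OpenStack client::

    openstack baremetal node set <node> \
        --driver-info agent_verify_ca=/etc/ironic/ipa-ca.crt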
diff --git a/releasenotes/notes/allocation-delete-26c7c2f1651759f5.yaml b/releasenotes/notes/allocation-delete-26c7c2f1651759f5.yaml
new file mode 100644
index 000000000..0973a725e
--- /dev/null
+++ b/releasenotes/notes/allocation-delete-26c7c2f1651759f5.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes deleting nodes with maintenance mode on and an allocation present.
+ Previously it caused an internal server error. See `story 2007823
+ <https://storyboard.openstack.org/#!/story/2007823>`_ for details.
diff --git a/releasenotes/notes/bug-2007963-idrac-wsman-raid-apply-configuration-792ccf195057016b.yaml b/releasenotes/notes/bug-2007963-idrac-wsman-raid-apply-configuration-792ccf195057016b.yaml
new file mode 100644
index 000000000..166bfbbda
--- /dev/null
+++ b/releasenotes/notes/bug-2007963-idrac-wsman-raid-apply-configuration-792ccf195057016b.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixes RAID ``apply_configuration`` deploy step for ``idrac-wsman`` where
+ deployment failed with ``TypeError``.
+ See `story 2007963 <https://storyboard.openstack.org/#!/story/2007963>`_. \ No newline at end of file
diff --git a/releasenotes/notes/change_default_use_ipmitool_retries-2529ce032eae7d1b.yaml b/releasenotes/notes/change_default_use_ipmitool_retries-2529ce032eae7d1b.yaml
new file mode 100644
index 000000000..04ca61cf6
--- /dev/null
+++ b/releasenotes/notes/change_default_use_ipmitool_retries-2529ce032eae7d1b.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ Change the default for ``use_ipmitool_retries`` to ``False`` so that
+ Ironic will do the retries by default. This is needed for certain BMCs
+ that don't support the Cipher Suites command, where ipmitool retries take
+ an excessively long time. See `story 2007632
+ <https://storyboard.openstack.org/#!/story/2007632>`_ for additional
+ information.
diff --git a/releasenotes/notes/cleaning-logs-dc115b0926ae3982.yaml b/releasenotes/notes/cleaning-logs-dc115b0926ae3982.yaml
new file mode 100644
index 000000000..c90e2edd4
--- /dev/null
+++ b/releasenotes/notes/cleaning-logs-dc115b0926ae3982.yaml
@@ -0,0 +1,5 @@
+---
+other:
+ - |
+ Ramdisk logs are now collected during cleaning the same way as during
+ deployment.
diff --git a/releasenotes/notes/del-api-url-eb2ea29aa63a2cb5.yaml b/releasenotes/notes/del-api-url-eb2ea29aa63a2cb5.yaml
new file mode 100644
index 000000000..517d012ec
--- /dev/null
+++ b/releasenotes/notes/del-api-url-eb2ea29aa63a2cb5.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+ The ``[conductor]api_url`` option was deprecated and has been removed; use
+ ``[service_catalog]endpoint_override`` instead if a specific ironic API URL
+ is required.
diff --git a/releasenotes/notes/del-cinder-url-cf43cd0336c22878.yaml b/releasenotes/notes/del-cinder-url-cf43cd0336c22878.yaml
new file mode 100644
index 000000000..17732537e
--- /dev/null
+++ b/releasenotes/notes/del-cinder-url-cf43cd0336c22878.yaml
@@ -0,0 +1,3 @@
+upgrade:
+ - |
+ The ``[cinder]url`` option was removed; use ``[cinder]endpoint_override`` instead.
diff --git a/releasenotes/notes/del-fatal_exception_format_errors-f63b15c8aa460dff.yaml b/releasenotes/notes/del-fatal_exception_format_errors-f63b15c8aa460dff.yaml
new file mode 100644
index 000000000..54f2dd31a
--- /dev/null
+++ b/releasenotes/notes/del-fatal_exception_format_errors-f63b15c8aa460dff.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ The ``[DEFAULT]fatal_exception_format_errors`` option was removed;
+ use ``[ironic_lib]fatal_exception_format_errors`` instead.
diff --git a/releasenotes/notes/deleting-dcdb9cf0d2a6a1a6.yaml b/releasenotes/notes/deleting-dcdb9cf0d2a6a1a6.yaml
new file mode 100644
index 000000000..3b53b2e9a
--- /dev/null
+++ b/releasenotes/notes/deleting-dcdb9cf0d2a6a1a6.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - |
+ Cleans up nodes stuck in the ``deleting`` state on conductor restart.
diff --git a/releasenotes/notes/deploy-step-validate-76b2aa97e02ba669.yaml b/releasenotes/notes/deploy-step-validate-76b2aa97e02ba669.yaml
new file mode 100644
index 000000000..1dea110a8
--- /dev/null
+++ b/releasenotes/notes/deploy-step-validate-76b2aa97e02ba669.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes deployment hanging on an invalid in-band deploy step in a deploy
+ template.
diff --git a/releasenotes/notes/destroy-broken-8b13de8382199aca.yaml b/releasenotes/notes/destroy-broken-8b13de8382199aca.yaml
new file mode 100644
index 000000000..7db6a1ff8
--- /dev/null
+++ b/releasenotes/notes/destroy-broken-8b13de8382199aca.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Allows deleting nodes with a broken driver unless they require stopping
+ serial console.
diff --git a/releasenotes/notes/direct-deploy-steps-36486987156017d7.yaml b/releasenotes/notes/direct-deploy-steps-36486987156017d7.yaml
new file mode 100644
index 000000000..d45da4114
--- /dev/null
+++ b/releasenotes/notes/direct-deploy-steps-36486987156017d7.yaml
@@ -0,0 +1,17 @@
+---
+features:
+ - |
+ The ``deploy`` deploy step of the ``direct`` deploy interface has been
+ split into three deploy steps:
+
+ * ``deploy`` itself (priority 100) boots the deploy ramdisk
+
+ * ``write_image`` (priority 80) downloads the user image from inside
+ the ramdisk and writes it to the disk.
+
+ * ``prepare_instance_boot`` (priority 60) prepares the boot device and
+ writes the bootloader (if needed).
+
+ Priorities 81 to 99 are to be used for in-band deploy steps that run before
+ the image is written. Priorities 61 to 79 can be used for in-band deploy
+ steps that modify the written image before the bootloader is installed.
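For illustration, an in-band step targeting the 81-99 window described above
could be exposed from an ironic-python-agent hardware manager. This is a
minimal sketch; the manager and step names are hypothetical::

    from ironic_python_agent import hardware


    class ExampleHardwareManager(hardware.HardwareManager):
        # Hypothetical manager exposing one in-band deploy step.

        HARDWARE_MANAGER_NAME = 'ExampleHardwareManager'
        HARDWARE_MANAGER_VERSION = '1.0'

        def evaluate_hardware_support(self):
            return hardware.HardwareSupport.SERVICE_PROVIDER

        def get_deploy_steps(self, node, ports):
            # Priority 85 lands between deploy (100) and write_image (80),
            # so the step runs before the image is written.
            return [{'step': 'example_pre_write_step',
                     'priority': 85,
                     'interface': 'deploy',
                     'reboot_requested': False,
                     'argsinfo': None}]

        def example_pre_write_step(self, node, ports):
            # Site-specific preparation would go here.
            pass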
diff --git a/releasenotes/notes/direct-fast-track-d0f43850b6e80751.yaml b/releasenotes/notes/direct-fast-track-d0f43850b6e80751.yaml
new file mode 100644
index 000000000..fee9738a9
--- /dev/null
+++ b/releasenotes/notes/direct-fast-track-d0f43850b6e80751.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes fast-track deployments with the ``direct`` deploy interface, which
+ previously hung.
diff --git a/releasenotes/notes/disable_periodic_task-590a91c0a5235cfb.yaml b/releasenotes/notes/disable_periodic_task-590a91c0a5235cfb.yaml
new file mode 100644
index 000000000..d7b23f16f
--- /dev/null
+++ b/releasenotes/notes/disable_periodic_task-590a91c0a5235cfb.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes periodic task initialization options to prevent negative values.
+ If ``[conductor]clean_callback_timeout`` or
+ ``[conductor]inspect_wait_timeout`` has a negative value, an error
+ will be triggered.
diff --git a/releasenotes/notes/dont-cleanup-cache-twice-0395a50ad723bca8.yaml b/releasenotes/notes/dont-cleanup-cache-twice-0395a50ad723bca8.yaml
new file mode 100644
index 000000000..a9f832786
--- /dev/null
+++ b/releasenotes/notes/dont-cleanup-cache-twice-0395a50ad723bca8.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Ironic no longer tries to allocate the space needed to convert an instance
+ image to raw format when the image is already raw.
diff --git a/releasenotes/notes/driver-maintenance-0945c2939fa4e917.yaml b/releasenotes/notes/driver-maintenance-0945c2939fa4e917.yaml
new file mode 100644
index 000000000..0a7bf25e0
--- /dev/null
+++ b/releasenotes/notes/driver-maintenance-0945c2939fa4e917.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes updating driver fields for nodes with a broken driver. This is
+ required to be able to set maintenance for such nodes.
diff --git a/releasenotes/notes/explicit_ipxe_config_options-d7bf9a743a13f523.yaml b/releasenotes/notes/explicit_ipxe_config_options-d7bf9a743a13f523.yaml
new file mode 100644
index 000000000..acf5daccf
--- /dev/null
+++ b/releasenotes/notes/explicit_ipxe_config_options-d7bf9a743a13f523.yaml
@@ -0,0 +1,17 @@
+---
+upgrade:
+ - |
+ Operators upgrading from earlier versions using PXE should explicitly set
+ ``[pxe]ipxe_bootfile_name``, ``[pxe]uefi_ipxe_bootfile_name``, and
+ possibly ``[pxe]ipxe_bootfile_name_by_arch`` settings, as well as an
+ iPXE-specific ``[pxe]ipxe_config_template`` override, if required.
+
+ Leaving ``[pxe]ipxe_config_template`` unset will result in the
+ ``[pxe]pxe_config_template`` value being used. The default points to the
+ supplied standard iPXE template, so only operators with highly customized
+ deployments may have to tune this setting.
+fixes:
+ - |
+ Addresses the lack of an ability to explicitly set different bootloaders
+ for ``iPXE`` and ``PXE`` based boot operations via their respective
+ ``ipxe`` and ``pxe`` boot interfaces.
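A hedged ironic.conf sketch of the explicit settings meant here, using the
standard bootfile names shipped for iPXE (the values are assumptions; adjust
them to the artifacts actually deployed)::

    [pxe]
    ipxe_bootfile_name = undionly.kpxe
    uefi_ipxe_bootfile_name = ipxe.efi
    ipxe_config_template = $pybasedir/drivers/modules/ipxe_config.template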
diff --git a/releasenotes/notes/fast-track-with-cleaning-438225116a11662d.yaml b/releasenotes/notes/fast-track-with-cleaning-438225116a11662d.yaml
new file mode 100644
index 000000000..803977a0f
--- /dev/null
+++ b/releasenotes/notes/fast-track-with-cleaning-438225116a11662d.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes a bug in "fast track" where Ironic would delete the ``agent token``
+ upon exiting cleaning steps. However, if we are in fast track mode, we can
+ preserve the token and continue operations with the agent as it is not
+ powered off during fast track operations.
diff --git a/releasenotes/notes/fix-json-rpc-client-ssl-2438a731beb3d5f9.yaml b/releasenotes/notes/fix-json-rpc-client-ssl-2438a731beb3d5f9.yaml
new file mode 100644
index 000000000..5181f9051
--- /dev/null
+++ b/releasenotes/notes/fix-json-rpc-client-ssl-2438a731beb3d5f9.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes ``json_rpc`` client connections always using HTTP even if ``use_ssl``
+ was set to ``True``.
diff --git a/releasenotes/notes/fix-redfish-sadness-workaround-ed02cb310ff369f4.yaml b/releasenotes/notes/fix-redfish-sadness-workaround-ed02cb310ff369f4.yaml
new file mode 100644
index 000000000..989a7a646
--- /dev/null
+++ b/releasenotes/notes/fix-redfish-sadness-workaround-ed02cb310ff369f4.yaml
@@ -0,0 +1,11 @@
+---
+fixes:
+ - |
+ Fixes a workaround for hardware that does not support persistent
+ boot device setting with the ``redfish`` or ``idrac-redfish``
+ management interface implementation. When such a situation is
+ detected, ironic falls back to one-time boot device setting,
+ restoring it on every reboot or power on.
+
+ For more information, see `story 2007733
+ <https://storyboard.openstack.org/#!/story/2007733>`_.
diff --git a/releasenotes/notes/hash-ring-algo-4337c18117b33070.yaml b/releasenotes/notes/hash-ring-algo-4337c18117b33070.yaml
new file mode 100644
index 000000000..181a5ed9d
--- /dev/null
+++ b/releasenotes/notes/hash-ring-algo-4337c18117b33070.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Provides a new option ``[DEFAULT]hash_ring_algorithm`` that specifies
+ which cryptographic algorithm to use when building the hash ring. Set
+ to something other than ``md5`` when using ironic on a system in FIPS
+ mode.
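For example, on a FIPS-enabled host the option might be pointed at a
stronger hashlib algorithm (``sha256`` here is an illustrative choice)::

    [DEFAULT]
    hash_ring_algorithm = sha256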
diff --git a/releasenotes/notes/http-basic-auth-f8c0536eba989918.yaml b/releasenotes/notes/http-basic-auth-f8c0536eba989918.yaml
new file mode 100644
index 000000000..356df84ed
--- /dev/null
+++ b/releasenotes/notes/http-basic-auth-f8c0536eba989918.yaml
@@ -0,0 +1,34 @@
+---
+features:
+ - |
+ Enable Basic HTTP authentication middleware.
+
+ Having ``noauth`` as the only option for standalone ironic imposes
+ constraints on how the API is exposed on the network. Having some kind of
+ authentication layer behind a TLS deployment eases these constraints.
+
+ When the config option ``auth_strategy`` is set to ``http_basic`` then
+ non-public API calls require a valid HTTP Basic authentication header to
+ be set. The config option ``http_basic_auth_user_file`` defaults to
+ ``/etc/ironic/htpasswd`` and points to a file which supports the Apache
+ htpasswd syntax[1]. This file is read for every request, so no service
+ restart is required when changes are made.
+
+ Like the ``noauth`` auth strategy, the ``http_basic`` auth strategy is
+ intended for standalone deployments of ironic, and integration with other
+ OpenStack services cannot depend on a service catalog.
+
+ The only password digest supported is bcrypt, and the ``bcrypt`` python
+ library is used for password checks since it supports ``$2y$`` prefixed
+ bcrypt passwords as generated by the Apache htpasswd utility.
+
+ To try HTTP basic authentication, the following can be done:
+
+ * Set ``/etc/ironic/ironic.conf`` ``DEFAULT`` ``auth_strategy`` to
+ ``http_basic``
+ * Populate the htpasswd file with entries, for example:
+ ``htpasswd -nbB myName myPassword >> /etc/ironic/htpasswd``
+ * Make basic authenticated HTTP requests, for example:
+ ``curl --user myName:myPassword http://localhost:6385/v1/drivers``
+
+ [1] https://httpd.apache.org/docs/current/misc/password_encryptions.html
diff --git a/releasenotes/notes/ibmcclient-fix-8c6cb49be0aef5f2.yaml b/releasenotes/notes/ibmcclient-fix-8c6cb49be0aef5f2.yaml
new file mode 100644
index 000000000..bf2fdfa80
--- /dev/null
+++ b/releasenotes/notes/ibmcclient-fix-8c6cb49be0aef5f2.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+ Updates required ibmcclient version for ibmc drivers to 0.2.2.
diff --git a/releasenotes/notes/idrac-add-ehba-support-10b90c92b8865364.yaml b/releasenotes/notes/idrac-add-ehba-support-10b90c92b8865364.yaml
new file mode 100644
index 000000000..baf41b3e1
--- /dev/null
+++ b/releasenotes/notes/idrac-add-ehba-support-10b90c92b8865364.yaml
@@ -0,0 +1,15 @@
+fixes:
+ - |
+ Fixes virtual disk creation by changing the PERC H740P controller mode
+ from ``Enhanced HBA`` to ``RAID`` in the ``delete_configuration`` clean
+ step.
+ PERC H740P controllers support both RAID mode and Enhanced HBA mode.
+ When the controller is in Enhanced HBA mode, it creates single-disk
+ RAID0 virtual disks out of non-RAID physical disks. As a result, a
+ request to create a virtual disk with a supported RAID level fails
+ because no physical disks are available.
+ This patch converts PERC H740P RAID controllers to RAID mode if
+ Enhanced HBA mode is found enabled.
+ See `bug 2007711 <https://storyboard.openstack.org/#!/story/2007711>`_
+ for more details.
diff --git a/releasenotes/notes/ilo-support-boot-mode-management-apis-8173002daf79894c.yaml b/releasenotes/notes/ilo-support-boot-mode-management-apis-8173002daf79894c.yaml
new file mode 100644
index 000000000..bb3f9ae52
--- /dev/null
+++ b/releasenotes/notes/ilo-support-boot-mode-management-apis-8173002daf79894c.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Adds support for boot mode retrieval and setting with the ``ilo`` and
+ ``ilo5`` hardware types.
diff --git a/releasenotes/notes/in-band-steps-e4a1fe759029fea5.yaml b/releasenotes/notes/in-band-steps-e4a1fe759029fea5.yaml
new file mode 100644
index 000000000..d3867a344
--- /dev/null
+++ b/releasenotes/notes/in-band-steps-e4a1fe759029fea5.yaml
@@ -0,0 +1,23 @@
+---
+features:
+ - |
+ Adds support for running custom in-band deploy steps when provisioning.
+ Step priorities from 41 to 59 can be used for steps that run after
+ the image is written and the bootloader is installed.
+deprecations:
+ - |
+ Running the whole deployment process as a monolithic ``deploy.deploy``
+ deploy step is now deprecated. In a future release this step will only be
+ used to prepare the deployment and start the agent, and the special
+ handling will be removed. All third-party deploy interfaces must be updated
+ to provide real deploy steps instead and set the
+ ``has_decomposed_deploy_steps`` attribute to ``True`` on the deploy
+ interface level.
+other:
+ - |
+ As part of the agent deploy interfaces refactoring, breaking changes will
+ be made to implementations of ``AgentDeploy`` and ``ISCSIDeploy``.
+ Third party deploy interfaces must be updated to inherit
+ ``HeartbeatMixin``, ``AgentBaseMixin`` or ``AgentDeployMixin``
+ from ``ironic.drivers.modules.agent_base`` instead since their API is
+ considered more stable.
diff --git a/releasenotes/notes/inspection-fast-track-ab5165e11d3e9522.yaml b/releasenotes/notes/inspection-fast-track-ab5165e11d3e9522.yaml
new file mode 100644
index 000000000..1af84f5bd
--- /dev/null
+++ b/releasenotes/notes/inspection-fast-track-ab5165e11d3e9522.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes fast track deployment preceded by managed inspection by providing
+ the ironic API URL to the ramdisk so that it can heartbeat.
diff --git a/releasenotes/notes/ipa-erase_devices-skip-read-only-9f8cd9278c35a84e.yaml b/releasenotes/notes/ipa-erase_devices-skip-read-only-9f8cd9278c35a84e.yaml
new file mode 100644
index 000000000..b530dd807
--- /dev/null
+++ b/releasenotes/notes/ipa-erase_devices-skip-read-only-9f8cd9278c35a84e.yaml
@@ -0,0 +1,12 @@
+---
+features:
+ - |
+ Adds the capability for an operator to set a configuration setting which
+ tells the ironic-python-agent it is okay to skip read-only block devices
+ when performing an ``erase_devices`` cleaning operation. This requires
+ ironic-python-agent version 6.0.0 or greater and can be set using the
+ ``[deploy]erase_skip_read_only`` configuration option.
+other:
+ - |
+ Starting in ironic-python-agent 6.0.0, metadata erasure of read-only
+ devices is skipped by default.
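A minimal ironic.conf sketch of opting in to the new behaviour::

    [deploy]
    erase_skip_read_only = True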
diff --git a/releasenotes/notes/ipmi-retries-min-command-interval-070cd7eff5eb74dd.yaml b/releasenotes/notes/ipmi-retries-min-command-interval-070cd7eff5eb74dd.yaml
new file mode 100644
index 000000000..413224b02
--- /dev/null
+++ b/releasenotes/notes/ipmi-retries-min-command-interval-070cd7eff5eb74dd.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ When Ironic is doing IPMI retries, the configured ``min_command_interval``
+ should be used instead of a default value of ``1``, which may be too short
+ for some BMCs.
diff --git a/releasenotes/notes/ipmitool-use_ipmitool_retries-b55b2b8ed5cab603.yaml b/releasenotes/notes/ipmitool-use_ipmitool_retries-b55b2b8ed5cab603.yaml
new file mode 100644
index 000000000..3a051f028
--- /dev/null
+++ b/releasenotes/notes/ipmitool-use_ipmitool_retries-b55b2b8ed5cab603.yaml
@@ -0,0 +1,16 @@
+---
+features:
+ - |
+ Adds a new ``[ipmi]use_ipmitool_retries`` option. When set to
+ ``True`` and timing is supported by ipmitool, the number of
+ retries and command interval will be passed to ipmitool so
+ that ipmitool will do the retries. When set to ``False``,
+ ironic will do the retries. Default is ``True``.
+issues:
+ - |
+ Some BMCs do not support the ``Channel Cipher Suites`` command
+ that newer versions of ipmitool use. These versions of
+ ipmitool will resend this command for each ipmitool retry,
+ resulting in long response times. Setting
+ ``[ipmi]use_ipmitool_retries`` to ``false`` will avoid this
+ situation by implementing retries on the ironic level.
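For the BMCs described in the issue above, a plausible configuration hands
the retries to ironic (the interval value is illustrative)::

    [ipmi]
    use_ipmitool_retries = False
    command_retry_interval = 5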
diff --git a/releasenotes/notes/iscsi-ansible-steps-817b52269d2455b0.yaml b/releasenotes/notes/iscsi-ansible-steps-817b52269d2455b0.yaml
new file mode 100644
index 000000000..fec61904c
--- /dev/null
+++ b/releasenotes/notes/iscsi-ansible-steps-817b52269d2455b0.yaml
@@ -0,0 +1,28 @@
+---
+features:
+ - |
+ The ``deploy`` deploy step of the ``iscsi`` deploy interface has been
+ split into three deploy steps:
+
+ * ``deploy`` itself (priority 100) boots the deploy ramdisk
+
+ * ``write_image`` (priority 80) writes the image to the disk exposed
+ via iSCSI.
+
+ * ``prepare_instance_boot`` (priority 60) prepares the boot device and
+ writes the bootloader (if needed).
+
+ Priorities 81 to 99 are to be used for in-band deploy steps that run before
+ the image is written. Priorities 61 to 79 can be used for in-band deploy
+ steps that modify the written image before the bootloader is installed.
+ - |
+ The ``deploy`` deploy step of the ``ansible`` deploy interface has been
+ split into two deploy steps:
+
+ * ``deploy`` itself (priority 100) boots the deploy ramdisk
+
+ * ``write_image`` (priority 80) writes the image to the disk and configures
+ the bootloader.
+
+ Priorities 81 to 99 are to be used for in-band deploy steps that run before
+ the image is written.
diff --git a/releasenotes/notes/json-rpc-timeout-ac30eea164b3a294.yaml b/releasenotes/notes/json-rpc-timeout-ac30eea164b3a294.yaml
new file mode 100644
index 000000000..b177d8e8b
--- /dev/null
+++ b/releasenotes/notes/json-rpc-timeout-ac30eea164b3a294.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes the JSON RPC backend potentially hanging on inability to connect
+ to a conductor. The default timeout is now 120 seconds. The timeout and
+ the number of retries can be adjusted via the configuration options
+ ``[json_rpc]timeout`` and ``[json_rpc]connect_retries`` respectively.
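A minimal sketch of tuning these options (the values are illustrative, not
recommendations)::

    [json_rpc]
    timeout = 120
    connect_retries = 3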
diff --git a/releasenotes/notes/json_rpc_http_basic-42dfc6ca2471a30e.yaml b/releasenotes/notes/json_rpc_http_basic-42dfc6ca2471a30e.yaml
new file mode 100644
index 000000000..ee1eeef18
--- /dev/null
+++ b/releasenotes/notes/json_rpc_http_basic-42dfc6ca2471a30e.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+ - |
+ The configuration options ``[json_rpc]http_basic_username`` and
+ ``[json_rpc]http_basic_password`` have been deprecated in favour of the
+ more generic ``[json_rpc]username`` and ``[json_rpc]password``.
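The migration is a straight rename; the credentials below are placeholders::

    [json_rpc]
    # formerly http_basic_username / http_basic_password
    username = rpc-user
    password = rpc-secret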
diff --git a/releasenotes/notes/missing-sw-raid-b7fdc9259612970d.yaml b/releasenotes/notes/missing-sw-raid-b7fdc9259612970d.yaml
new file mode 100644
index 000000000..0b90cfda7
--- /dev/null
+++ b/releasenotes/notes/missing-sw-raid-b7fdc9259612970d.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes missing ``agent`` RAID compatibility for the ``ilo5`` and ``idrac``
+ hardware types, which prevented software RAID from working with them.
diff --git a/releasenotes/notes/netboot-fallback-b208b2c3b40a0d01.yaml b/releasenotes/notes/netboot-fallback-b208b2c3b40a0d01.yaml
new file mode 100644
index 000000000..61f61a15f
--- /dev/null
+++ b/releasenotes/notes/netboot-fallback-b208b2c3b40a0d01.yaml
@@ -0,0 +1,12 @@
+---
+issues:
+ - |
+ The SNMP hardware type cannot change boot devices and thus may fail
+ to deploy nodes with local boot. To work around this problem, set
+ ``[pxe]enable_netboot_fallback`` to ``True``.
+features:
+ - |
+ Adds the ability to generate network boot templates even for nodes that
+ use local boot via the new ``[pxe]enable_netboot_fallback`` option.
+ This is required to work around the situation where switching boot devices
+ does not work reliably.
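For SNMP-managed nodes the workaround would look like::

    [pxe]
    enable_netboot_fallback = True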
diff --git a/releasenotes/notes/no-power-on-842b21d55b07a632.yaml b/releasenotes/notes/no-power-on-842b21d55b07a632.yaml
new file mode 100644
index 000000000..71f4dc9eb
--- /dev/null
+++ b/releasenotes/notes/no-power-on-842b21d55b07a632.yaml
@@ -0,0 +1,9 @@
+---
+other:
+ - |
+ A new method ``supports_power_sync`` has been added to ``PowerInterface``.
+ If it returns ``False``, the conductor will not try to assert power state
+ for the node, merely recording the returned state instead.
+ - |
+ The base agent deploy interface code now correctly handles power interfaces
+ that do not support the ``power on`` action but support ``reboot``.
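A minimal sketch of a power interface opting out of power state assertion;
the class is hypothetical and only the last method matters here::

    from ironic.drivers import base


    class ExamplePower(base.PowerInterface):
        # Hypothetical interface that cannot assert power state.

        def get_properties(self):
            return {}

        def validate(self, task):
            pass

        def get_power_state(self, task):
            return None  # unknown; the conductor merely records it

        def set_power_state(self, task, power_state, timeout=None):
            raise NotImplementedError('power on/off is not supported')

        def reboot(self, task, timeout=None):
            pass  # reboot is supported even though 'power on' is not

        def supports_power_sync(self, task):
            # False tells the conductor to record, not assert,
            # the power state of nodes using this interface.
            return False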
diff --git a/releasenotes/notes/node-network-data-6f998aaa57020f4b.yaml b/releasenotes/notes/node-network-data-6f998aaa57020f4b.yaml
new file mode 100644
index 000000000..d10f42b2b
--- /dev/null
+++ b/releasenotes/notes/node-network-data-6f998aaa57020f4b.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Adds a ``network_data`` property to the node, a dictionary that represents
+ the node's static network configuration. The Ironic API performs formal
+ JSON validation of the node's ``network_data`` content against a
+ user-supplied JSON schema at the driver validation step.
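As a rough illustration, ``network_data`` follows the OpenStack network
metadata layout; all identifiers and addresses below are made up::

    {
        "links": [{"id": "port-0", "type": "phy",
                   "ethernet_mac_address": "52:54:00:12:34:56"}],
        "networks": [{"id": "net-0", "link": "port-0", "type": "ipv4",
                      "ip_address": "192.0.2.10",
                      "netmask": "255.255.255.0"}],
        "services": []
    }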
diff --git a/releasenotes/notes/port-list-by-project-8cfaf3b2cf0dd627.yaml b/releasenotes/notes/port-list-by-project-8cfaf3b2cf0dd627.yaml
new file mode 100644
index 000000000..32d2821af
--- /dev/null
+++ b/releasenotes/notes/port-list-by-project-8cfaf3b2cf0dd627.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Allow port lists to be filtered by project. Doing so checks the specified
+ project against the port's node's owner and lessee.
diff --git a/releasenotes/notes/prevent-ports-with-vif-deletion-3edac3df5aa1becf.yaml b/releasenotes/notes/prevent-ports-with-vif-deletion-3edac3df5aa1becf.yaml
new file mode 100644
index 000000000..3d7fed1e2
--- /dev/null
+++ b/releasenotes/notes/prevent-ports-with-vif-deletion-3edac3df5aa1becf.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes logic that is applied to port deletions to also consider the
+ presence of a VIF attachment record, which should be removed before
+ attempting to delete the node. Failure to do so can result in
+ erroneous records in the Networking Service.
diff --git a/releasenotes/notes/raid-max-c0920cc44b9779ee.yaml b/releasenotes/notes/raid-max-c0920cc44b9779ee.yaml
new file mode 100644
index 000000000..3035d2b8d
--- /dev/null
+++ b/releasenotes/notes/raid-max-c0920cc44b9779ee.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ No longer tries to set ``local_gb`` to ``MAX`` when building RAID with
+ the root disk using ``MAX`` for its size.
diff --git a/releasenotes/notes/redfish-noop-mgmt-b61d02b77b1c9d6b.yaml b/releasenotes/notes/redfish-noop-mgmt-b61d02b77b1c9d6b.yaml
new file mode 100644
index 000000000..b4ca4e44b
--- /dev/null
+++ b/releasenotes/notes/redfish-noop-mgmt-b61d02b77b1c9d6b.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ To provide a workaround for incorrect boot order problems on some hardware,
+ the ``redfish`` hardware type now supports the ``noop`` management
+ interface, similarly to IPMI and SNMP.
diff --git a/releasenotes/notes/redfish-power-87062756bce8b047.yaml b/releasenotes/notes/redfish-power-87062756bce8b047.yaml
new file mode 100644
index 000000000..03f3e2dfb
--- /dev/null
+++ b/releasenotes/notes/redfish-power-87062756bce8b047.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Rebooting a node with the ``redfish`` power interface is now implemented
+ via a power off request followed by power on to avoid returning success
+ when a node stays powered on after the reboot request.
diff --git a/releasenotes/notes/redfish-sadness-6e2a37b3f45ef1aa.yaml b/releasenotes/notes/redfish-sadness-6e2a37b3f45ef1aa.yaml
new file mode 100644
index 000000000..a8cb499be
--- /dev/null
+++ b/releasenotes/notes/redfish-sadness-6e2a37b3f45ef1aa.yaml
@@ -0,0 +1,18 @@
+---
+fixes:
+ - |
+ Provides a workaround for hardware that does not support persistent boot
+ device setting with the ``redfish`` hardware type. When such a situation is
+ detected, ironic will fall back to one-time boot device setting, restoring
+ it on every reboot.
+issues:
+ - |
+ Some redfish-enabled hardware is known not to support persistent boot
+ device setting that is used by the Bare Metal service for deployed
+ instances. The ``redfish`` hardware type tries to work around this problem,
+ but rebooting such an instance in-band may cause it to boot incorrectly.
+ A predictable boot order should be configured in the node's boot firmware
+ to avoid issues and at least metadata cleaning must be enabled.
+ See `this mailing list thread
+ <http://lists.openstack.org/pipermail/openstack-discuss/2020-April/014543.html>`_
+ for technical details.
diff --git a/releasenotes/notes/redfish-virtual-media-permission-fix-1909b9cdbbbf9fd1.yaml b/releasenotes/notes/redfish-virtual-media-permission-fix-1909b9cdbbbf9fd1.yaml
new file mode 100644
index 000000000..fb0bfed67
--- /dev/null
+++ b/releasenotes/notes/redfish-virtual-media-permission-fix-1909b9cdbbbf9fd1.yaml
@@ -0,0 +1,15 @@
+---
+upgrade:
+ - |
+ A permission setting has been added for ``redfish-virtual-media`` boot
+ interface, which allows for explicit file permission setting when the
+ driver is used. The default for the new ``[redfish]file_permission``
+ setting is ``0o644``, or 644 if manually changed using ``chmod`` on the
+ command line.
+ Operators using ``redfish-virtual-media`` may need to check the
+ ``/httpboot/redfish`` folder permissions if they were running the
+ conductor with a specific ``umask`` to work around the permission
+ setting defect.
+fixes:
+ - |
+ Fixes an issue where the folder ``/httpboot/redfish`` was being created
+ with incorrect permissions.
diff --git a/releasenotes/notes/reloadable-301ec2aa421abf66.yaml b/releasenotes/notes/reloadable-301ec2aa421abf66.yaml
new file mode 100644
index 000000000..c952d473e
--- /dev/null
+++ b/releasenotes/notes/reloadable-301ec2aa421abf66.yaml
@@ -0,0 +1,48 @@
+---
+other:
+ - |
+ The following configuration options can now be reloaded without restarting
+ ironic:
+
+ From ``[agent]``: ``memory_consumed_by_agent``, ``stream_raw_images``,
+ ``deploy_logs_*``, ``image_download_source``, ``command_timeout``
+ and ``neutron_agent_poll_interval``.
+
+ From ``[api]``: ``max_limit``, ``public_endpoint``
+ and ``ramdisk_heartbeat_timeout``.
+
+ From ``[conductor]``: ``heartbeat_timeout``,
+ ``force_power_state_during_sync``, ``automated_clean``,
+ ``soft_power_off_timeout``, ``power_state_change_timeout``,
+ ``rescue_password_hash_algorithm`` and ``require_rescue_password_hashed``.
+
+ From ``[DEFAULT]``: ``default_resource_class``, ``force_raw_images``,
+ ``parallel_image_downloads``, ``default_portgroup_mode``
+ and ``require_agent_token``.
+
+ From ``[deploy]``: ``enable_ata_secure_erase``, ``erase_devices_priority``,
+ ``erase_devices_metadata_priority``, ``shred_random_overwrite_iterations``,
+ ``shred_final_overwrite_with_zeros``,
+ ``continue_if_disk_secure_erase_fails``, ``disk_erasure_concurrency``,
+ ``power_off_after_deploy_failure``, ``default_boot_option``,
+ ``default_boot_mode``, ``configdrive_use_object_store``, ``fast_track``,
+ and ``fast_track_timeout``.
+
+ From ``[ipmi]``: ``kill_on_timeout``, ``disable_boot_timeout``,
+ ``command_retry_interval``, ``min_command_interval``, ``debug``
+ and ``additional_retryable_ipmi_errors``.
+
+ From ``[iscsi]``: ``portal_port``, ``conv_flags`` and ``verify_attempts``.
+
+ From ``[neutron]``: ``port_setup_delay``, ``*_network``,
+ ``*_network_security_groups``, ``request_timeout``, ``add_all_ports``
+ and ``dhcpv6_stateful_address_count``.
+
+ From ``[nova]``: ``send_power_notifications``.
+
+ From ``[pxe]``: ``pxe_append_params``, ``default_ephemeral_format``,
+ ``pxe_config_template``, ``uefi_pxe_config_template``,
+ ``pxe_config_template_by_arch``, ``ip_version`` and ``ipxe_use_swift``.
+
+ From ``[redfish]``: ``use_swift``, ``swift_container``,
+ ``swift_object_expiry_timeout`` and ``kernel_append_params``.
diff --git a/releasenotes/notes/remove-locks-first-d12ac27106f800f8.yaml b/releasenotes/notes/remove-locks-first-d12ac27106f800f8.yaml
new file mode 100644
index 000000000..a7e0cb958
--- /dev/null
+++ b/releasenotes/notes/remove-locks-first-d12ac27106f800f8.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ Fixes an issue where ``ironic-conductor`` initialization could return a
+ ``NodeNotLocked`` error for requests requiring locks when the conductor
+ was starting. This was due to the conductor removing locks after it had
+ begun accepting new work. The lock removal has been moved to after
+ database connectivity has been established but before the RPC bus
+ is initialized.
diff --git a/releasenotes/notes/skip-power-sync-for-adoptfail-d2498f1a2e997ed7.yaml b/releasenotes/notes/skip-power-sync-for-adoptfail-d2498f1a2e997ed7.yaml
new file mode 100644
index 000000000..2bff91797
--- /dev/null
+++ b/releasenotes/notes/skip-power-sync-for-adoptfail-d2498f1a2e997ed7.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes the conductor so that power sync operations are not asserted for
+ nodes in the ``adopt failed`` state.
diff --git a/releasenotes/notes/socat-console-port-alloc-ipv6-26760f53f86209d0.yaml b/releasenotes/notes/socat-console-port-alloc-ipv6-26760f53f86209d0.yaml
new file mode 100644
index 000000000..f8087363b
--- /dev/null
+++ b/releasenotes/notes/socat-console-port-alloc-ipv6-26760f53f86209d0.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Fixes an issue where port auto-allocation for the socat console failed to
+ correctly identify the availability of ports on IPv6 networks.
diff --git a/releasenotes/notes/token-reboot-b48b5981a58a30ae.yaml b/releasenotes/notes/token-reboot-b48b5981a58a30ae.yaml
new file mode 100644
index 000000000..9ee2ca70a
--- /dev/null
+++ b/releasenotes/notes/token-reboot-b48b5981a58a30ae.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Removes stale agent tokens when rebooting nodes via the API. This prevents
+ lookup failures for nodes that get rebooted between fast-track operations.
diff --git a/releasenotes/notes/unrescue-token-ae664a17343e0610.yaml b/releasenotes/notes/unrescue-token-ae664a17343e0610.yaml
new file mode 100644
index 000000000..7ce3273e7
--- /dev/null
+++ b/releasenotes/notes/unrescue-token-ae664a17343e0610.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Removes stale agent token on rescue and unrescue operations. Previously it
+ would cause subsequent rescue operations to fail.
diff --git a/releasenotes/notes/unsave-power-state-on-adopt-failed-09194c8269c779de.yaml b/releasenotes/notes/unsave-power-state-on-adopt-failed-09194c8269c779de.yaml
new file mode 100644
index 000000000..cebb92035
--- /dev/null
+++ b/releasenotes/notes/unsave-power-state-on-adopt-failed-09194c8269c779de.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Fixes the preservation of potentially incorrect power state
+ information when the adoption process fails. The power state is now
+ wiped as part of the failure handling process instead of
+ being preserved.
diff --git a/releasenotes/notes/use-image-format-for-memory-check-25b1f06701ccdc47.yaml b/releasenotes/notes/use-image-format-for-memory-check-25b1f06701ccdc47.yaml
new file mode 100644
index 000000000..9684fb9a9
--- /dev/null
+++ b/releasenotes/notes/use-image-format-for-memory-check-25b1f06701ccdc47.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ If the disk format of the image is provided in ``instance_info``, the
+ memory check is skipped when the format is set to ``raw`` and raw image
+ streaming is enabled. This allows streaming raw images provided as a URL
+ rather than through Glance.
diff --git a/releasenotes/notes/vif-port-attach-17a9993bf5c21d69.yaml b/releasenotes/notes/vif-port-attach-17a9993bf5c21d69.yaml
new file mode 100644
index 000000000..f5b791c0c
--- /dev/null
+++ b/releasenotes/notes/vif-port-attach-17a9993bf5c21d69.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Adds the ability for Ironic to attach a node to a specific port or
+ portgroup. This is accomplished by having the node ``vif_attach`` API
+ accept a ``port_uuid`` or ``portgroup_uuid`` key within ``vif_info``. If
+ one is specified, then Ironic will attempt to attach to the specified
+ port/portgroup. Specifying both returns an error.
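A hedged example of the request body this enables (both UUIDs are
placeholders; in practice only one of ``port_uuid``/``portgroup_uuid`` may
be given)::

    POST /v1/nodes/<node>/vifs
    {
        "id": "1974dcfa-836f-41b2-b541-686c100900e5",
        "port_uuid": "b2f96298-5172-45b9-8c4c-3fd338d1e9da"
    }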
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index c1b9c0b46..9fd61cf7a 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -47,10 +47,9 @@ try:
except ImportError:
openstackdocstheme = None
-repository_name = 'openstack/ironic'
-bug_project = 'ironic'
-bug_tag = ''
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
+openstackdocs_repo_name = 'openstack/ironic'
+openstackdocs_use_storyboard = True
+openstackdocs_auto_name = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -105,7 +104,7 @@ exclude_patterns = []
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 3ece1ab9f..6c70ca708 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
:maxdepth: 1
unreleased
+ ussuri
train
stein
rocky
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index 78fd98db6..9f3210299 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -1,14 +1,16 @@
# Andi Chandler <andi@gowling.com>, 2017. #zanata
# Andi Chandler <andi@gowling.com>, 2018. #zanata
+# Andi Chandler <andi@gowling.com>, 2019. #zanata
+# Andi Chandler <andi@gowling.com>, 2020. #zanata
msgid ""
msgstr ""
"Project-Id-Version: ironic\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2019-03-21 20:13+0000\n"
+"POT-Creation-Date: 2020-04-30 16:14+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2018-10-11 09:38+0000\n"
+"PO-Revision-Date: 2020-04-07 08:15+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -16,6 +18,21 @@ msgstr ""
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
msgid ""
+"\"Choooooo! Choooooo!\" The Train is now departing the station. The "
+"OpenStack Bare Metal as a service team is proud to announce the release of "
+"Ironic 13.0.0. This release brings the long desired feature of software RAID "
+"configuration, Redfish virtual media boot support, sensor data improvements, "
+"and numerous bug fixes. We hope you enjoy your ride on the OpenStack Ironic "
+"Train."
+msgstr ""
+"\"Choooooo! Choooooo!\" The Train is now departing the station. The "
+"OpenStack Bare Metal as a service team is proud to announce the release of "
+"Ironic 13.0.0. This release brings the long desired feature of software RAID "
+"configuration, Redfish virtual media boot support, sensor data improvements, "
+"and numerous bug fixes. We hope you enjoy your ride on the OpenStack Ironic "
+"Train."
+
+msgid ""
"\"Dynamic drivers\" is a revamp of how drivers are composed. Rather than a "
"huge matrix of hardware drivers supporting different things, now users "
"select a \"hardware type\" for a machine, and can independently change the "
@@ -66,6 +83,9 @@ msgstr "10.1.0"
msgid "10.1.1"
msgstr "10.1.1"
+msgid "10.1.10"
+msgstr "10.1.10"
+
msgid "10.1.2"
msgstr "10.1.2"
@@ -78,12 +98,48 @@ msgstr "10.1.4"
msgid "10.1.6"
msgstr "10.1.6"
+msgid "10.1.7"
+msgstr "10.1.7"
+
+msgid "10.1.8"
+msgstr "10.1.8"
+
+msgid "10.1.9"
+msgstr "10.1.9"
+
msgid "11.0.0"
msgstr "11.0.0"
msgid "11.1.0"
msgstr "11.1.0"
+msgid "11.1.1"
+msgstr "11.1.1"
+
+msgid "11.1.2"
+msgstr "11.1.2"
+
+msgid "11.1.3"
+msgstr "11.1.3"
+
+msgid "11.1.4"
+msgstr "11.1.4"
+
+msgid "12.0.0"
+msgstr "12.0.0"
+
+msgid "12.1.0"
+msgstr "12.1.0"
+
+msgid "12.1.1"
+msgstr "12.1.1"
+
+msgid "12.1.2"
+msgstr "12.1.2"
+
+msgid "12.1.3"
+msgstr "12.1.3"
+
msgid "4.2.2"
msgstr "4.2.2"
diff --git a/releasenotes/source/ussuri.rst b/releasenotes/source/ussuri.rst
new file mode 100644
index 000000000..52b1355a7
--- /dev/null
+++ b/releasenotes/source/ussuri.rst
@@ -0,0 +1,6 @@
+=============================================
+Ussuri Series (14.0.0 - 15.0.x) Release Notes
+=============================================
+
+.. release-notes::
+ :branch: stable/ussuri
diff --git a/requirements.txt b/requirements.txt
index da87b3037..9f7593b3c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,13 +10,13 @@ WebOb>=1.7.1 # MIT
python-cinderclient!=4.0.0,>=3.3.0 # Apache-2.0
python-neutronclient>=6.7.0 # Apache-2.0
python-glanceclient>=2.8.0 # Apache-2.0
-keystoneauth1>=3.18.0 # Apache-2.0
-ironic-lib>=2.17.1 # Apache-2.0
+keystoneauth1>=4.2.0 # Apache-2.0
+ironic-lib>=4.3.0 # Apache-2.0
python-swiftclient>=3.2.0 # Apache-2.0
pytz>=2013.6 # MIT
stevedore>=1.20.0 # Apache-2.0
pysendfile>=2.0.0;sys_platform!='win32' # MIT
-oslo.concurrency>=3.26.0 # Apache-2.0
+oslo.concurrency>=4.2.0 # Apache-2.0
oslo.config>=5.2.0 # Apache-2.0
oslo.context>=2.19.2 # Apache-2.0
oslo.db>=4.40.0 # Apache-2.0
@@ -34,14 +34,13 @@ pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 # BSD
requests>=2.14.2 # Apache-2.0
rfc3986>=0.3.1 # Apache-2.0
jsonpatch!=1.20,>=1.16 # BSD
-WSME>=0.9.3 # MIT
Jinja2>=2.10 # BSD License (3 clause)
keystonemiddleware>=4.17.0 # Apache-2.0
oslo.messaging>=5.29.0 # Apache-2.0
retrying!=1.3.0,>=1.2.3 # Apache-2.0
oslo.versionedobjects>=1.31.2 # Apache-2.0
-jsonschema>=2.6.0 # MIT
+jsonschema>=3.2.0 # MIT
psutil>=3.2.2 # BSD
futurist>=1.2.0 # Apache-2.0
-tooz>=1.58.0 # Apache-2.0
+tooz>=2.7.0 # Apache-2.0
openstacksdk>=0.37.0 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 1be964ad4..c314e21a5 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -18,6 +18,7 @@ classifier =
Programming Language :: Python :: 3
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
+ Programming Language :: Python :: 3.8
[files]
data_files =
@@ -120,6 +121,7 @@ ironic.hardware.interfaces.network =
noop = ironic.drivers.modules.network.noop:NoopNetwork
ironic.hardware.interfaces.power =
+ agent = ironic.drivers.modules.agent_power:AgentPower
fake = ironic.drivers.modules.fake:FakePower
ibmc = ironic.drivers.modules.ibmc.power:IBMCPower
idrac = ironic.drivers.modules.drac.power:DracPower
@@ -135,6 +137,7 @@ ironic.hardware.interfaces.power =
ironic.hardware.interfaces.raid =
agent = ironic.drivers.modules.agent:AgentRAID
fake = ironic.drivers.modules.fake:FakeRAID
+ ibmc = ironic.drivers.modules.ibmc.raid:IbmcRAID
idrac = ironic.drivers.modules.drac.raid:DracRAID
idrac-wsman = ironic.drivers.modules.drac.raid:DracWSManRAID
ilo5 = ironic.drivers.modules.ilo.raid:Ilo5RAID
@@ -183,22 +186,10 @@ tag_build =
tag_date = 0
tag_svn_revision = 0
-[compile_catalog]
-directory = ironic/locale
-domain = ironic
-
-[update_catalog]
-domain = ironic
-output_dir = ironic/locale
-input_file = ironic/locale/ironic.pot
-
-[extract_messages]
-keywords = _ gettext ngettext l_ lazy_gettext
-mapping_file = babel.cfg
-output_file = ironic/locale/ironic.pot
-
[extras]
guru_meditation_reports =
oslo.reports>=1.18.0 # Apache-2.0
i18n =
oslo.i18n>=3.15.3 # Apache-2.0
+devstack =
+ virtualbmc>=1.4.0 # Apache-2.0
diff --git a/test-requirements.txt b/test-requirements.txt
index 497bcdc99..4d3bdfcea 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,13 +1,11 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-hacking>=3.0.0,<3.1.0 # Apache-2.0
+hacking>=3.0.1,<3.1.0 # Apache-2.0
coverage!=4.4,>=4.0 # Apache-2.0
ddt>=1.0.1 # MIT
doc8>=0.6.0 # Apache-2.0
fixtures>=3.0.0 # Apache-2.0/BSD
-mock>=3.0.0 # BSD
-Babel!=2.4.0,>=2.3.4 # BSD
PyMySQL>=0.7.6 # MIT License
iso8601>=0.1.11 # MIT
oslo.reports>=1.18.0 # Apache-2.0
@@ -15,10 +13,9 @@ oslotest>=3.2.0 # Apache-2.0
stestr>=1.0.0 # Apache-2.0
psycopg2>=2.7.3 # LGPL/ZPL
testtools>=2.2.0 # MIT
-testresources>=2.0.0 # Apache-2.0/BSD
-testscenarios>=0.4 # Apache-2.0/BSD
WebTest>=2.0.27 # MIT
bashate>=0.5.1 # Apache-2.0
+pycodestyle>=2.0.0,<2.6.0 # MIT
flake8-import-order>=0.17.1 # LGPLv3
Pygments>=2.2.0 # BSD
bandit!=1.6.0,>=1.1.0,<2.0.0 # Apache-2.0
diff --git a/tools/bandit.yml b/tools/bandit.yml
index d99694d39..028d1a214 100644
--- a/tools/bandit.yml
+++ b/tools/bandit.yml
@@ -89,7 +89,6 @@ tests:
# (optional) list skipped test IDs here, eg '[B101, B406]':
skips:
- B104
- - B303
- B604
### (optional) plugin settings - some test plugins require configuration data
diff --git a/tools/test-setup.sh b/tools/test-setup.sh
index 186507e1d..c82f4716a 100755
--- a/tools/test-setup.sh
+++ b/tools/test-setup.sh
@@ -23,8 +23,8 @@ sudo -H mysqladmin -u root password $DB_ROOT_PW
sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e "
DELETE FROM mysql.user WHERE User='';
FLUSH PRIVILEGES;
- GRANT ALL PRIVILEGES ON *.*
- TO '$DB_USER'@'%' identified by '$DB_PW' WITH GRANT OPTION;"
+ CREATE USER '$DB_USER'@'%' IDENTIFIED BY '$DB_PW';
+ GRANT ALL PRIVILEGES ON *.* TO '$DB_USER'@'%' WITH GRANT OPTION;"
# Now create our database.
mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e "
diff --git a/tox.ini b/tox.ini
index 2ce36f9f5..3395e567d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-minversion = 3.1.0
+minversion = 3.2.1
skipsdist = True
envlist = py3,pep8
ignore_basepython_conflict=true
@@ -109,8 +109,10 @@ deps =
commands = {posargs}
[flake8]
+# [E129] visually indented line with same indent as next logical line
+# [E741] ambiguous variable name
# [W503] Line break before binary operator.
-ignore = E129,W503
+ignore = E129,E741,W503
filename = *.py,app.wsgi
exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build
import-order-style = pep8
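
For reference, E741 flags single-letter names that are easy to misread;
ignoring it above means code like the first line below is no longer
reported. Illustrative only:

    # E741 reports single-letter names that read like digits (l, O, I):
    l = 10           # would trigger E741 if it were not ignored above
    line_count = 10  # the unambiguous spelling the check pushes toward
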
@@ -120,9 +122,19 @@ max-complexity=18
# [H203] Use assertIs(Not)None to check for None.
# [H204] Use assert(Not)Equal to check for equality.
# [H205] Use assert(Greater|Less)(Equal) for comparison.
-# TODO(dtantsur): [H210] Require ‘autospec’, ‘spec’, or ‘spec_set’ in mock.patch/mock.patch.object calls
+# [H210] Require ‘autospec’, ‘spec’, or ‘spec_set’ in mock.patch/mock.patch.object calls
# [H904] Delay string interpolations at logging calls.
-enable-extensions=H106,H203,H204,H205,H904
+enable-extensions=H106,H203,H204,H205,H210,H904
+# TODO(rpittau) remove the ignores below when we're ready to apply H210 to
+# the various modules. This can be done in batches by changing the filters.
+per-file-ignores =
+ ironic/cmd/__init__.py:E402
+ ironic/tests/base.py:E402
+ ironic/tests/unit/api/controllers/*:H210
+ ironic/tests/unit/common/*:H210
+ ironic/tests/unit/drivers/modules/test_console_utils.py:H210
+ ironic/tests/unit/drivers/modules/ilo/*:H210
+ ironic/tests/unit/drivers/modules/irmc/*:H210
[hacking]
import_exceptions = testtools.matchers, ironic.common.i18n
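
With H210 enabled above, every mock.patch / mock.patch.object call needs
autospec, spec, or spec_set so the mock enforces the real callable's
signature. A minimal, hypothetical sketch of a compliant patch:

    import os
    from unittest import mock

    # autospec=True makes the mock mirror os.path.exists' real signature,
    # so calls with wrong arguments fail loudly instead of passing silently.
    with mock.patch.object(os.path, 'exists', autospec=True) as fake_exists:
        fake_exists.return_value = True
        assert os.path.exists('/some/path')
        fake_exists.assert_called_once_with('/some/path')
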
@@ -142,4 +154,3 @@ deps =
[testenv:bandit]
deps = -r{toxinidir}/test-requirements.txt
commands = bandit -r ironic -x tests -n5 -ll -c tools/bandit.yml
-
diff --git a/zuul.d/ironic-jobs.yaml b/zuul.d/ironic-jobs.yaml
index b381a1282..91f45a2c7 100644
--- a/zuul.d/ironic-jobs.yaml
+++ b/zuul.d/ironic-jobs.yaml
@@ -4,6 +4,7 @@
description: Base job for devstack/tempest based ironic jobs.
parent: devstack-tempest
nodeset: openstack-single-node-bionic
+ post-run: playbooks/ci-workarounds/get_tftpd.yaml
timeout: 10800
required-projects:
- openstack/ironic
@@ -21,57 +22,61 @@
- ^ironic/tests/.*$
- ^releasenotes/.*$
- ^setup.cfg$
+ - ^test-requirements.txt$
- ^tools/.*$
- ^tox.ini$
vars:
tox_envlist: all
+ tempest_test_timeout: 2400
tempest_test_regex: ironic_tempest_plugin.tests.scenario
tempest_concurrency: 1
devstack_localrc:
DEFAULT_INSTANCE_TYPE: baremetal
FORCE_CONFIG_DRIVE: True
      INSTALL_TEMPEST: False # Don't install a tempest package globally
- TEMPEST_PLUGINS: "{{ ansible_user_dir }}/src/opendev.org/openstack/ironic-tempest-plugin"
VIRT_DRIVER: ironic
- BUILD_TIMEOUT: 720
+ BUILD_TIMEOUT: 1800
IRONIC_BAREMETAL_BASIC_OPS: True
IRONIC_BUILD_DEPLOY_RAMDISK: False
- IRONIC_CALLBACK_TIMEOUT: 600
+ IRONIC_CALLBACK_TIMEOUT: 1800
+ IRONIC_PXE_BOOT_RETRY_TIMEOUT: 900
IRONIC_DEPLOY_DRIVER: ipmi
IRONIC_INSPECTOR_BUILD_RAMDISK: False
- IRONIC_TEMPEST_BUILD_TIMEOUT: 720
+ IRONIC_INSPECTOR_TEMPEST_INTROSPECTION_TIMEOUT: 1200
+ IRONIC_TEMPEST_BUILD_TIMEOUT: 1800
IRONIC_TEMPEST_WHOLE_DISK_IMAGE: False
- IRONIC_VM_COUNT: 1
+ IRONIC_VM_COUNT: 2
IRONIC_VM_EPHEMERAL_DISK: 1
- IRONIC_VM_SPECS_RAM: 2048
+ IRONIC_VM_SPECS_RAM: 3072
IRONIC_VM_LOG_DIR: '{{ devstack_base_dir }}/ironic-bm-logs'
# NOTE(dtantsur): in some jobs we end up with 12 disks total, so reduce
      # each of them. We don't need all 10 GiB for CirrOS anyway.
IRONIC_VM_SPECS_DISK: 4
+ IRONIC_VM_SPECS_CPU: 2
IRONIC_DEFAULT_DEPLOY_INTERFACE: iscsi
Q_AGENT: openvswitch
Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
SERVICE_TIMEOUT: 90
devstack_plugins:
ironic: https://opendev.org/openstack/ironic
+ tempest_plugins:
+ - ironic-tempest-plugin
zuul_copy_output:
'{{ devstack_base_dir }}/ironic-bm-logs': 'logs'
'{{ devstack_base_dir }}/data/networking-generic-switch/netmiko_session.log': 'logs'
devstack_services:
- q-agt: false
- q-dhcp: false
- q-l3: false
- q-meta: false
- q-metering: false
- q-svc: false
- neutron-api: true
- neutron-agent: true
- neutron-dhcp: true
- neutron-l3: true
- neutron-metadata-agent: true
- neutron-metering: true
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
+ q-svc: true
+ ovn-controller: false
+ ovn-northd: false
+ q-ovn-metadata-agent: false
c-api: False
c-bak: False
@@ -86,7 +91,9 @@
- job:
name: ironic-standalone
- description: Test ironic standalone
+ description:
+    Test ironic standalone configured with the ipmi hardware type, iscsi and
+    direct deploy interfaces, and rescue enabled.
parent: ironic-base
irrelevant-files:
- ^.*\.rst$
@@ -108,13 +115,75 @@
IRONIC_AUTOMATED_CLEAN_ENABLED: False
IRONIC_DEFAULT_DEPLOY_INTERFACE: direct
IRONIC_DEFAULT_RESCUE_INTERFACE: agent
- IRONIC_ENABLED_DEPLOY_INTERFACES: "iscsi,direct,ansible"
+ IRONIC_ENABLED_DEPLOY_INTERFACES: "iscsi,direct"
IRONIC_ENABLED_RESCUE_INTERFACES: "fake,agent,no-rescue"
+ IRONIC_JSON_RPC_AUTH_STRATEGY: 'http_basic'
IRONIC_RAMDISK_TYPE: tinyipa
IRONIC_RPC_TRANSPORT: json-rpc
IRONIC_VM_SPECS_RAM: 384
+ IRONIC_VM_SPECS_CPU: 1
+ IRONIC_VM_COUNT: 6
+ IRONIC_VM_VOLUME_COUNT: 2
+ # We're using a lot of disk space in this job. Some testing nodes have
+ # a small root partition, so use /opt which is mounted from a bigger
+ # ephemeral partition on such nodes
+ LIBVIRT_STORAGE_POOL_PATH: /opt/libvirt/images
+ SWIFT_ENABLE_TEMPURLS: True
+ SWIFT_TEMPURL_KEY: secretkey
+ devstack_services:
+ n-api: False
+ n-api-meta: False
+ n-cauth: False
+ n-cond: False
+ n-cpu: False
+ n-novnc: False
+ n-obj: False
+ n-sch: False
+ nova: False
+ placement-api: False
+ s-account: True
+ s-container: True
+ s-object: True
+ s-proxy: True
+
+- job:
+ name: ironic-standalone-redfish
+ parent: ironic-base
+ description:
+    Test ironic standalone configured with the redfish hardware type; iscsi,
+    direct and ansible deploy interfaces; rescue is not enabled.
+ required-projects:
+ - openstack/sushy-tools
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^api-ref/.*$
+ - ^doc/.*$
+ - ^install-guide/.*$
+ - ^ironic/locale/.*$
+ - ^ironic/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^test-requirements.txt$
+ - ^tools/.*$
+ - ^tox.ini$
+ vars:
+ tempest_test_regex: ironic_standalone
+ tempest_concurrency: 2
+ devstack_localrc:
+ FORCE_CONFIG_DRIVE: False
+ IRONIC_AUTOMATED_CLEAN_ENABLED: False
+ IRONIC_DEPLOY_DRIVER: redfish
+ IRONIC_ENABLED_HARDWARE_TYPES: redfish
+ IRONIC_ENABLED_POWER_INTERFACES: redfish
+ IRONIC_ENABLED_MANAGEMENT_INTERFACES: redfish
+ IRONIC_DEFAULT_DEPLOY_INTERFACE: direct
+ IRONIC_ENABLED_DEPLOY_INTERFACES: "iscsi,direct,ansible"
+ IRONIC_RPC_TRANSPORT: json-rpc
+ IRONIC_RAMDISK_TYPE: tinyipa
IRONIC_VM_COUNT: 6
IRONIC_VM_VOLUME_COUNT: 2
+ IRONIC_VM_SPECS_RAM: 384
+ IRONIC_VM_SPECS_CPU: 1
# We're using a lot of disk space in this job. Some testing nodes have
# a small root partition, so use /opt which is mounted from a bigger
# ephemeral partition on such nodes
@@ -141,7 +210,6 @@
name: ironic-tempest-partition-bios-redfish-pxe
description: "Deploy ironic node over PXE using BIOS boot mode"
parent: ironic-base
- timeout: 5400
required-projects:
- openstack/sushy-tools
vars:
@@ -164,6 +232,10 @@
SWIFT_ENABLE_TEMPURLS: True
SWIFT_TEMPURL_KEY: secretkey
IRONIC_AUTOMATED_CLEAN_ENABLED: False
+      # Ironic has to master (build) a new image, which CAN take longer and
+      # makes this job VERY sensitive to heavy disk IO on the underlying
+      # hypervisor/cloud.
+ IRONIC_CALLBACK_TIMEOUT: 800
devstack_services:
s-account: True
s-container: True
@@ -177,8 +249,9 @@
required-projects:
- openstack/ironic-inspector
vars:
- # NOTE(dtantsur): the inspector job includes booting an instance too
- tempest_test_regex: Inspector
+ # NOTE(dtantsur): the inspector job includes booting an instance too.
+    # Excluding the abort tests since they hit "node locked" too often.
+ tempest_test_regex: test_baremetal_introspection
devstack_localrc:
IRONIC_BOOT_MODE: bios
IRONIC_INSPECTOR_MANAGED_BOOT: True
@@ -210,7 +283,6 @@
name: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool
description: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool
parent: ironic-base
- timeout: 9600
vars:
devstack_localrc:
IRONIC_DEFAULT_DEPLOY_INTERFACE: direct
@@ -228,10 +300,9 @@
s-proxy: True
- job:
- name: ironic-tempest-ipa-wholedisk-bios-pxe_snmp
- description: ironic-tempest-ipa-wholedisk-bios-pxe_snmp
+ name: ironic-tempest-wholedisk-bios-snmp-pxe
+ description: SNMP power, no-op management, netboot and whole disk images.
parent: ironic-base
- timeout: 5400
vars:
devstack_localrc:
IRONIC_ENABLED_HARDWARE_TYPES: snmp
@@ -244,34 +315,28 @@
name: ironic-tempest-ipa-partition-uefi-pxe_ipmitool
description: ironic-tempest-ipa-partition-uefi-pxe_ipmitool
parent: ironic-base
- timeout: 5400
+ nodeset: openstack-single-node-focal
vars:
devstack_localrc:
IRONIC_BOOT_MODE: uefi
- IRONIC_VM_SPECS_RAM: 3096
+ IRONIC_VM_SPECS_RAM: 4096
IRONIC_AUTOMATED_CLEAN_ENABLED: False
IRONIC_DEFAULT_BOOT_OPTION: netboot
- job:
name: ironic-tempest-ipa-partition-pxe_ipmitool
- description: ironic-tempest-ipa-partition-pxe_ipmitool
+ description: ironic-tempest-ipa-partition-pxe_ipmitool that also tests cleaning.
parent: ironic-base
- timeout: 5400
vars:
devstack_localrc:
- # This test runs cleaning by default, and with a larger
- # IPA image means that it takes longer to boot for deploy
- # and cleaning. As such, CI performance variations can
- # cause this job to fail easily due to the extra steps
- # and boot cycle of the cleaning operation.
- IRONIC_TEMPEST_BUILD_TIMEOUT: 850
IRONIC_DEFAULT_BOOT_OPTION: netboot
+ IRONIC_AUTOMATED_CLEAN_ENABLED: True
+
- job:
name: ironic-tempest-bfv
description: ironic-tempest-bfv
parent: ironic-base
- timeout: 9600
vars:
tempest_test_regex: baremetal_boot_from_volume
devstack_localrc:
@@ -322,7 +387,6 @@
name: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-indirect
description: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-indirect
parent: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool
- timeout: 5400
vars:
devstack_localrc:
IRONIC_AGENT_IMAGE_DOWNLOAD_SOURCE: http
@@ -334,7 +398,6 @@
name: ironic-tempest-ipa-partition-bios-agent_ipmitool-indirect
description: ironic-tempest-ipa-partition-bios-agent_ipmitool-indirect
parent: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool
- timeout: 5400
vars:
devstack_localrc:
IRONIC_AGENT_IMAGE_DOWNLOAD_SOURCE: http
@@ -348,7 +411,6 @@
name: ironic-tempest-functional-python3
description: ironic-tempest-functional-python3
parent: ironic-base
- timeout: 5400
pre-run: playbooks/ci-workarounds/etc-neutron.yaml
vars:
tempest_test_regex: ironic_tempest_plugin.tests.api
@@ -379,19 +441,12 @@
q-meta: False
q-metering: False
q-svc: False
- neutron-api: False
- neutron-agent: False
- neutron-dhcp: False
- neutron-l3: False
- neutron-metadata-agent: False
- neutron-metering: False
- job:
name: ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode
description: ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode
parent: tempest-multinode-full-py3
pre-run: playbooks/ci-workarounds/pre.yaml
- timeout: 10800
required-projects:
- openstack/ironic
- openstack/ironic-python-agent
@@ -416,7 +471,7 @@
vars:
tox_envlist: all
tempest_concurrency: 3
- tempest_test_regex: "(ironic_tempest_plugin.tests.scenario|test_schedule_to_all_nodes)"
+ tempest_test_regex: "ironic_tempest_plugin.tests.scenario"
tempest_test_timeout: 2400
devstack_localrc:
BUILD_TIMEOUT: 2400
@@ -446,11 +501,12 @@
IRONIC_TEMPEST_BUILD_TIMEOUT: 600
IRONIC_TEMPEST_WHOLE_DISK_IMAGE: True
IRONIC_USE_LINK_LOCAL: True
- IRONIC_VM_COUNT: 6
+ IRONIC_VM_COUNT: 3
IRONIC_VM_EPHEMERAL_DISK: 0
IRONIC_VM_LOG_DIR: '{{ devstack_base_dir }}/ironic-bm-logs'
- IRONIC_VM_SPECS_RAM: 384
+ IRONIC_VM_SPECS_RAM: 512
IRONIC_VM_SPECS_DISK: 4
+ IRONIC_VM_SPECS_CPU: 1
OVS_BRIDGE_MAPPINGS: 'mynetwork:brbm,public:br-infra'
OVS_PHYSICAL_BRIDGE: brbm
PHYSICAL_NETWORK: mynetwork
@@ -458,9 +514,9 @@
Q_AGENT: openvswitch
Q_ML2_TENANT_NETWORK_TYPE: vlan
Q_PLUGIN: ml2
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
SWIFT_ENABLE_TEMPURLS: True
SWIFT_TEMPURL_KEY: secretkey
- TEMPEST_PLUGINS: "{{ ansible_user_dir }}/src/opendev.org/openstack/ironic-tempest-plugin"
TENANT_VLAN_RANGE: 100:150
VIRT_DRIVER: ironic
# We're using a lot of disk space in this job. Some testing nodes have
@@ -471,6 +527,8 @@
devstack_plugins:
ironic: https://opendev.org/openstack/ironic
networking-generic-switch: https://opendev.org/openstack/networking-generic-switch
+ tempest_plugins:
+ - ironic-tempest-plugin
zuul_copy_output:
'{{ devstack_base_dir }}/ironic-bm-logs': 'logs'
'{{ devstack_base_dir }}/data/networking-generic-switch/netmiko_session.log': 'logs'
@@ -506,6 +564,9 @@
q-meta: True
q-metering: True
q-svc: True
+ ovn-controller: False
+ ovn-northd: False
+ q-ovn-metadata-agent: False
rabbit: True
group-vars:
subnode:
@@ -523,14 +584,17 @@
IRONIC_PROVISION_NETWORK_NAME: ironic-provision
IRONIC_RAMDISK_TYPE: tinyipa
IRONIC_USE_LINK_LOCAL: True
- IRONIC_VM_COUNT: 6
+ IRONIC_VM_COUNT: 3
IRONIC_VM_EPHEMERAL_DISK: 0
IRONIC_VM_LOG_DIR: '{{ devstack_base_dir }}/ironic-bm-logs'
IRONIC_VM_NETWORK_BRIDGE: sub1brbm
- IRONIC_VM_SPECS_RAM: 384
+ IRONIC_VM_SPECS_RAM: 512
+ IRONIC_VM_SPECS_DISK: 4
+ IRONIC_VM_SPECS_CPU: 1
OVS_BRIDGE_MAPPINGS: 'mynetwork:sub1brbm,public:br-infra'
OVS_PHYSICAL_BRIDGE: sub1brbm
PHYSICAL_NETWORK: mynetwork
+ Q_AGENT: openvswitch
Q_ML2_TENANT_NETWORK_TYPE: vlan
VIRT_DRIVER: ironic
PUBLIC_BRIDGE: br-infra
@@ -543,6 +607,9 @@
cinder: False
q-agt: True
+ ovn-controller: False
+ ovn-northd: False
+ q-ovn-metadata-agent: False
n-cpu: True
- job:
@@ -568,14 +635,17 @@
name: ironic-tempest-ipa-partition-uefi-pxe-grub2
description: Ironic tempest scenario test utilizing PXE, UEFI, and Grub2
parent: ironic-base
+ nodeset: openstack-single-node-focal
vars:
devstack_localrc:
IRONIC_ENABLED_HARDWARE_TYPES: ipmi
IRONIC_ENABLED_BOOT_INTERFACES: pxe
IRONIC_IPXE_ENABLED: False
IRONIC_BOOT_MODE: uefi
+ IRONIC_RAMDISK_TYPE: tinyipa
IRONIC_AUTOMATED_CLEAN_ENABLED: False
IRONIC_DEFAULT_BOOT_OPTION: netboot
+ IRONIC_VM_SPECS_RAM: 4096
- job:
# Security testing for known issues
@@ -602,9 +672,7 @@
- job:
name: ironic-tempest-ipa-wholedisk-bios-ipmi-direct-dib
parent: ironic-base
- timeout: 9600
vars:
- tempest_test_timeout: 2400
devstack_services:
s-account: True
s-container: True
@@ -614,23 +682,79 @@
IRONIC_DEFAULT_DEPLOY_INTERFACE: direct
IRONIC_DIB_RAMDISK_OS: centos8
IRONIC_TEMPEST_WHOLE_DISK_IMAGE: True
- IRONIC_TEMPEST_BUILD_TIMEOUT: 900
IRONIC_VM_EPHEMERAL_DISK: 0
IRONIC_VM_INTERFACE_COUNT: 1
IRONIC_AUTOMATED_CLEAN_ENABLED: False
SWIFT_ENABLE_TEMPURLS: True
SWIFT_TEMPURL_KEY: secretkey
+- job:
+ name: ironic-tempest-ipxe-ipv6
+ description: ironic-tempest-ipxe-ipv6
+ parent: ironic-base
+ required-projects:
+ - openstack/networking-generic-switch
+ vars:
+ tempest_test_timeout: 2400
+ devstack_services:
+ # NOTE(TheJulia): It seems our devstack plugin does not play well
+ # with multitenancy and the newer neutron service names.
+ q-agt: True
+ q-dhcp: True
+ q-l3: True
+ q-meta: False
+ q-metering: False
+ q-svc: True
+ swift: True
+ devstack_plugins:
+ ironic: https://opendev.org/openstack/ironic
+ networking-generic-switch: https://opendev.org/openstack/networking-generic-switch
+      # NOTE(TheJulia): Nova's default behavior is to rely upon stack defaults;
+      # v6 needs to be explicit. This is the best place to wire it in.
+ tempest_test_regex: BaremetalSingleTenant
+ devstack_localrc:
+ IRONIC_AGENT_IMAGE_DOWNLOAD_SOURCE: http
+ IPV6_ENABLED: True
+ IP_VERSION: 6
+ SERVICE_IP_VERSION: 6
+ IRONIC_AUTOMATED_CLEAN_ENABLED: False
+ IRONIC_ENABLED_BOOT_INTERFACES: ipxe,pxe
+ IRONIC_DEFAULT_BOOT_INTERFACE: ipxe
+ IRONIC_IPXE_ENABLED: True
+ IRONIC_PROVISION_NETWORK_NAME: ironic-provision
+ OVS_PHYSICAL_BRIDGE: brbm
+ PHYSICAL_NETWORK: mynetwork
+ TENANT_VLAN_RANGE: 100:150
+ IRONIC_ENABLED_NETWORK_INTERFACES: flat,neutron
+ IRONIC_NETWORK_INTERFACE: neutron
+      IRONIC_DEFAULT_DEPLOY_INTERFACE: direct
+      IRONIC_DEFAULT_RESCUE_INTERFACE: no-rescue
+ IRONIC_USE_LINK_LOCAL: True
+ IRONIC_TEMPEST_WHOLE_DISK_IMAGE: True
+ IRONIC_VM_EPHEMERAL_DISK: 0
+      # This ramdisk will swap and needs to move to tinycore soon.
+ IRONIC_VM_SPECS_CPU: 2
+ Q_PLUGIN: ml2
+ ENABLE_TENANT_VLANS: True
+ Q_ML2_TENANT_NETWORK_TYPE: vlan
+ OVS_BRIDGE_MAPPINGS: "public:br-ex,mynetwork:brbm"
+ USE_PROVIDER_NETWORKING: True
+ PUBLIC_PHYSICAL_NETWORK: public
+ PUBLIC_PROVIDERNET_TYPE: flat
+ Q_USE_PROVIDERNET_FOR_PUBLIC: True
+ BUILD_TIMEOUT: 2000
+ IRONIC_TEMPEST_BUILD_TIMEOUT: 2000
+ IRONIC_PING_TIMEOUT: 1440
+
# NOTE(rpittau): OLD TINYIPA JOBS
# Those jobs are used by other projects, we leave them here until
# we can convert them to dib.
# Used by devstack/ironic/nova/neutron
- job:
- name: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa
- description: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa
+ name: ironic-tempest-bios-ipmi-direct-tinyipa
+ description: ironic-tempest-wholedisk-bios-ipmi-direct-tinyipa that also tests cleaning.
parent: ironic-base
- timeout: 5400
vars:
devstack_localrc:
IRONIC_DEFAULT_DEPLOY_INTERFACE: direct
@@ -647,3 +771,81 @@
s-container: True
s-object: True
s-proxy: True
+
+- job:
+ name: ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa
+  description: Alias for ironic-tempest-bios-ipmi-direct-tinyipa
+ parent: ironic-tempest-bios-ipmi-direct-tinyipa
+
+- job:
+ name: ironic-grenade
+ parent: grenade
+ timeout: 10800
+ irrelevant-files:
+ - ^driver-requirements.txt$
+ - ^.*\.rst$
+ - ^api-ref/.*$
+ - ^doc/.*$
+ - ^install-guide/.*$
+ - ^ironic/locale/.*$
+ - ^ironic/tests/.*$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tools/.*$
+ - ^tox.ini$
+ required-projects:
+ - openstack/grenade
+ - openstack/ironic
+ - openstack/ironic-python-agent
+ - openstack/ironic-python-agent-builder
+ - openstack/ironic-tempest-plugin
+ - openstack/virtualbmc
+ vars:
+ grenade_devstack_localrc:
+ shared:
+ DEFAULT_INSTANCE_TYPE: baremetal
+ FORCE_CONFIG_DRIVE: True
+ INSTALL_TEMPEST: False
+ VIRT_DRIVER: ironic
+ BUILD_TIMEOUT: 1200
+ IRONIC_TEMPEST_BUILD_TIMEOUT: 1200
+ IRONIC_BAREMETAL_BASIC_OPS: True
+ IRONIC_BUILD_DEPLOY_RAMDISK: False
+ IRONIC_CALLBACK_TIMEOUT: 600
+ IRONIC_DEPLOY_DRIVER: ipmi
+ IRONIC_INSPECTOR_BUILD_RAMDISK: False
+ IRONIC_RAMDISK_TYPE: tinyipa
+ IRONIC_TEMPEST_WHOLE_DISK_IMAGE: False
+ IRONIC_VM_COUNT: 6
+ IRONIC_VM_EPHEMERAL_DISK: 1
+ IRONIC_VM_SPECS_RAM: 384
+ IRONIC_DEFAULT_BOOT_OPTION: netboot
+ IRONIC_AUTOMATED_CLEAN_ENABLED: False
+ IRONIC_REQUIRE_AGENT_TOKEN: False
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ EBTABLES_RACE_FIX: True
+ LIBVIRT_STORAGE_POOL_PATH: /opt/libvirt/images
+ old:
+ IRONIC_VM_LOG_DIR: '{{ devstack_bases.old }}/ironic-bm-logs'
+ grenade_localrc:
+ BASE_RUN_SMOKE: False
+ grenade_tempest_concurrency: 1
+ grenade_test_timeout: 2600
+ devstack_plugins:
+ ironic: https://opendev.org/openstack/ironic
+ devstack_services:
+ c-api: False
+ c-bak: False
+ c-sch: False
+ c-vol: False
+ cinder: False
+ ir-api: True
+ ir-cond: True
+ tempest_plugins:
+ - ironic-tempest-plugin
+ tempest_test_regex: ironic_tempest_plugin.tests.scenario
+ tox_envlist: all
+ tempest_concurrency: 1
+ zuul_copy_output:
+ '{{ devstack_bases.old }}/ironic-bm-logs': logs
diff --git a/zuul.d/legacy-ironic-jobs.yaml b/zuul.d/legacy-ironic-jobs.yaml
index 1ae8342b4..cfc2ca267 100644
--- a/zuul.d/legacy-ironic-jobs.yaml
+++ b/zuul.d/legacy-ironic-jobs.yaml
@@ -1,31 +1,3 @@
-- job:
- name: legacy-ironic-dsvm-base
- parent: legacy-dsvm-base
- irrelevant-files:
- - ^driver-requirements.txt$
- - ^.*\.rst$
- - ^api-ref/.*$
- - ^doc/.*$
- - ^install-guide/.*$
- - ^ironic/locale/.*$
- - ^ironic/tests/.*$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tools/.*$
- - ^tox.ini$
- # NOTE: When adding to 'required-projects' also need to add a corresponding
- # "export PROJECTS=..." line in all the playbooks/legacy/*/run.yaml files
- required-projects:
- - openstack/ironic
- - openstack/ironic-lib
- - openstack/ironic-python-agent
- - openstack/ironic-python-agent-builder
- - openstack/ironic-tempest-plugin
- - openstack/python-ironicclient
- - openstack/virtualbmc
- pre-run: playbooks/legacy/ironic-dsvm-base/pre.yaml
- post-run: playbooks/legacy/ironic-dsvm-base/post.yaml
-
# TODO(TheJulia): When we migrate to a non-legacy job, we will need to set the BUILD_TIMEOUT
# and the DEVSTACK_GATE_TEMPEST_BAREMETAL_BUILD_TIMEOUT to 1200 seconds to prevent
# needless CI job timeouts as the scale of the job is greater than a normal test jobs.
@@ -57,22 +29,6 @@
pre-run: playbooks/legacy/ironic-dsvm-base-multinode/pre.yaml
post-run: playbooks/legacy/ironic-dsvm-base-multinode/post.yaml
-# TODO(TheJulia): When we migrate to a non-legacy job, we will need to set the BUILD_TIMEOUT
-# and the DEVSTACK_GATE_TEMPEST_BAREMETAL_BUILD_TIMEOUT to 1200 seconds to prevent
-# needless CI job timeouts as the scale of the job is greater than a normal test jobs.
-- job:
- name: ironic-grenade-dsvm
- parent: legacy-ironic-dsvm-base
- run: playbooks/legacy/grenade-dsvm-ironic/run.yaml
- timeout: 10800
- required-projects:
- - openstack/grenade
- - openstack/devstack-gate
- - openstack/ironic
- - openstack/ironic-lib
- - openstack/ironic-python-agent
- - openstack/python-ironicclient
- - openstack/virtualbmc
- job:
name: ironic-grenade-dsvm-multinode-multitenant
@@ -88,4 +44,3 @@
- openstack/networking-generic-switch
- openstack/python-ironicclient
- openstack/virtualbmc
-
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index d63bd36a3..75b72448b 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -3,7 +3,7 @@
- check-requirements
- openstack-cover-jobs
- openstack-lower-constraints-jobs
- - openstack-python3-ussuri-jobs
+ - openstack-python3-victoria-jobs
- periodic-stable-jobs
- publish-openstack-docs-pti
- release-notes-jobs-python3
@@ -11,30 +11,33 @@
jobs:
- ironic-tox-unit-with-driver-libs
- ironic-standalone
+ - ironic-standalone-redfish:
+ voting: false
- ironic-tempest-functional-python3
- - ironic-grenade-dsvm
+ - ironic-grenade
# Temporary disable voting because of end of cycle CI instability.
- ironic-grenade-dsvm-multinode-multitenant:
voting: false
- ironic-tempest-partition-bios-redfish-pxe
- ironic-tempest-partition-uefi-redfish-vmedia
+ - ironic-tempest-wholedisk-bios-snmp-pxe
- ironic-tempest-ipa-partition-pxe_ipmitool
- ironic-tempest-ipa-partition-uefi-pxe_ipmitool
- ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode
- - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa
+ - ironic-tempest-bios-ipmi-direct-tinyipa
- ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-indirect
- ironic-tempest-ipa-partition-bios-agent_ipmitool-indirect
- ironic-tempest-bfv
- ironic-tempest-ipa-partition-uefi-pxe-grub2
+ - ironic-tempest-ipxe-ipv6:
+ voting: false
- metalsmith-integration-glance-localboot-centos7
# Non-voting jobs
- ironic-tox-bandit:
voting: false
- - ironic-tempest-ipa-wholedisk-bios-pxe_snmp:
- voting: false
- ironic-inspector-tempest:
voting: false
- - ironic-inspector-tempest-managed:
+ - ironic-inspector-tempest-managed-non-standalone:
voting: false
- ironic-inspector-tempest-partition-bios-redfish-vmedia:
voting: false
@@ -50,15 +53,16 @@
- ironic-tox-unit-with-driver-libs
- ironic-standalone
- ironic-tempest-functional-python3
- - ironic-grenade-dsvm
+ - ironic-grenade
# removing from voting due to end of cycle gate instability.
# - ironic-grenade-dsvm-multinode-multitenant
- ironic-tempest-partition-bios-redfish-pxe
- ironic-tempest-partition-uefi-redfish-vmedia
+ - ironic-tempest-wholedisk-bios-snmp-pxe
- ironic-tempest-ipa-partition-pxe_ipmitool
- ironic-tempest-ipa-partition-uefi-pxe_ipmitool
- ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode
- - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa
+ - ironic-tempest-bios-ipmi-direct-tinyipa
- ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-indirect
- ironic-tempest-ipa-partition-bios-agent_ipmitool-indirect
- ironic-tempest-bfv