author     James Falcon <james.falcon@canonical.com>  2022-07-01 10:07:15 -0500
committer  git-ubuntu importer <ubuntu-devel-discuss@lists.ubuntu.com>  2022-07-01 16:34:09 +0000
commit     cff8a8b47acf7048ad08bd121e677fb86e73635b (patch)
tree       011f38ddb27b3df74d78115ebe59be951558edea
parent     15d691e3b0b32c67b0589665b49e9d2755296d1b (diff)
download   cloud-init-git-cff8a8b47acf7048ad08bd121e677fb86e73635b.tar.gz
22.2-64-g1fcd55d6-0ubuntu1~22.10.1 (patches unapplied)
Imported using git-ubuntu import.
-rw-r--r--  .github/workflows/check_format.yml | 51
-rw-r--r--  .pylintrc | 3
-rw-r--r--  .travis.yml | 20
-rw-r--r--  .vscode/extensions.json | 5
-rw-r--r--  CONTRIBUTING.rst | 4
-rw-r--r--  ChangeLog | 49
-rw-r--r--  Makefile | 9
-rw-r--r--  README.md | 2
-rw-r--r--  bash_completion/cloud-init | 4
-rw-r--r--  cloudinit/analyze/show.py | 31
-rw-r--r--  cloudinit/apport.py | 12
-rwxr-xr-x  cloudinit/cmd/devel/hotplug_hook.py | 4
-rwxr-xr-x  cloudinit/cmd/devel/logs.py | 15
-rwxr-xr-x  cloudinit/cmd/devel/net_convert.py | 10
-rwxr-xr-x  cloudinit/cmd/main.py | 10
-rwxr-xr-x  cloudinit/cmd/query.py | 1
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 2
-rw-r--r--  cloudinit/config/cc_chef.py | 25
-rw-r--r--  cloudinit/config/cc_debug.py | 2
-rw-r--r--  cloudinit/config/cc_growpart.py | 2
-rw-r--r--  cloudinit/config/cc_keyboard.py | 6
-rw-r--r--  cloudinit/config/cc_ntp.py | 13
-rw-r--r--  cloudinit/config/cc_phone_home.py | 15
-rw-r--r--  cloudinit/config/cc_puppet.py | 1
-rw-r--r--  cloudinit/config/cc_rsyslog.py | 4
-rw-r--r--  cloudinit/config/cc_runcmd.py | 13
-rw-r--r--  cloudinit/config/cc_set_hostname.py | 6
-rw-r--r--  cloudinit/config/cc_snap.py | 15
-rw-r--r--  cloudinit/config/cc_spacewalk.py | 2
-rw-r--r--  cloudinit/config/cc_ssh.py | 2
-rw-r--r--  cloudinit/config/cc_ssh_import_id.py | 2
-rw-r--r--  cloudinit/config/cc_ubuntu_advantage.py | 4
-rw-r--r--  cloudinit/config/cc_ubuntu_drivers.py | 36
-rw-r--r--  cloudinit/config/cc_update_etc_hosts.py | 4
-rw-r--r--  cloudinit/config/cc_update_hostname.py | 7
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py | 20
-rw-r--r--  cloudinit/config/schema.py | 34
-rw-r--r--  cloudinit/config/schemas/schema-cloud-config-v1.json | 117
-rw-r--r--  cloudinit/config/schemas/versions.schema.cloud-config.json | 2
-rw-r--r--  cloudinit/distros/__init__.py | 3
-rw-r--r--  cloudinit/distros/bsd.py | 11
-rw-r--r--  cloudinit/distros/netbsd.py | 9
-rw-r--r--  cloudinit/distros/openmandriva.py | 14
-rw-r--r--  cloudinit/distros/parsers/resolv_conf.py | 14
-rw-r--r--  cloudinit/distros/ubuntu.py | 10
-rw-r--r--  cloudinit/dmi.py | 66
-rw-r--r--  cloudinit/handlers/jinja_template.py | 17
-rw-r--r--  cloudinit/helpers.py | 2
-rw-r--r--  cloudinit/net/__init__.py | 289
-rw-r--r--  cloudinit/net/dhcp.py | 114
-rw-r--r--  cloudinit/net/ephemeral.py | 445
-rw-r--r--  cloudinit/net/netplan.py | 30
-rw-r--r--  cloudinit/net/network_state.py | 14
-rw-r--r--  cloudinit/net/networkd.py | 9
-rw-r--r--  cloudinit/net/sysconfig.py | 3
-rw-r--r--  cloudinit/reporting/__init__.py | 20
-rw-r--r--  cloudinit/reporting/events.py | 9
-rw-r--r--  cloudinit/reporting/handlers.py | 87
-rw-r--r--  cloudinit/safeyaml.py | 2
-rw-r--r--  cloudinit/serial.py | 46
-rw-r--r--  cloudinit/sources/DataSourceAliYun.py | 12
-rw-r--r--  cloudinit/sources/DataSourceAzure.py | 158
-rw-r--r--  cloudinit/sources/DataSourceBigstep.py | 40
-rw-r--r--  cloudinit/sources/DataSourceCloudSigma.py | 6
-rw-r--r--  cloudinit/sources/DataSourceEc2.py | 26
-rw-r--r--  cloudinit/sources/DataSourceGCE.py | 7
-rw-r--r--  cloudinit/sources/DataSourceHetzner.py | 3
-rw-r--r--  cloudinit/sources/DataSourceLXD.py | 34
-rw-r--r--  cloudinit/sources/DataSourceOVF.py | 15
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py | 3
-rw-r--r--  cloudinit/sources/DataSourceOracle.py | 174
-rw-r--r--  cloudinit/sources/DataSourceRbxCloud.py | 31
-rw-r--r--  cloudinit/sources/DataSourceScaleway.py | 21
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py | 9
-rw-r--r--  cloudinit/sources/DataSourceUpCloud.py | 3
-rw-r--r--  cloudinit/sources/DataSourceVMware.py | 20
-rw-r--r--  cloudinit/sources/__init__.py | 50
-rw-r--r--  cloudinit/sources/helpers/cloudsigma.py | 2
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_file.py | 6
-rw-r--r--  cloudinit/sources/helpers/vultr.py | 3
-rw-r--r--  cloudinit/stages.py | 2
-rw-r--r--  cloudinit/templater.py | 21
-rw-r--r--  cloudinit/url_helper.py | 10
-rw-r--r--  cloudinit/util.py | 132
-rw-r--r--  config/cloud.cfg.tmpl | 18
-rw-r--r--  debian/changelog | 102
-rw-r--r--  debian/control | 2
-rw-r--r--  debian/gbp.conf | 12
-rwxr-xr-x  debian/gbp_format_changelog | 84
-rw-r--r--  doc/examples/cloud-config-reporting.txt | 2
-rw-r--r--  doc/rtd/topics/datasources/ec2.rst | 3
-rw-r--r--  doc/rtd/topics/datasources/gce.rst | 2
-rw-r--r--  doc/rtd/topics/datasources/nocloud.rst | 2
-rw-r--r--  doc/rtd/topics/datasources/vmware.rst | 2
-rw-r--r--  doc/rtd/topics/faq.rst | 153
-rw-r--r--  doc/rtd/topics/logging.rst | 174
-rw-r--r--  doc/rtd/topics/module_creation.rst | 2
-rw-r--r--  doc/rtd/topics/network-config-format-v2.rst | 2
-rw-r--r--  doc/rtd/topics/network-config.rst | 14
-rw-r--r--  integration-requirements.txt | 2
-rwxr-xr-x  packages/bddeb | 12
-rw-r--r--  packages/debian/control.in | 3
-rw-r--r--  packages/pkg-deps.json | 3
-rw-r--r--  packages/redhat/cloud-init.spec.in | 7
-rw-r--r--  pyproject.toml | 105
-rw-r--r--  requirements.txt | 5
-rw-r--r--  setup.py | 10
-rw-r--r--  systemd/cloud-init-generator.tmpl | 2
-rw-r--r--  systemd/cloud-init.service.tmpl | 2
-rw-r--r--  tests/integration_tests/bugs/test_lp1835584.py | 2
-rw-r--r--  tests/integration_tests/clouds.py | 13
-rw-r--r--  tests/integration_tests/cmd/test_status.py | 3
-rw-r--r--  tests/integration_tests/datasources/test_ec2_ipv6.py | 12
-rw-r--r--  tests/integration_tests/datasources/test_lxd_discovery.py | 4
-rw-r--r--  tests/integration_tests/datasources/test_network_dependency.py | 4
-rw-r--r--  tests/integration_tests/datasources/test_oci_networking.py | 118
-rw-r--r--  tests/integration_tests/instances.py | 12
-rw-r--r--  tests/integration_tests/integration_settings.py | 5
-rw-r--r--  tests/integration_tests/modules/test_cli.py | 20
-rw-r--r--  tests/integration_tests/modules/test_ubuntu_drivers.py | 37
-rw-r--r--  tests/integration_tests/test_paths.py | 66
-rw-r--r--  tests/unittests/cmd/devel/test_logs.py | 191
-rw-r--r--  tests/unittests/cmd/devel/test_net_convert.py | 187
-rw-r--r--  tests/unittests/cmd/test_clean.py | 4
-rw-r--r--  tests/unittests/cmd/test_cloud_id.py | 5
-rw-r--r--  tests/unittests/cmd/test_main.py | 7
-rw-r--r--  tests/unittests/config/test_cc_ntp.py | 9
-rw-r--r--  tests/unittests/config/test_cc_phone_home.py | 87
-rw-r--r--  tests/unittests/config/test_cc_rh_subscription.py | 2
-rw-r--r--  tests/unittests/config/test_cc_set_hostname.py | 40
-rw-r--r--  tests/unittests/config/test_cc_snap.py | 187
-rw-r--r--  tests/unittests/config/test_cc_ubuntu_drivers.py | 454
-rw-r--r--  tests/unittests/config/test_cc_users_groups.py | 4
-rw-r--r--  tests/unittests/config/test_cc_yum_add_repo.py | 8
-rw-r--r--  tests/unittests/config/test_schema.py | 83
-rw-r--r--  tests/unittests/distros/test_networking.py | 2
-rw-r--r--  tests/unittests/distros/test_sysconfig.py | 4
-rw-r--r--  tests/unittests/helpers.py | 48
-rw-r--r--  tests/unittests/net/test_dhcp.py | 36
-rw-r--r--  tests/unittests/net/test_init.py | 32
-rw-r--r--  tests/unittests/reporting/test_reporting.py (renamed from tests/unittests/test_reporting.py) | 117
-rw-r--r--  tests/unittests/reporting/test_reporting_hyperv.py (renamed from tests/unittests/test_reporting_hyperv.py) | 0
-rw-r--r--  tests/unittests/reporting/test_webhook_handler.py | 120
-rw-r--r--  tests/unittests/sources/test_aliyun.py | 2
-rw-r--r--  tests/unittests/sources/test_azure.py | 892
-rw-r--r--  tests/unittests/sources/test_bigstep.py | 46
-rw-r--r--  tests/unittests/sources/test_cloudsigma.py | 8
-rw-r--r--  tests/unittests/sources/test_cloudstack.py | 5
-rw-r--r--  tests/unittests/sources/test_digitalocean.py | 2
-rw-r--r--  tests/unittests/sources/test_ec2.py | 13
-rw-r--r--  tests/unittests/sources/test_gce.py | 4
-rw-r--r--  tests/unittests/sources/test_hetzner.py | 2
-rw-r--r--  tests/unittests/sources/test_init.py | 33
-rw-r--r--  tests/unittests/sources/test_lxd.py | 77
-rw-r--r--  tests/unittests/sources/test_opennebula.py | 8
-rw-r--r--  tests/unittests/sources/test_openstack.py | 8
-rw-r--r--  tests/unittests/sources/test_oracle.py | 758
-rw-r--r--  tests/unittests/sources/test_ovf.py | 3
-rw-r--r--  tests/unittests/sources/test_scaleway.py | 2
-rw-r--r--  tests/unittests/sources/test_smartos.py | 12
-rw-r--r--  tests/unittests/sources/test_upcloud.py | 4
-rw-r--r--  tests/unittests/sources/test_vmware.py | 4
-rw-r--r--  tests/unittests/sources/test_vultr.py | 23
-rw-r--r--  tests/unittests/test_apport.py | 23
-rw-r--r--  tests/unittests/test_cli.py | 483
-rw-r--r--  tests/unittests/test_dmi.py | 4
-rw-r--r--  tests/unittests/test_net.py | 16
-rw-r--r--  tests/unittests/test_net_activators.py | 14
-rw-r--r--  tests/unittests/test_persistence.py | 3
-rw-r--r--  tests/unittests/test_url_helper.py | 20
-rw-r--r--  tests/unittests/test_util.py | 79
-rw-r--r--  tests/unittests/util.py | 3
-rw-r--r--  tools/.github-cla-signers | 5
-rwxr-xr-x  tools/read-version | 16
-rwxr-xr-x  tools/render-cloudcfg | 1
-rw-r--r--  tox.ini | 63
176 files changed, 4860 insertions, 2990 deletions
diff --git a/.github/workflows/check_format.yml b/.github/workflows/check_format.yml
new file mode 100644
index 00000000..7b52d278
--- /dev/null
+++ b/.github/workflows/check_format.yml
@@ -0,0 +1,51 @@
+name: Lint Tests
+on:
+ pull_request:
+
+concurrency:
+ group: 'ci-${{ github.workflow }}-${{ github.ref }}'
+ cancel-in-progress: true
+defaults:
+ run:
+ shell: sh -ex {0}
+
+jobs:
+ check_format:
+ strategy:
+ fail-fast: false
+ matrix:
+ env: [flake8, mypy, pylint, black, isort]
+ lint-with:
+ - {tip-versions: false, os: ubuntu-18.04}
+ - {tip-versions: true, os: ubuntu-latest}
+ name: ${{ matrix.lint-with.tip-versions && 'Check format (tip)' || 'Check format (pinned)' }}
+ runs-on: ${{ matrix.lint-with.os }}
+ steps:
+ - name: "Checkout #1"
+ uses: actions/checkout@v3.0.0
+
+ - name: "Checkout #2 (for tools/read-version)"
+ run: |
+ git fetch --unshallow
+ git remote add upstream https://git.launchpad.net/cloud-init
+
+ - name: Dependencies
+ run: |
+ sudo DEBIAN_FRONTEND=noninteractive apt-get -qy update
+ sudo DEBIAN_FRONTEND=noninteractive apt-get -qy install tox
+
+ - name: Print version
+ run: python3 --version
+
+ - name: Test
+    if: ${{ !matrix.lint-with.tip-versions }}
+ env:
+ # matrix env: not to be confused w/environment variables or testenv
+ TOXENV: ${{ matrix.env }}
+ run: tox
+ - name: Test (tip versions)
+ if: matrix.lint-with.tip-versions
+ continue-on-error: true
+ env:
+ TOXENV: tip-${{ matrix.env }}
+ run: tox
diff --git a/.pylintrc b/.pylintrc
index 3edb0092..ea686815 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -25,8 +25,9 @@ jobs=4
# W0703(broad-except)
# W1401(anomalous-backslash-in-string)
# W1514(unspecified-encoding)
+# E0012(bad-option-value)
-disable=C, F, I, R, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401, W1514
+disable=C, F, I, R, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401, W1514, E0012
[REPORTS]
diff --git a/.travis.yml b/.travis.yml
index a529ace1..fbb0b3ef 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -28,6 +28,9 @@ install:
# Required so `git describe` will definitely find a tag; see
# https://github.com/travis-ci/travis-ci/issues/7422
- git fetch --unshallow
+ # Not pinning setuptools can cause failures on python 3.7 and 3.8 builds
+ # See https://github.com/pypa/setuptools/issues/3118
+ - pip install setuptools==59.6.0
- pip install tox
script:
@@ -130,27 +133,20 @@ matrix:
TOXENV=lowest-supported
PYTEST_ADDOPTS=-v # List all tests run by pytest
dist: bionic
- - python: 3.6
- env: TOXENV=flake8
- - python: 3.6
- env: TOXENV=mypy
- - python: 3.6
- env: TOXENV=pylint
- - python: 3.6
- env: TOXENV=black
- - python: 3.6
- env: TOXENV=isort
- python: 3.7
env: TOXENV=doc
install:
- git fetch --unshallow
+ # Not pinning setuptools can cause failures on python 3.7 and 3.8 builds
+ # See https://github.com/pypa/setuptools/issues/3118
+ - pip install setuptools==59.6.0
- sudo apt-get install lintian
- pip install tox
script:
- - make check_spelling
- - tox
+ - make check_spelling && tox
# Test all supported Python versions (but at the end, so we schedule
# longer-running jobs first)
+ - python: 3.11-dev
- python: "3.10"
- python: 3.9
- python: 3.8
diff --git a/.vscode/extensions.json b/.vscode/extensions.json
new file mode 100644
index 00000000..6098b6eb
--- /dev/null
+++ b/.vscode/extensions.json
@@ -0,0 +1,5 @@
+{
+ "recommendations": [
+ "redhat.vscode-yaml"
+ ]
+}
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 819572c6..50ca7cfb 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -14,7 +14,7 @@ Summary
Before any pull request can be accepted, you must do the following:
* Sign the Canonical `contributor license agreement`_
-* Add yourself (alphabetically) to the in-repository list that we use
+* Add your GitHub username (alphabetically) to the in-repository list that we use
to track CLA signatures:
`tools/.github-cla-signers`_
* Add or update any `unit tests`_ accordingly
@@ -80,7 +80,7 @@ Follow these steps to submit your first pull request to cloud-init:
* Read through the cloud-init `Code Review Process`_, so you understand
how your changes will end up in cloud-init's codebase.
-* Submit your first cloud-init pull request, adding yourself to the
+* Submit your first cloud-init pull request, adding your GitHub username to the
in-repository list that we use to track CLA signatures:
`tools/.github-cla-signers`_
diff --git a/ChangeLog b/ChangeLog
index a90a8986..d23d129d 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -21,16 +21,15 @@
- tests: cc_set_passwords update for systemd, non-systemd distros (#1449)
- Fix bug in url_helper/dual_stack() logging (#1426)
- schema: render schema paths from _CustomSafeLoaderWithMarks (#1391)
- (GH: SC-929)
- testing: Make integration tests kinetic friendly (#1441)
- Handle error if SSH service not present. (#1422)
- [Alberto Contreras] (GH: #1969526)
+ [Alberto Contreras] (LP: #1969526)
- Fix network-manager activator availability and order (#1438)
- sources/azure: remove reprovisioning marker (#1414) [Chris Patterson]
- upstart: drop vestigial support for upstart (#1421)
- testing: Ensure NoCloud detected in test (#1439)
- Update .github-cla-signers kallioli [Kevin Allioli]
- - Consistently strip top-level network key (#1417) (GH: #1906187)
+ - Consistently strip top-level network key (#1417) (LP: #1906187)
- testing: Fix LXD VM metadata test (#1430)
- testing: Add NoCloud setup for NoCloud test (#1425)
- Update linters and adapt code for compatibility (#1434) [Paride Legovini]
@@ -43,9 +42,9 @@
- tests: verify_ordered_items fallback to re.escape if needed (#1420)
- Misc module cleanup (#1418)
- docs: Fix doc warnings and enable errors (#1419)
- [Alberto Contreras] (GH: #1876341)
+ [Alberto Contreras] (LP: #1876341)
- Refactor cloudinit.sources.NetworkConfigSource to enum (#1413)
- [Alberto Contreras] (GH: #1874875)
+ [Alberto Contreras] (LP: #1874875)
- Don't fail if IB and Ethernet devices 'collide' (#1411)
- Use cc_* module meta definition over hardcoded vars (SC-888) (#1385)
- Fix cc_rsyslog.py initialization (#1404) [Alberto Contreras]
@@ -57,13 +56,13 @@
- Allow growpart to resize encrypted partitions (#1316)
- Fix typo in integration_test.rst (#1405) [Alberto Contreras]
- cloudinit.net refactor: apply_network_config_names (#1388)
- [Alberto Contreras] (GH: #1884602)
+ [Alberto Contreras] (LP: #1884602)
- tests/azure: add fixtures for hardcoded paths (markers and data_dir)
(#1399) [Chris Patterson]
- testing: Add responses workaround for focal/impish (#1403)
- cc_ssh_import_id: fix is_key_in_nested_dict to avoid early False
- Fix ds-identify not detecting NoCloud seed in config (#1381)
- (GH: #1876375)
+ (LP: #1876375)
- sources/azure: retry dhcp for failed processes (#1401) [Chris Patterson]
- Move notes about refactorization out of CONTRIBUTING.rst (#1389)
- Shave ~8ms off generator runtime (#1387)
@@ -78,28 +77,27 @@
- sources/azure: only wait for primary nic to be attached during restore
(#1378) [Anh Vo]
- cc_ntp: migrated legacy schema to cloud-init-schema.json (#1384)
- (GH: SC-803)
- Network functions refactor and bugfixes (#1383)
- schema: add JSON defs for modules cc_users_groups (#1379)
- (GH: SC-928, SC-846, SC-897, #1858930)
+ (LP: #1858930)
- Fix doc typo (#1382) [Alberto Contreras]
- Add support for dual stack IPv6/IPv4 IMDS to Ec2 (#1160)
- - Fix KeyError when rendering sysconfig IPv6 routes (#1380) (GH: #1958506)
+ - Fix KeyError when rendering sysconfig IPv6 routes (#1380) (LP: #1958506)
- Return a namedtuple from subp() (#1376)
- Mypy stubs and other tox maintenance (SC-920) (#1374)
- Distro Compatibility Fixes (#1375)
- Pull in Gentoo patches (#1372)
- schema: add json defs for modules U-Z (#1360)
- (GH: #1858928, #1858929, #1858931, #1858932)
+ (LP: #1858928, #1858929, #1858931, #1858932)
- util: atomically update sym links to avoid Suppress FileNotFoundError
- when reading status (#1298) [Adam Collard] (GH: LP:1962150)
+ when reading status (#1298) [Adam Collard] (LP: #1962150)
- schema: add json defs for modules scripts-timezone (SC-801) (#1365)
- docs: Add first tutorial (SC-900) (#1368)
- BUG 1473527: module ssh-authkey-fingerprints fails Input/output error…
- (#1340) [Andrew Lee] (GH: #1473527)
+ (#1340) [Andrew Lee] (LP: #1473527)
- add arch hosts template (#1371)
- ds-identify: detect LXD for VMs launched from host with > 5.10 kernel
- (#1370) (GH: #1968085)
+ (#1370) (LP: #1968085)
- Support EC2 tags in instance metadata (#1309) [Eduardo Dobay]
- schema: add json defs for modules e-install (SC-651) (#1366)
- Improve "(no_create_home|system): true" test (#1367) [Jeffrey 'jf' Lim]
@@ -122,7 +120,7 @@
- testing: Add missing is_FreeBSD mock to networking test (#1353)
- Add --no-update to add-apt-repostory call (SC-880) (#1337)
- schema: add json defs for modules K-L (#1321)
- (GH: #1858899, #1858900, #1858901, #1858902)
+ (LP: #1858899, #1858900, #1858901, #1858902)
- docs: Re-order readthedocs install (#1354)
- Stop cc_ssh_authkey_fingerprints from ALWAYS creating home (#1343)
[Jeffrey 'jf' Lim]
@@ -131,14 +129,14 @@
- sources/azure: move get_ip_from_lease_value out of shim (#1324)
[Chris Patterson]
- Fix cloud-init status --wait when no datasource found (#1349)
- (GH: #1966085)
+ (LP: #1966085)
- schema: add JSON defs for modules resize-salt (SC-654) (#1341)
- Add myself as a future contributor (#1345) [Neal Gompa (ニール・ゴンパ)]
- Update .github-cla-signers (#1342) [Jeffrey 'jf' Lim]
- add Requires=cloud-init-hotplugd.socket in cloud-init-hotplugd.service
file (#1335) [yangzz-97]
- Fix sysconfig render when set-name is missing (#1327)
- [Andrew Kutz] (GH: #1855945)
+ [Andrew Kutz] (LP: #1855945)
- Refactoring helper funcs out of NetworkState (#1336) [Andrew Kutz]
- url_helper: add tuple support for readurl timeout (#1328)
[Chris Patterson]
@@ -162,7 +160,7 @@
- Doc cleanups (#1317)
- docs improvements (#1312)
- add support for jinja do statements, add unit test (#1314)
- [Paul Bruno] (GH: #1962759)
+ [Paul Bruno] (LP: #1962759)
- sources/azure: prevent tight loops for DHCP retries (#1285)
[Chris Patterson]
- net/dhcp: surface type of DHCP lease failure to caller (#1276)
@@ -177,7 +175,7 @@
[Adam Collard]
- check for existing symlink while force creating symlink (#1281)
[Shreenidhi Shedi]
- - Do not silently ignore integer uid (#1280) (GH: #1875772)
+ - Do not silently ignore integer uid (#1280) (LP: #1875772)
- tests: create a IPv4/IPv6 VPC in Ec2 integration tests (#1291)
- Integration test fix ppa (#1296)
- tests: on official EC2. cloud-id actually startswith aws not ec2 (#1289)
@@ -319,8 +317,7 @@
- sources/azure: remove unnecessary hostname bounce (#1143)
[Chris Patterson]
- find_devs/openbsd: accept ISO on disk (#1132)
- [Gonéri Le Bouder] (GH:
- https://github.com/ContainerCraft/kmi/issues/12)
+ [Gonéri Le Bouder]
- Improve error log message when mount failed (#1140) [Ksenija Stanojevic]
- add KsenijaS as a contributor (#1145) [Ksenija Stanojevic]
- travis - don't run integration tests if no deb (#1139)
@@ -328,14 +325,14 @@
- testing: Add deterministic test id (#1138)
- mock sleep() in azure test (#1137)
- Add miraclelinux support (#1128) [Haruki TSURUMOTO]
- - docs: Make MACs lowercase in network config (#1135) (GH: #1876941)
+ - docs: Make MACs lowercase in network config (#1135) (LP: #1876941)
- Add Strict Metaschema Validation (#1101)
- update dead link (#1133)
- cloudinit/net: handle two different routes for the same ip (#1124)
[Emanuele Giuseppe Esposito]
- docs: pin mistune dependency (#1134)
- Reorganize unit test locations under tests/unittests (#1126)
- - Fix exception when no activator found (#1129) (GH: #1948681)
+ - Fix exception when no activator found (#1129) (LP: #1948681)
- jinja: provide and document jinja-safe key aliases in instance-data
(SC-622) (#1123)
- testing: Remove date from final_message test (SC-638) (#1127)
@@ -352,7 +349,7 @@
- lxd: add preference for LXD cloud-init.* config keys over user keys
(#1108)
- VMware: source /etc/network/interfaces.d/* on Debian
- [chengcheng-chcheng] (GH: #1950136)
+ [chengcheng-chcheng] (LP: #1950136)
- Add cjp256 as contributor (#1109) [Chris Patterson]
- integration_tests: Ensure log directory exists before symlinking to it
(#1110)
@@ -362,8 +359,8 @@
- tests: specialize lxd_discovery test for lxd_vm vendordata (#1106)
- Add convenience symlink to integration test output (#1105)
- Fix for set-name bug in networkd renderer (#1100)
- [Andrew Kutz] (GH: #1949407)
- - Wait for apt lock (#1034) (GH: #1944611)
+ [Andrew Kutz] (LP: #1949407)
+ - Wait for apt lock (#1034) (LP: #1944611)
- testing: stop chef test from running on openstack (#1102)
- alpine.py: add options to the apk upgrade command (#1089) [dermotbradley]
diff --git a/Makefile b/Makefile
index 9584ccc1..72faa04a 100644
--- a/Makefile
+++ b/Makefile
@@ -128,13 +128,20 @@ deb-src:
doc:
tox -e doc
+fmt:
+ tox -e do_format && tox -e check_format
+
+fmt-tip:
+ tox -e do_format_tip && tox -e check_format_tip
+
# Spell check && filter false positives
_CHECK_SPELLING := find doc -type f -exec spellintian {} + | \
grep -v -e 'doc/rtd/topics/cli.rst: modules modules' \
-e 'doc/examples/cloud-config-mcollective.txt: WARNING WARNING' \
-e 'doc/examples/cloud-config-power-state.txt: Bye Bye' \
-e 'doc/examples/cloud-config.txt: Bye Bye' \
- -e 'doc/rtd/topics/cli.rst: DOCS DOCS'
+ -e 'doc/rtd/topics/cli.rst: DOCS DOCS' \
+ -e 'dependant'
# For CI we require a failing return code when spellintian finds spelling errors
diff --git a/README.md b/README.md
index f2a745f8..0a4d36c6 100644
--- a/README.md
+++ b/README.md
@@ -39,7 +39,7 @@ get in contact with that distribution and send them our way!
| Supported OSes | Supported Public Clouds | Supported Private Clouds |
| --- | --- | --- |
-| Alpine Linux<br />ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />openEuler<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux/CloudLinux/MIRACLE LINUX<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
+| Alpine Linux<br />ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />openEuler<br />OpenMandriva<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux/CloudLinux/MIRACLE LINUX<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
## To start developing cloud-init
diff --git a/bash_completion/cloud-init b/bash_completion/cloud-init
index 1eceb472..579005d2 100644
--- a/bash_completion/cloud-init
+++ b/bash_completion/cloud-init
@@ -10,7 +10,7 @@ _cloudinit_complete()
cur_word="${COMP_WORDS[COMP_CWORD]}"
prev_word="${COMP_WORDS[COMP_CWORD-1]}"
- subcmds="analyze clean collect-logs devel dhclient-hook features init modules query single status"
+ subcmds="analyze clean collect-logs devel dhclient-hook features init modules query schema single status"
base_params="--help --file --version --debug --force"
case ${COMP_CWORD} in
1)
@@ -28,7 +28,7 @@ _cloudinit_complete()
COMPREPLY=($(compgen -W "--help --tarfile --include-userdata" -- $cur_word))
;;
devel)
- COMPREPLY=($(compgen -W "--help hotplug-hook schema net-convert" -- $cur_word))
+ COMPREPLY=($(compgen -W "--help hotplug-hook net-convert" -- $cur_word))
;;
dhclient-hook)
COMPREPLY=($(compgen -W "--help up down" -- $cur_word))
diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py
index abfa0913..04621f12 100644
--- a/cloudinit/analyze/show.py
+++ b/cloudinit/analyze/show.py
@@ -8,7 +8,6 @@ import base64
import datetime
import json
import os
-import sys
import time
from cloudinit import subp, util
@@ -257,25 +256,21 @@ def gather_timestamps_using_systemd():
status = SUCCESS_CODE
# lxc based containers do not set their monotonic zero point to be when
# the container starts, instead keep using host boot as zero point
- # time.CLOCK_MONOTONIC_RAW is only available in python 3.3
if util.is_container():
# clock.monotonic also uses host boot as zero point
- if sys.version_info >= (3, 3):
- base_time = float(time.time()) - float(time.monotonic())
- # TODO: lxcfs automatically truncates /proc/uptime to seconds
- # in containers when https://github.com/lxc/lxcfs/issues/292
- # is fixed, util.uptime() should be used instead of stat on
- try:
- file_stat = os.stat("/proc/1/cmdline")
- kernel_start = file_stat.st_atime
- except OSError as err:
- raise RuntimeError(
- "Could not determine container boot "
- "time from /proc/1/cmdline. ({})".format(err)
- ) from err
- status = CONTAINER_CODE
- else:
- status = FAIL_CODE
+ base_time = float(time.time()) - float(time.monotonic())
+ # TODO: lxcfs automatically truncates /proc/uptime to seconds
+ # in containers when https://github.com/lxc/lxcfs/issues/292
+ # is fixed, util.uptime() should be used instead of stat on
+ try:
+ file_stat = os.stat("/proc/1/cmdline")
+ kernel_start = file_stat.st_atime
+ except OSError as err:
+ raise RuntimeError(
+ "Could not determine container boot "
+ "time from /proc/1/cmdline. ({})".format(err)
+ ) from err
+ status = CONTAINER_CODE
kernel_end = base_time + delta_k_end
cloudinit_sysd = base_time + delta_ci_s
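The hunk above drops the Python < 3.3 fallback: time.monotonic() exists on
every Python 3 version cloud-init supports, so the container branch can
compute the boot estimate unconditionally. A minimal sketch of the
arithmetic it relies on (wall time minus monotonic time approximates the
zero point of the monotonic clock, i.e. host boot):

    import time

    # Approximate boot timestamp (epoch seconds) of the monotonic zero point
    base_time = time.time() - time.monotonic()
    print("estimated boot time:", time.ctime(base_time))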
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index 92068aa9..aa3a6c5c 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -3,6 +3,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Cloud-init apport interface"""
+from cloudinit.cmd.devel import read_cfg_paths
try:
from apport.hookutils import (
@@ -53,7 +54,11 @@ KNOWN_CLOUD_NAMES = [
# Potentially clear text collected logs
CLOUDINIT_LOG = "/var/log/cloud-init.log"
CLOUDINIT_OUTPUT_LOG = "/var/log/cloud-init-output.log"
-USER_DATA_FILE = "/var/lib/cloud/instance/user-data.txt" # Optional
+
+
+def _get_user_data_file() -> str:
+ paths = read_cfg_paths()
+ return paths.get_ipath_cur("userdata_raw")
def attach_cloud_init_logs(report, ui=None):
@@ -106,18 +111,19 @@ def attach_cloud_info(report, ui=None):
def attach_user_data(report, ui=None):
"""Optionally provide user-data if desired."""
if ui:
+ user_data_file = _get_user_data_file()
prompt = (
"Your user-data or cloud-config file can optionally be provided"
" from {0} and could be useful to developers when addressing this"
" bug. Do you wish to attach user-data to this bug?".format(
- USER_DATA_FILE
+ user_data_file
)
)
response = ui.yesno(prompt)
if response is None:
raise StopIteration # User cancelled
if response:
- attach_file(report, USER_DATA_FILE, "user_data.txt")
+ attach_file(report, user_data_file, "user_data.txt")
def add_bug_tags(report):
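Both apport.py here and logs.py below replace the hardcoded USER_DATA_FILE
constant with a lookup through the configured paths, so a customized
cloud_dir is honored. Sketch of the resolution (output shown for a default
configuration):

    from cloudinit.cmd.devel import read_cfg_paths

    paths = read_cfg_paths()
    print(paths.get_ipath_cur("userdata_raw"))
    # /var/lib/cloud/instance/user-data.txt on a stock install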
diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py
index 29439911..bc8f3ef3 100755
--- a/cloudinit/cmd/devel/hotplug_hook.py
+++ b/cloudinit/cmd/devel/hotplug_hook.py
@@ -202,12 +202,12 @@ def handle_hotplug(hotplug_init: Init, devpath, subsystem, udevaction):
return
handler_cls = SUBSYSTEM_PROPERTES_MAP[subsystem][0]
LOG.debug("Creating %s event handler", subsystem)
- event_handler = handler_cls(
+ event_handler: UeventHandler = handler_cls(
datasource=datasource,
devpath=devpath,
action=udevaction,
success_fn=hotplug_init._write_to_cache,
- ) # type: UeventHandler
+ )
wait_times = [1, 3, 5, 10, 30]
last_exception = Exception("Bug while processing hotplug event.")
for attempt, wait in enumerate(wait_times):
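The hotplug change above is purely a typing cleanup: the trailing "# type:"
comment becomes an inline variable annotation, which type checkers verify
as real syntax. Equivalent minimal example:

    from typing import List

    handlers: List[str] = []        # inline annotation (the new style)
    legacy = []  # type: List[str]  # comment form removed by this diff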
diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
index fbe8c500..a87b7043 100755
--- a/cloudinit/cmd/devel/logs.py
+++ b/cloudinit/cmd/devel/logs.py
@@ -12,6 +12,7 @@ import shutil
import sys
from datetime import datetime
+from cloudinit.cmd.devel import read_cfg_paths
from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
from cloudinit.subp import ProcessExecutionError, subp
from cloudinit.temp_utils import tempdir
@@ -19,7 +20,11 @@ from cloudinit.util import chdir, copy, ensure_dir, write_file
CLOUDINIT_LOGS = ["/var/log/cloud-init.log", "/var/log/cloud-init-output.log"]
CLOUDINIT_RUN_DIR = "/run/cloud-init"
-USER_DATA_FILE = "/var/lib/cloud/instance/user-data.txt" # Optional
+
+
+def _get_user_data_file() -> str:
+ paths = read_cfg_paths()
+ return paths.get_ipath_cur("userdata_raw")
def get_parser(parser=None):
@@ -53,6 +58,7 @@ def get_parser(parser=None):
" Default: cloud-init.tar.gz"
),
)
+ user_data_file = _get_user_data_file()
parser.add_argument(
"--include-userdata",
"-u",
@@ -61,7 +67,7 @@ def get_parser(parser=None):
dest="userdata",
help=(
"Optionally include user-data from {0} which could contain"
- " sensitive information.".format(USER_DATA_FILE)
+ " sensitive information.".format(user_data_file)
),
)
return parser
@@ -104,7 +110,7 @@ def _collect_file(path, out_dir, verbosity):
_debug("file %s did not exist\n" % path, 2, verbosity)
-def collect_logs(tarfile, include_userdata, verbosity=0):
+def collect_logs(tarfile, include_userdata: bool, verbosity=0):
"""Collect all cloud-init logs and tar them up into the provided tarfile.
@param tarfile: The path of the tar-gzipped file to create.
@@ -152,7 +158,8 @@ def collect_logs(tarfile, include_userdata, verbosity=0):
for log in CLOUDINIT_LOGS:
_collect_file(log, log_dir, verbosity)
if include_userdata:
- _collect_file(USER_DATA_FILE, log_dir, verbosity)
+ user_data_file = _get_user_data_file()
+ _collect_file(user_data_file, log_dir, verbosity)
run_dir = os.path.join(log_dir, "run")
ensure_dir(run_dir)
if os.path.exists(CLOUDINIT_RUN_DIR):
diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py
index e3f58e90..3e6cdd95 100755
--- a/cloudinit/cmd/devel/net_convert.py
+++ b/cloudinit/cmd/devel/net_convert.py
@@ -7,6 +7,8 @@ import json
import os
import sys
+import yaml
+
from cloudinit import distros, log, safeyaml
from cloudinit.net import (
eni,
@@ -124,7 +126,9 @@ def handle_args(name, args):
json.loads(net_data), known_macs=known_macs
)
elif args.kind == "azure-imds":
- pre_ns = azure.parse_network_config(json.loads(net_data))
+ pre_ns = azure.generate_network_config_from_instance_network_metadata(
+ json.loads(net_data)["network"]
+ )
elif args.kind == "vmware-imc":
config = ovf.Config(ovf.ConfigFile(args.network_data.name))
pre_ns = ovf.get_network_config_from_conf(config, False)
@@ -132,9 +136,7 @@ def handle_args(name, args):
ns = network_state.parse_net_config_data(pre_ns)
if args.debug:
- sys.stderr.write(
- "\n".join(["", "Internal State", safeyaml.dumps(ns), ""])
- )
+ sys.stderr.write("\n".join(["", "Internal State", yaml.dump(ns), ""]))
distro_cls = distros.fetch(args.distro)
distro = distro_cls(args.distro, {}, None)
config = {}
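For the --debug output above, PyYAML's default dumper is now used directly;
a hedged illustration of why that suits dumping an internal object like
NetworkState (the full Dumper tags arbitrary Python objects, where safe
dumpers raise RepresenterError on non-primitive types):

    import yaml

    class State:  # stand-in for an internal state object
        def __init__(self):
            self.version = 2
            self.interfaces = ["eth0"]

    print(yaml.dump(State()))  # emits !!python/object:__main__.State {...}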
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index fcdaf725..4f157870 100755
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -47,6 +47,7 @@ from cloudinit import atomic_helper
from cloudinit.config import cc_set_hostname
from cloudinit import dhclient_hook
+from cloudinit.cmd.devel import read_cfg_paths
# Welcome message template
@@ -454,7 +455,9 @@ def main_init(name, args):
# Validate user-data adheres to schema definition
if os.path.exists(init.paths.get_ipath_cur("userdata_raw")):
- validate_cloudconfig_schema(config=init.cfg, strict=False)
+ validate_cloudconfig_schema(
+ config=init.cfg, strict=False, log_details=False
+ )
else:
LOG.debug("Skipping user-data validation. No user-data found.")
@@ -661,7 +664,8 @@ def main_single(name, args):
def status_wrapper(name, args, data_d=None, link_d=None):
if data_d is None:
- data_d = os.path.normpath("/var/lib/cloud/data")
+ paths = read_cfg_paths()
+ data_d = paths.get_cpath("data")
if link_d is None:
link_d = os.path.normpath("/run/cloud-init")
@@ -790,7 +794,7 @@ def _maybe_set_hostname(init, stage, retry_stage):
@param retry_stage: String represented logs upon error setting hostname.
"""
cloud = init.cloudify()
- (hostname, _fqdn) = util.get_hostname_fqdn(
+ (hostname, _fqdn, _) = util.get_hostname_fqdn(
init.cfg, cloud, metadata_only=True
)
if hostname: # meta-data or user-data hostname content
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
index b9347200..2dcd8e44 100755
--- a/cloudinit/cmd/query.py
+++ b/cloudinit/cmd/query.py
@@ -150,7 +150,6 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict:
:raise: IOError/OSError on absence of instance-data.json file or invalid
access perms.
"""
- paths = None
uid = os.getuid()
if not all([instance_data, user_data, vendor_data]):
paths = read_cfg_paths()
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 7ca50194..5403499e 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -756,7 +756,7 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
raise ValueError("unknown mirror type")
# if we have a fqdn, then search its domain portion first
- (_, fqdn) = util.get_hostname_fqdn(cfg, cloud)
+ fqdn = util.get_hostname_fqdn(cfg, cloud).fqdn
mydom = ".".join(fqdn.split(".")[1:])
if mydom:
doms.append(".%s" % mydom)
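The ".fqdn" attribute access works because util.get_hostname_fqdn now
returns a named tuple rather than a plain pair; later hunks in this diff
unpack it as (hostname, fqdn, is_default). A sketch of that shape (field
names taken from the call sites; the actual class in cloudinit.util may be
named differently):

    from collections import namedtuple

    HostnameFqdn = namedtuple(
        "HostnameFqdn", ["hostname", "fqdn", "is_default"]
    )

    info = HostnameFqdn("myhost", "myhost.example.com", False)
    print(info.fqdn)          # attribute access, as in cc_apt_configure
    hostname, fqdn, _ = info  # 3-tuple unpacking, as in cc_update_etc_hosts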
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index fdb3a6e3..11060f3b 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -71,19 +71,20 @@ CHEF_RB_TPL_PATH_KEYS = frozenset(
"encrypted_data_bag_secret",
]
)
-CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys())
-CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS)
-CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_PATH_KEYS)
-CHEF_RB_TPL_KEYS.extend(
- [
- "server_url",
- "node_name",
- "environment",
- "validation_name",
- "chef_license",
- ]
+CHEF_RB_TPL_KEYS = frozenset(
+ itertools.chain(
+ CHEF_RB_TPL_DEFAULTS.keys(),
+ CHEF_RB_TPL_BOOL_KEYS,
+ CHEF_RB_TPL_PATH_KEYS,
+ [
+ "server_url",
+ "node_name",
+ "environment",
+ "validation_name",
+ "chef_license",
+ ],
+ )
)
-CHEF_RB_TPL_KEYS = frozenset(CHEF_RB_TPL_KEYS)
CHEF_RB_PATH = "/etc/chef/client.rb"
CHEF_EXEC_PATH = "/usr/bin/chef-client"
CHEF_EXEC_DEF_ARGS = tuple(["-d", "-i", "1800", "-s", "20"])
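The CHEF_RB_TPL_KEYS rewrite above builds the frozenset in a single
expression instead of extending a temporary list and re-wrapping it (it
assumes "import itertools" elsewhere in the module). The same pattern in
miniature:

    import itertools

    defaults = {"a": 1, "b": 2}
    bool_keys, path_keys = ["c"], ["d"]
    keys = frozenset(
        itertools.chain(defaults.keys(), bool_keys, path_keys, ["e"])
    )
    assert keys == {"a", "b", "c", "d", "e"}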
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
index c51818c3..a00f2823 100644
--- a/cloudinit/config/cc_debug.py
+++ b/cloudinit/config/cc_debug.py
@@ -95,7 +95,7 @@ def handle(name, cfg, cloud, log, args):
"Datasource: %s\n" % (type_utils.obj_name(cloud.datasource))
)
to_print.write("Distro: %s\n" % (type_utils.obj_name(cloud.distro)))
- to_print.write("Hostname: %s\n" % (cloud.get_hostname(True)))
+ to_print.write("Hostname: %s\n" % (cloud.get_hostname(True).hostname))
to_print.write("Instance ID: %s\n" % (cloud.get_instance_id()))
to_print.write("Locale: %s\n" % (cloud.get_locale()))
to_print.write("Launch IDX: %s\n" % (cloud.launch_index))
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 14a2c0b8..f23a6bb8 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -346,7 +346,7 @@ def is_encrypted(blockdev, partition) -> bool:
def get_underlying_partition(blockdev):
command = ["dmsetup", "deps", "--options=devname", blockdev]
- dep: str = subp.subp(command)[0] # type: ignore
+ dep: str = subp.subp(command)[0] # pyright: ignore
# Returned result should look something like:
# 1 dependencies : (vdb1)
if not dep.startswith("1 depend"):
diff --git a/cloudinit/config/cc_keyboard.py b/cloudinit/config/cc_keyboard.py
index 211cb015..fbb166f6 100644
--- a/cloudinit/config/cc_keyboard.py
+++ b/cloudinit/config/cc_keyboard.py
@@ -18,14 +18,16 @@ from cloudinit.settings import PER_INSTANCE
DEFAULT_KEYBOARD_MODEL = "pc105"
-distros = distros.Distro.expand_osfamily(["arch", "debian", "redhat", "suse"])
+supported_distros = distros.Distro.expand_osfamily(
+ ["arch", "debian", "redhat", "suse"]
+)
meta: MetaSchema = {
"id": "cc_keyboard",
"name": "Keyboard",
"title": "Set keyboard layout",
"description": "Handle keyboard configuration.",
- "distros": distros,
+ "distros": supported_distros,
"examples": [
dedent(
"""\
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 3bc1d303..ef1c02ca 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -30,6 +30,7 @@ distros = [
"fedora",
"miraclelinux",
"openEuler",
+ "openmandriva",
"opensuse",
"photon",
"rhel",
@@ -92,6 +93,18 @@ DISTRO_CLIENT_CONFIG = {
"confpath": "/etc/chrony/chrony.conf",
},
},
+ "openmandriva": {
+ "chrony": {
+ "service_name": "chronyd",
+ },
+ "ntp": {
+ "confpath": "/etc/ntp.conf",
+ "service_name": "ntpd",
+ },
+ "systemd-timesyncd": {
+ "check_exe": "/lib/systemd/systemd-timesyncd",
+ },
+ },
"opensuse": {
"chrony": {
"service_name": "chronyd",
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 681c3729..0534a83a 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -129,8 +129,8 @@ def handle(name, cfg, cloud, log, args):
post_list = ph_cfg.get("post", "all")
tries = ph_cfg.get("tries")
try:
- tries = int(tries) # type: ignore
- except ValueError:
+ tries = int(tries) # pyright: ignore
+ except (ValueError, TypeError):
tries = 10
util.logexc(
log,
@@ -141,10 +141,11 @@ def handle(name, cfg, cloud, log, args):
if post_list == "all":
post_list = POST_LIST_ALL
- all_keys = {}
- all_keys["instance_id"] = cloud.get_instance_id()
- all_keys["hostname"] = cloud.get_hostname()
- all_keys["fqdn"] = cloud.get_hostname(fqdn=True)
+ all_keys = {
+ "instance_id": cloud.get_instance_id(),
+ "hostname": cloud.get_hostname().hostname,
+ "fqdn": cloud.get_hostname(fqdn=True).hostname,
+ }
pubkeys = {
"pub_key_dsa": "/etc/ssh/ssh_host_dsa_key.pub",
@@ -190,7 +191,7 @@ def handle(name, cfg, cloud, log, args):
url_helper.read_file_or_url(
url,
data=real_submit_keys,
- retries=tries,
+ retries=tries - 1,
sec_between=3,
ssl_details=util.fetch_ssl_details(cloud.paths),
)
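The "retries=tries - 1" change above matters because a retry count of N
means N attempts after the first try, i.e. N + 1 requests total; passing
the configured tries value unchanged made one request too many. A toy
model of that accounting (the fetch function is hypothetical, for
illustration only):

    def fetch_with_retries(url: str, retries: int) -> None:
        for attempt in range(retries + 1):  # first try + `retries` retries
            print("attempt", attempt + 1, "of", retries + 1)

    tries = 10
    fetch_with_retries("http://example.com", retries=tries - 1)  # 10 attempts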
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index c0b073b5..2e964dcf 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -257,7 +257,6 @@ def handle(name, cfg, cloud, log, _args):
# (TODO(harlowja) is this really needed??)
cleaned_lines = [i.lstrip() for i in contents.splitlines()]
cleaned_contents = "\n".join(cleaned_lines)
- # Move to puppet_config.read_file when dropping py2.7
puppet_config.read_file(
StringIO(cleaned_contents), source=p_constants.conf_path
)
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 57b8aa62..5b55028c 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -67,7 +67,7 @@ __doc__ = get_meta_doc(meta)
DEF_FILENAME = "20-cloud-config.conf"
DEF_DIR = "/etc/rsyslog.d"
DEF_RELOAD = "auto"
-DEF_REMOTES = {}
+DEF_REMOTES: dict = {}
KEYNAME_CONFIGS = "configs"
KEYNAME_FILENAME = "config_filename"
@@ -113,7 +113,7 @@ def load_config(cfg: dict) -> dict:
if KEYNAME_LEGACY_DIR in cfg:
mycfg[KEYNAME_DIR] = cfg[KEYNAME_LEGACY_DIR]
- fillup = (
+ fillup: tuple = (
(KEYNAME_CONFIGS, [], list),
(KEYNAME_DIR, DEF_DIR, str),
(KEYNAME_FILENAME, DEF_FILENAME, str),
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index 7c614f57..b883e107 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -24,11 +24,14 @@ from cloudinit.settings import PER_INSTANCE
MODULE_DESCRIPTION = """\
-Run arbitrary commands at a rc.local like level with output to the
-console. Each item can be either a list or a string. If the item is a
-list, it will be properly quoted. Each item is written to
-``/var/lib/cloud/instance/runcmd`` to be later interpreted using
-``sh``.
+Run arbitrary commands at a rc.local like time-frame with output to the
+console. Each item can be either a list or a string. The item type affects
+how it is executed:
+
+
+* If the item is a string, it will be interpreted by ``sh``.
+* If the item is a list, the items will be executed as if passed to execve(3)
+ (with the first arg as the command).
Note that the ``runcmd`` module only writes the script to be run
later. The module that actually runs the script is ``scripts-user``
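A toy stand-in for the serialization the new wording describes (not the
actual cloud-init helper): string items are emitted verbatim for "sh" to
interpret, list items are quoted argv-style as execve(3) would see them:

    from shlex import quote

    def to_script(items) -> str:
        lines = ["#!/bin/sh"]
        for item in items:
            if isinstance(item, list):
                lines.append(" ".join(quote(arg) for arg in item))  # argv-style
            else:
                lines.append(item)  # raw line, interpreted by sh
        return "\n".join(lines)

    print(to_script(["echo $HOME", ["echo", "two words"]]))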
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index a5b989d0..4cf6e448 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -91,7 +91,7 @@ def handle(name, cfg, cloud, log, _args):
if hostname_fqdn is not None:
cloud.distro.set_option("prefer_fqdn_over_hostname", hostname_fqdn)
- (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
+ (hostname, fqdn, is_default) = util.get_hostname_fqdn(cfg, cloud)
# Check for previous successful invocation of set-hostname
# set-hostname artifact file accounts for both hostname and fqdn
@@ -109,6 +109,10 @@ def handle(name, cfg, cloud, log, _args):
if not hostname_changed:
log.debug("No hostname changes. Skipping set-hostname")
return
+ if is_default and hostname == "localhost":
+ # https://github.com/systemd/systemd/commit/d39079fcaa05e23540d2b1f0270fa31c22a7e9f1
+ log.debug("Hostname is localhost. Let other services handle this.")
+ return
log.debug("Setting the hostname to %s (%s)", fqdn, hostname)
try:
cloud.distro.set_hostname(hostname, fqdn)
diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
index 41a6adf9..2e595934 100644
--- a/cloudinit/config/cc_snap.py
+++ b/cloudinit/config/cc_snap.py
@@ -4,6 +4,7 @@
"""Snap: Install, configure and manage snapd and snap packages."""
+import os
import sys
from textwrap import dedent
@@ -110,10 +111,9 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
SNAP_CMD = "snap"
-ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions"
-def add_assertions(assertions):
+def add_assertions(assertions, assertions_file):
"""Import list of assertions.
Import assertions by concatenating each assertion into a
@@ -133,14 +133,14 @@ def add_assertions(assertions):
)
)
- snap_cmd = [SNAP_CMD, "ack"]
+ snap_cmd = [SNAP_CMD, "ack", assertions_file]
combined = "\n".join(assertions)
for asrt in assertions:
LOG.debug("Snap acking: %s", asrt.split("\n")[0:2])
- util.write_file(ASSERTIONS_FILE, combined.encode("utf-8"))
- subp.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
+ util.write_file(assertions_file, combined.encode("utf-8"))
+ subp.subp(snap_cmd, capture=True)
def run_commands(commands):
@@ -190,7 +190,10 @@ def handle(name, cfg, cloud, log, args):
)
return
- add_assertions(cfgin.get("assertions", []))
+ add_assertions(
+ cfgin.get("assertions", []),
+ os.path.join(cloud.paths.get_ipath_cur(), "snapd.assertions"),
+ )
run_commands(cfgin.get("commands", []))
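With this change the snapd assertions file is derived from the instance
path instead of a hardcoded /var/lib/cloud/instance location, so a
relocated cloud_dir is respected. The join performed by handle(), with the
default layout shown:

    import os

    ipath_cur = "/var/lib/cloud/instance"  # cloud.paths.get_ipath_cur() default
    assertions_file = os.path.join(ipath_cur, "snapd.assertions")
    print(assertions_file)  # /var/lib/cloud/instance/snapd.assertions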
diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py
index 6820a816..a61ea209 100644
--- a/cloudinit/config/cc_spacewalk.py
+++ b/cloudinit/config/cc_spacewalk.py
@@ -99,7 +99,7 @@ def handle(name, cfg, cloud, log, _args):
if not is_registered():
do_register(
spacewalk_server,
- cloud.datasource.get_hostname(fqdn=True),
+ cloud.datasource.get_hostname(fqdn=True).hostname,
proxy=cfg.get("proxy"),
log=log,
activation_key=cfg.get("activation_key"),
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 33c1fd0c..9f71f273 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -213,7 +213,7 @@ def handle(_name, cfg, cloud: Cloud, log: Logger, _args):
reason = "unsupported"
else:
reason = "unrecognized"
- log.warning("Skipping %s ssh_keys" ' entry: "%s"', reason, key)
+ log.warning('Skipping %s ssh_keys entry: "%s"', reason, key)
continue
tgt_fn = CONFIG_KEY_TO_FILE[key][0]
tgt_perms = CONFIG_KEY_TO_FILE[key][1]
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 6a15895d..86cf7254 100644
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -57,7 +57,7 @@ def handle(_name, cfg, cloud, log, args):
)
return
elif not subp.which(SSH_IMPORT_ID_BINARY):
- log.warn(
+ log.warning(
"ssh-import-id is not installed, but module ssh_import_id is "
"configured. Skipping module."
)
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index 57763c31..900db695 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -23,8 +23,8 @@ meta: MetaSchema = {
enable or disable support services such as Livepatch, ESM,
FIPS and FIPS Updates. When attaching a machine to Ubuntu Advantage,
one can also specify services to enable. When the 'enable'
- list is present, any named service will be enabled and all absent
- services will remain disabled.
+ list is present, any named service will supplement the contract-default
+ enabled services.
Note that when enabling FIPS or FIPS updates you will need to schedule
a reboot to ensure the machine is running the FIPS-compliant kernel.
diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py
index 15f621a7..a962bce3 100644
--- a/cloudinit/config/cc_ubuntu_drivers.py
+++ b/cloudinit/config/cc_ubuntu_drivers.py
@@ -5,6 +5,14 @@
import os
from textwrap import dedent
+try:
+ import debconf
+
+ HAS_DEBCONF = True
+except ImportError:
+ debconf = None
+ HAS_DEBCONF = False
+
from cloudinit import log as logging
from cloudinit import subp, temp_utils, type_utils, util
from cloudinit.config.schema import MetaSchema, get_meta_doc
@@ -48,10 +56,6 @@ OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = (
# 'linux-restricted-modules' deb to accept the NVIDIA EULA and the package
# will automatically link the drivers to the running kernel.
-# EOL_XENIAL: can then drop this script and use python3-debconf which is only
-# available in Bionic and later. Can't use python3-debconf currently as it
-# isn't in Xenial and doesn't yet support X_LOADTEMPLATEFILE debconf command.
-
NVIDIA_DEBCONF_CONTENT = """\
Template: linux/nvidia/latelink
Type: boolean
@@ -61,13 +65,8 @@ Description: Late-link NVIDIA kernel modules?
make them available for use.
"""
-NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT = """\
-#!/bin/sh
-# Allow cloud-init to trigger EULA acceptance via registering a debconf
-# template to set linux/nvidia/latelink true
-. /usr/share/debconf/confmodule
-db_x_loadtemplatefile "$1" cloud-init
-"""
+
+X_LOADTEMPLATEFILE = "X_LOADTEMPLATEFILE"
def install_drivers(cfg, pkg_install_func):
@@ -108,15 +107,10 @@ def install_drivers(cfg, pkg_install_func):
# Register and set debconf selection linux/nvidia/latelink = true
tdir = temp_utils.mkdtemp(needs_exe=True)
debconf_file = os.path.join(tdir, "nvidia.template")
- debconf_script = os.path.join(tdir, "nvidia-debconf.sh")
try:
util.write_file(debconf_file, NVIDIA_DEBCONF_CONTENT)
- util.write_file(
- debconf_script,
- util.encode_text(NVIDIA_DRIVER_LATELINK_DEBCONF_SCRIPT),
- mode=0o755,
- )
- subp.subp([debconf_script, debconf_file])
+ with debconf.DebconfCommunicator("cloud-init") as dc:
+ dc.command(X_LOADTEMPLATEFILE, debconf_file)
except Exception as e:
util.logexc(
LOG, "Failed to register NVIDIA debconf template: %s", str(e)
@@ -143,5 +137,11 @@ def handle(name, cfg, cloud, log, _args):
if "drivers" not in cfg:
log.debug("Skipping module named %s, no 'drivers' key in config", name)
return
+ if not HAS_DEBCONF:
+ log.warning(
+ "Skipping module named %s, 'python3-debconf' is not installed",
+ name,
+ )
+ return
install_drivers(cfg["drivers"], cloud.distro.install_packages)
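The guarded-import pattern above is the standard way to depend on an
optional system package (python3-debconf here): try the import at module
load, record availability, and skip gracefully in handle(). Condensed
sketch of the same structure:

    try:
        import debconf  # shipped by python3-debconf, not on PyPI
        HAS_DEBCONF = True
    except ImportError:
        debconf = None
        HAS_DEBCONF = False

    def register_template(path: str) -> None:
        if not HAS_DEBCONF:
            print("python3-debconf not installed; skipping")
            return
        with debconf.DebconfCommunicator("cloud-init") as dc:
            dc.command("X_LOADTEMPLATEFILE", path)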
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index 5334f453..e0d15167 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -104,7 +104,7 @@ def handle(name, cfg, cloud, log, _args):
"DEPRECATED: please use manage_etc_hosts: true instead of"
" 'template'"
)
- (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
+ (hostname, fqdn, _) = util.get_hostname_fqdn(cfg, cloud)
if not hostname:
log.warning(
"Option 'manage_etc_hosts' was set, but no hostname was found"
@@ -126,7 +126,7 @@ def handle(name, cfg, cloud, log, _args):
)
elif manage_hosts == "localhost":
- (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
+ (hostname, fqdn, _) = util.get_hostname_fqdn(cfg, cloud)
if not hostname:
log.warning(
"Option 'manage_etc_hosts' was set, but no hostname was found"
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index 1042abf3..aa769405 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -94,7 +94,12 @@ def handle(name, cfg, cloud, log, _args):
if hostname_fqdn is not None:
cloud.distro.set_option("prefer_fqdn_over_hostname", hostname_fqdn)
- (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
+ (hostname, fqdn, is_default) = util.get_hostname_fqdn(cfg, cloud)
+ if is_default and hostname == "localhost":
+ # https://github.com/systemd/systemd/commit/d39079fcaa05e23540d2b1f0270fa31c22a7e9f1
+ log.debug("Hostname is localhost. Let other services handle this.")
+ return
+
try:
prev_fn = os.path.join(cloud.get_cpath("data"), "previous-hostname")
log.debug("Updating hostname to %s (%s)", fqdn, hostname)
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index f7357192..405207ad 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -29,6 +29,7 @@ distros = [
"eurolinux",
"fedora",
"openEuler",
+ "openmandriva",
"photon",
"rhel",
"rocky",
@@ -99,8 +100,8 @@ meta: MetaSchema = {
# the repository file created. See: man yum.conf for supported
# config keys.
#
- # Write /etc/yum.conf.d/my_package_stream.repo with gpgkey checks
- # on the repo data of the repositoy enabled.
+ # Write /etc/yum.conf.d/my-package-stream.repo with gpgkey checks
+ # on the repo data of the repository enabled.
yum_repos:
my package stream:
baseurl: http://blah.org/pub/epel/testing/5/$basearch/
@@ -117,10 +118,17 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def _canonicalize_id(repo_id):
- repo_id = repo_id.lower().replace("-", "_")
- repo_id = repo_id.replace(" ", "_")
- return repo_id
+def _canonicalize_id(repo_id: str) -> str:
+ """Canonicalize repo id.
+
+ The sole name convention for repo ids is to not contain namespaces,
+ and typically the separator used is `-`. More info:
+ https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/sec-setting_repository_options
+
+ :param repo_id: Repo id to convert.
+ :return: Canonical repo id.
+ """
+ return repo_id.replace(" ", "-")
def _format_repo_value(val):
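Behavioral difference of the _canonicalize_id rewrite, using the repo id
from the updated example above: only spaces are normalized now, with "-"
replacing "_" as the separator, so case and existing dashes survive:

    def canonicalize_old(repo_id: str) -> str:
        return repo_id.lower().replace("-", "_").replace(" ", "_")

    def canonicalize_new(repo_id: str) -> str:
        return repo_id.replace(" ", "-")

    print(canonicalize_old("my package stream"))  # my_package_stream
    print(canonicalize_new("my package stream"))  # my-package-stream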
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 7a6ecf08..1e29ae5a 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -7,16 +7,18 @@ import logging
import os
import re
import sys
+import textwrap
import typing
from collections import defaultdict
from copy import deepcopy
from functools import partial
+from typing import Optional, Tuple, cast
import yaml
from cloudinit import importer, safeyaml
from cloudinit.cmd.devel import read_cfg_paths
-from cloudinit.util import error, find_modules, load_file
+from cloudinit.util import error, get_modules_from_dir, load_file
error = partial(error, sys_exit=True)
LOG = logging.getLogger(__name__)
@@ -196,6 +198,7 @@ def validate_cloudconfig_schema(
schema: dict = None,
strict: bool = False,
strict_metaschema: bool = False,
+ log_details: bool = True,
):
"""Validate provided config meets the schema definition.
@@ -208,6 +211,9 @@ def validate_cloudconfig_schema(
logging warnings.
@param strict_metaschema: Boolean, when True validates schema using strict
metaschema definition at runtime (currently unused)
+ @param log_details: Boolean, when True logs details of validation errors.
+ If there are concerns about logging sensitive userdata, this should
+ be set to False.
@raises: SchemaValidationError when provided config does not validate
against the provided schema.
@@ -226,18 +232,23 @@ def validate_cloudconfig_schema(
return
validator = cloudinitValidator(schema, format_checker=FormatChecker())
- errors = ()
+ errors: Tuple[Tuple[str, str], ...] = ()
for error in sorted(validator.iter_errors(config), key=lambda e: e.path):
path = ".".join([str(p) for p in error.path])
errors += ((path, error.message),)
if errors:
if strict:
+ # This could output/log sensitive data
raise SchemaValidationError(errors)
- else:
+ if log_details:
messages = ["{0}: {1}".format(k, msg) for k, msg in errors]
- LOG.warning(
- "Invalid cloud-config provided:\n%s", "\n".join(messages)
+ details = "\n" + "\n".join(messages)
+ else:
+ details = (
+ "Please run 'sudo cloud-init schema --system' to "
+ "see the schema errors."
)
+ LOG.warning("Invalid cloud-config provided: %s", details)
def annotated_cloudconfig_file(
@@ -413,7 +424,7 @@ def _get_property_type(property_dict: dict, defs: dict) -> str:
property_types.extend(
[
subschema["type"]
- for subschema in property_dict.get("oneOf")
+ for subschema in property_dict.get("oneOf", {})
if subschema.get("type")
]
)
@@ -562,9 +573,7 @@ def _get_examples(meta: MetaSchema) -> str:
return ""
rst_content = SCHEMA_EXAMPLES_HEADER
for count, example in enumerate(examples):
- # Python2.6 is missing textwrapper.indent
- lines = example.split("\n")
- indented_lines = [" {0}".format(line) for line in lines]
+ indented_lines = textwrap.indent(example, " ").split("\n")
if rst_content != SCHEMA_EXAMPLES_HEADER:
indented_lines.insert(
0, SCHEMA_EXAMPLES_SPACER_TEMPLATE.format(count + 1)
@@ -573,7 +582,7 @@ def _get_examples(meta: MetaSchema) -> str:
return rst_content
-def get_meta_doc(meta: MetaSchema, schema: dict = None) -> str:
+def get_meta_doc(meta: MetaSchema, schema: Optional[dict] = None) -> str:
"""Return reStructured text rendering the provided metadata.
@param meta: Dict of metadata to render.
@@ -616,7 +625,8 @@ def get_meta_doc(meta: MetaSchema, schema: dict = None) -> str:
meta_copy["property_header"] = ""
defs = schema.get("$defs", {})
if defs.get(meta["id"]):
- schema = defs.get(meta["id"])
+ schema = defs.get(meta["id"], {})
+ schema = cast(dict, schema)
try:
meta_copy["property_doc"] = _get_property_doc(schema, defs=defs)
except AttributeError:
@@ -634,7 +644,7 @@ def get_meta_doc(meta: MetaSchema, schema: dict = None) -> str:
def get_modules() -> dict:
configs_dir = os.path.dirname(os.path.abspath(__file__))
- return find_modules(configs_dir)
+ return get_modules_from_dir(configs_dir)
def load_doc(requested_modules: list) -> str:
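Intended use of the new log_details flag (it mirrors the main.py hunk
earlier in this diff): boot-time validation of raw user-data disables the
detailed messages so potentially sensitive values never land in the log,
pointing the user at the schema subcommand instead. Sketch:

    from cloudinit.config.schema import validate_cloudconfig_schema

    config = {"runcmd": "should-be-a-list"}  # example invalid cloud-config
    validate_cloudconfig_schema(config, strict=False, log_details=False)
    # warning logged: Invalid cloud-config provided: Please run
    # 'sudo cloud-init schema --system' to see the schema errors.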
diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json
index d409d5d6..7bbfcb1a 100644
--- a/cloudinit/config/schemas/schema-cloud-config-v1.json
+++ b/cloudinit/config/schemas/schema-cloud-config-v1.json
@@ -34,8 +34,10 @@
"type": "string"
},
"groups": {
- "description": "Optional comma-separated string of groups to add the user to.",
- "type": "string"
+ "description": "Optional comma-separated string or list of groups to add the user to.",
+ "type": ["string", "array"],
+ "items": {"type": "string"},
+ "minItems": 1
},
"homedir": {
"description": "Optional home dir for user. Default: ``/home/<username>``",
@@ -2089,7 +2091,7 @@
},
"content": {
"type": "string",
- "default": "",
+ "default": "''",
"description": "Optional content to write to the provided ``path``. When content is present and encoding is not 'text/plain', decode the content prior to writing. Default: ``''``"
},
"owner": {
@@ -2099,7 +2101,7 @@
},
"permissions": {
"type": "string",
- "default": "0o644",
+ "default": "'0o644'",
"description": "Optional file permissions to set on ``path`` represented as an octal string '0###'. Default: ``0o644``"
},
"encoding": {
@@ -2214,6 +2216,110 @@
"additionalProperties": false
}
}
+ },
+ "reporting_config": {
+ "type": "object",
+ "properties": {
+ "reporting": {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^.+$": {
+ "label": "<arbitrary_name>",
+ "type": "object",
+ "oneOf": [
+ {
+ "additionalProperties": false,
+ "required": ["type"],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": ["log"]
+ },
+ "level": {
+ "type": "string",
+ "enum": ["DEBUG", "INFO", "WARN", "ERROR", "FATAL"],
+ "default": "DEBUG"
+ }
+ }
+ },
+ {
+ "additionalProperties": false,
+ "required": ["type"],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": ["print"]
+ }
+ }
+ },
+ {
+ "additionalProperties": false,
+ "required": ["type", "endpoint"],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": ["webhook"]
+ },
+ "endpoint": {
+ "type": "string",
+ "format": "uri",
+ "description": "The URL to send the event to."
+ },
+ "consumer_key": {
+ "type": "string",
+ "description": "The consumer key to use for the webhook."
+ },
+ "token_key": {
+ "type": "string",
+ "description": "The token key to use for the webhook."
+ },
+ "token_secret": {
+ "type": "string",
+ "description": "The token secret to use for the webhook."
+ },
+ "consumer_secret": {
+ "type": "string",
+ "description": "The consumer secret to use for the webhook."
+ },
+ "timeout": {
+ "type": "number",
+ "minimum": 0,
+ "description": "The timeout in seconds to wait for a response from the webhook."
+ },
+ "retries": {
+ "type": "integer",
+ "minimum": 0,
+ "description": "The number of times to retry sending the webhook."
+ }
+ }
+ },
+ {
+ "additionalProperties": false,
+ "required": ["type"],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": ["hyperv"]
+ },
+ "kvp_file_path": {
+ "type": "string",
+ "description": "The path to the KVP file to use for the hyperv reporter.",
+ "default": "/var/lib/hyperv/.kvp_pool_1"
+ },
+ "event_types": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
}
},
"allOf": [
@@ -2268,6 +2374,7 @@
{ "$ref": "#/$defs/cc_users_groups"},
{ "$ref": "#/$defs/cc_write_files"},
{ "$ref": "#/$defs/cc_yum_add_repo"},
- { "$ref": "#/$defs/cc_zypper_add_repo"}
+ { "$ref": "#/$defs/cc_zypper_add_repo"},
+ { "$ref": "#/$defs/reporting_config"}
]
}
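The new reporting_config definition can be exercised directly with the jsonschema library cloud-init already depends on. A sketch validating a webhook entry against a trimmed copy of the oneOf branch above (schema pared down here for brevity):

    from jsonschema import Draft4Validator

    webhook_schema = {
        "type": "object",
        "additionalProperties": False,
        "required": ["type", "endpoint"],
        "properties": {
            "type": {"type": "string", "enum": ["webhook"]},
            "endpoint": {"type": "string", "format": "uri"},
            "timeout": {"type": "number", "minimum": 0},
            "retries": {"type": "integer", "minimum": 0},
        },
    }

    config = {"type": "webhook", "endpoint": "http://example.com", "retries": 3}
    errors = list(Draft4Validator(webhook_schema).iter_errors(config))
    print(errors)  # []: the entry satisfies the trimmed schema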
diff --git a/cloudinit/config/schemas/versions.schema.cloud-config.json b/cloudinit/config/schemas/versions.schema.cloud-config.json
index 4ff3b4d1..c606085c 100644
--- a/cloudinit/config/schemas/versions.schema.cloud-config.json
+++ b/cloudinit/config/schemas/versions.schema.cloud-config.json
@@ -11,7 +11,7 @@
}
}
},
- {"$ref": "./schema-cloud-config-v1.json"}
+ {"$ref": "https://raw.githubusercontent.com/canonical/cloud-init/main/cloudinit/config/schemas/schema-cloud-config-v1.json"}
]
}
]
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index b034e2c8..3d771c2a 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -47,6 +47,7 @@ OSFAMILIES = {
"fedora",
"miraclelinux",
"openEuler",
+ "openmandriva",
"photon",
"rhel",
"rocky",
@@ -1063,7 +1064,7 @@ def _get_arch_package_mirror_info(package_mirrors, arch):
return default
-def fetch(name) -> Type[Distro]:
+def fetch(name: str) -> Type[Distro]:
locs, looked_locs = importer.find_module(name, ["", __name__], ["Distro"])
if not locs:
raise ImportError(
diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py
index bab222b5..fca9f9fa 100644
--- a/cloudinit/distros/bsd.py
+++ b/cloudinit/distros/bsd.py
@@ -1,4 +1,5 @@
import platform
+from typing import List, Optional
from cloudinit import distros, helpers
from cloudinit import log as logging
@@ -20,12 +21,12 @@ class BSD(distros.Distro):
shutdown_options_map = {"halt": "-H", "poweroff": "-p", "reboot": "-r"}
# Set in BSD distro subclasses
- group_add_cmd_prefix = []
- pkg_cmd_install_prefix = []
- pkg_cmd_remove_prefix = []
+ group_add_cmd_prefix: List[str] = []
+ pkg_cmd_install_prefix: List[str] = []
+ pkg_cmd_remove_prefix: List[str] = []
# There is no update/upgrade on OpenBSD
- pkg_cmd_update_prefix = None
- pkg_cmd_upgrade_prefix = None
+ pkg_cmd_update_prefix: Optional[List[str]] = None
+ pkg_cmd_upgrade_prefix: Optional[List[str]] = None
def __init__(self, name, cfg, paths):
super().__init__(name, cfg, paths)
diff --git a/cloudinit/distros/netbsd.py b/cloudinit/distros/netbsd.py
index c0d6390f..b3232feb 100644
--- a/cloudinit/distros/netbsd.py
+++ b/cloudinit/distros/netbsd.py
@@ -89,15 +89,6 @@ class NetBSD(cloudinit.distros.bsd.BSD):
def set_passwd(self, user, passwd, hashed=False):
if hashed:
hashed_pw = passwd
- elif not hasattr(crypt, "METHOD_BLOWFISH"):
- # crypt.METHOD_BLOWFISH comes with Python 3.7 which is available
- # on NetBSD 7 and 8.
- LOG.error(
- "Cannot set non-encrypted password for user %s. "
- "Python >= 3.7 is required.",
- user,
- )
- return
else:
method = crypt.METHOD_BLOWFISH # pylint: disable=E1101
hashed_pw = crypt.crypt(passwd, crypt.mksalt(method))
diff --git a/cloudinit/distros/openmandriva.py b/cloudinit/distros/openmandriva.py
new file mode 100644
index 00000000..b4ba8439
--- /dev/null
+++ b/cloudinit/distros/openmandriva.py
@@ -0,0 +1,14 @@
+# Copyright (C) 2021 LinDev
+#
+# Author: Bernhard Rosenkraenzer <bero@lindev.ch>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import fedora
+
+
+class Distro(fedora.Distro):
+ pass
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index 0ef4e147..c2bed1bf 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -36,6 +36,13 @@ class ResolvConf(object):
return dm[0]
return None
+ @local_domain.setter
+ def local_domain(self, domain):
+ self.parse()
+ self._remove_option("domain")
+ self._contents.append(("option", ["domain", str(domain), ""]))
+ return domain
+
@property
def search_domains(self):
self.parse()
@@ -133,13 +140,6 @@ class ResolvConf(object):
self._contents.append(("option", ["search", s_list, ""]))
return flat_sds
- @local_domain.setter
- def local_domain(self, domain):
- self.parse()
- self._remove_option("domain")
- self._contents.append(("option", ["domain", str(domain), ""]))
- return domain
-
def _parse(self, contents):
entries = []
for (i, line) in enumerate(contents.splitlines()):
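The setter move is purely organizational: Python only requires that `@local_domain.setter` appear after the `@property` that bound the name, so placing it immediately after the getter keeps the pair adjacent without changing behavior. The pattern in miniature:

    class ResolvConfSketch:
        def __init__(self):
            self._domain = None

        @property
        def local_domain(self):
            return self._domain

        @local_domain.setter  # must follow the @property definition
        def local_domain(self, domain):
            self._domain = str(domain)

    rc = ResolvConfSketch()
    rc.local_domain = "example.org"
    print(rc.local_domain)  # example.org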
diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py
index ec6470a9..4e75b6ec 100644
--- a/cloudinit/distros/ubuntu.py
+++ b/cloudinit/distros/ubuntu.py
@@ -11,7 +11,6 @@
import copy
-from cloudinit import util
from cloudinit.distros import PREFERRED_NTP_CLIENTS, debian
@@ -39,14 +38,7 @@ class Distro(debian.Distro):
def preferred_ntp_clients(self):
"""The preferred ntp client is dependent on the version."""
if not self._preferred_ntp_clients:
- (_name, _version, codename) = util.system_info()["dist"]
- # Xenial cloud-init only installed ntp, UbuntuCore has timesyncd.
- if codename == "xenial" and not util.system_is_snappy():
- self._preferred_ntp_clients = ["ntp"]
- else:
- self._preferred_ntp_clients = copy.deepcopy(
- PREFERRED_NTP_CLIENTS
- )
+ self._preferred_ntp_clients = copy.deepcopy(PREFERRED_NTP_CLIENTS)
return self._preferred_ntp_clients
diff --git a/cloudinit/dmi.py b/cloudinit/dmi.py
index 3a999d41..dff9ab0f 100644
--- a/cloudinit/dmi.py
+++ b/cloudinit/dmi.py
@@ -1,6 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
from collections import namedtuple
+from typing import Optional
from cloudinit import log as logging
from cloudinit import subp
@@ -11,8 +12,8 @@ LOG = logging.getLogger(__name__)
# Path for DMI Data
DMI_SYS_PATH = "/sys/class/dmi/id"
-kdmi = namedtuple("KernelNames", ["linux", "freebsd"])
-kdmi.__new__.defaults__ = (None, None)
+KernelNames = namedtuple("KernelNames", ["linux", "freebsd"])
+KernelNames.__new__.__defaults__ = (None, None)
# FreeBSD's kenv(1) and Linux /sys/class/dmi/id/* both use different names from
# dmidecode. The values are the same, and ultimately what we're interested in.
@@ -20,27 +21,45 @@ kdmi.__new__.defaults__ = (None, None)
# This is our canonical translation table. If we add more tools on other
# platforms to find dmidecode's values, their keys need to be put in here.
DMIDECODE_TO_KERNEL = {
- "baseboard-asset-tag": kdmi("board_asset_tag", "smbios.planar.tag"),
- "baseboard-manufacturer": kdmi("board_vendor", "smbios.planar.maker"),
- "baseboard-product-name": kdmi("board_name", "smbios.planar.product"),
- "baseboard-serial-number": kdmi("board_serial", "smbios.planar.serial"),
- "baseboard-version": kdmi("board_version", "smbios.planar.version"),
- "bios-release-date": kdmi("bios_date", "smbios.bios.reldate"),
- "bios-vendor": kdmi("bios_vendor", "smbios.bios.vendor"),
- "bios-version": kdmi("bios_version", "smbios.bios.version"),
- "chassis-asset-tag": kdmi("chassis_asset_tag", "smbios.chassis.tag"),
- "chassis-manufacturer": kdmi("chassis_vendor", "smbios.chassis.maker"),
- "chassis-serial-number": kdmi("chassis_serial", "smbios.chassis.serial"),
- "chassis-version": kdmi("chassis_version", "smbios.chassis.version"),
- "system-manufacturer": kdmi("sys_vendor", "smbios.system.maker"),
- "system-product-name": kdmi("product_name", "smbios.system.product"),
- "system-serial-number": kdmi("product_serial", "smbios.system.serial"),
- "system-uuid": kdmi("product_uuid", "smbios.system.uuid"),
- "system-version": kdmi("product_version", "smbios.system.version"),
+ "baseboard-asset-tag": KernelNames("board_asset_tag", "smbios.planar.tag"),
+ "baseboard-manufacturer": KernelNames(
+ "board_vendor", "smbios.planar.maker"
+ ),
+ "baseboard-product-name": KernelNames(
+ "board_name", "smbios.planar.product"
+ ),
+ "baseboard-serial-number": KernelNames(
+ "board_serial", "smbios.planar.serial"
+ ),
+ "baseboard-version": KernelNames("board_version", "smbios.planar.version"),
+ "bios-release-date": KernelNames("bios_date", "smbios.bios.reldate"),
+ "bios-vendor": KernelNames("bios_vendor", "smbios.bios.vendor"),
+ "bios-version": KernelNames("bios_version", "smbios.bios.version"),
+ "chassis-asset-tag": KernelNames(
+ "chassis_asset_tag", "smbios.chassis.tag"
+ ),
+ "chassis-manufacturer": KernelNames(
+ "chassis_vendor", "smbios.chassis.maker"
+ ),
+ "chassis-serial-number": KernelNames(
+ "chassis_serial", "smbios.chassis.serial"
+ ),
+ "chassis-version": KernelNames(
+ "chassis_version", "smbios.chassis.version"
+ ),
+ "system-manufacturer": KernelNames("sys_vendor", "smbios.system.maker"),
+ "system-product-name": KernelNames(
+ "product_name", "smbios.system.product"
+ ),
+ "system-serial-number": KernelNames(
+ "product_serial", "smbios.system.serial"
+ ),
+ "system-uuid": KernelNames("product_uuid", "smbios.system.uuid"),
+ "system-version": KernelNames("product_version", "smbios.system.version"),
}
-def _read_dmi_syspath(key):
+def _read_dmi_syspath(key: str) -> Optional[str]:
"""
Reads dmi data from /sys/class/dmi/id
"""
@@ -78,7 +97,7 @@ def _read_dmi_syspath(key):
return None
-def _read_kenv(key):
+def _read_kenv(key: str) -> Optional[str]:
"""
Reads dmi data from FreeBSD's kenv(1)
"""
@@ -96,12 +115,11 @@ def _read_kenv(key):
return result
except subp.ProcessExecutionError as e:
LOG.debug("failed kenv cmd: %s\n%s", cmd, e)
- return None
return None
-def _call_dmidecode(key, dmidecode_path):
+def _call_dmidecode(key: str, dmidecode_path: str) -> Optional[str]:
"""
     Calls out to dmidecode to get the data out. This is mostly for supporting
     OSes without /sys/class/dmi/id support.
@@ -119,7 +137,7 @@ def _call_dmidecode(key, dmidecode_path):
return None
-def read_dmi_data(key):
+def read_dmi_data(key: str) -> Optional[str]:
"""
Wrapper for reading DMI data.
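Beyond readability, the rename fixes a latent bug: the old code assigned to `kdmi.__new__.defaults__` (note the missing leading underscores), which only created an unused function attribute; `__defaults__` is what actually supplies argument defaults. A quick demonstration, with the Python 3.7+ `defaults=` keyword shown as an alternative:

    from collections import namedtuple

    KernelNames = namedtuple("KernelNames", ["linux", "freebsd"])
    KernelNames.__new__.__defaults__ = (None, None)

    print(KernelNames("board_name"))  # KernelNames(linux='board_name', freebsd=None)
    print(KernelNames())              # KernelNames(linux=None, freebsd=None)

    # Equivalent on Python 3.7+:
    KernelNames37 = namedtuple(
        "KernelNames", ["linux", "freebsd"], defaults=(None, None)
    )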
diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py
index 1f9caa64..b8196cb1 100644
--- a/cloudinit/handlers/jinja_template.py
+++ b/cloudinit/handlers/jinja_template.py
@@ -4,8 +4,16 @@ import copy
import os
import re
from errno import EACCES
-from typing import Optional
+from typing import Optional, Type
+from cloudinit import handlers
+from cloudinit import log as logging
+from cloudinit.settings import PER_ALWAYS
+from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
+from cloudinit.templater import MISSING_JINJA_PREFIX, render_string
+from cloudinit.util import b64d, json_dumps, load_file, load_json
+
+JUndefinedError: Type[Exception]
try:
from jinja2.exceptions import UndefinedError as JUndefinedError
from jinja2.lexer import operator_re
@@ -14,13 +22,6 @@ except ImportError:
JUndefinedError = Exception
operator_re = re.compile(r"[-.]")
-from cloudinit import handlers
-from cloudinit import log as logging
-from cloudinit.settings import PER_ALWAYS
-from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
-from cloudinit.templater import MISSING_JINJA_PREFIX, render_string
-from cloudinit.util import b64d, json_dumps, load_file, load_json
-
LOG = logging.getLogger(__name__)
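Hoisting the cloudinit imports above the guarded jinja2 import keeps import ordering conventional, while the bare `JUndefinedError: Type[Exception]` annotation declares the name for type checkers, since it is bound on either branch of the try/except. The same pattern in isolation (the helper below is illustrative):

    from typing import Type

    TemplateError: Type[Exception]
    try:
        from jinja2.exceptions import UndefinedError as TemplateError
    except ImportError:
        TemplateError = Exception  # degrade gracefully without jinja2

    def render_or_default(render, default):
        try:
            return render()
        except TemplateError:
            return default

    print(render_or_default(lambda: "rendered", "fallback"))  # rendered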
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index d0db4b5b..406d4582 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -330,7 +330,7 @@ class ContentHandlers(object):
class Paths(persistence.CloudInitPickleMixin):
_ci_pkl_version = 1
- def __init__(self, path_cfgs, ds=None):
+ def __init__(self, path_cfgs: dict, ds=None):
self.cfgs = path_cfgs
# Populate all the initial paths
self.cloud_dir = path_cfgs.get("cloud_dir", "/var/lib/cloud")
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 3297a318..f5545fc1 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -250,7 +250,7 @@ def has_netfail_standby_feature(devname):
return features[62] == "1"
-def is_netfail_master(devname, driver=None):
+def is_netfail_master(devname, driver=None) -> bool:
"""A device is a "netfail master" device if:
- The device does NOT have the 'master' sysfs attribute
@@ -992,7 +992,7 @@ def get_interfaces_by_mac_on_linux(blacklist_drivers=None) -> dict:
"""Build a dictionary of tuples {mac: name}.
Bridges and any devices that have a 'stolen' mac are excluded."""
- ret = {}
+ ret: dict = {}
for name, mac, _driver, _devid in get_interfaces(
blacklist_drivers=blacklist_drivers
):
@@ -1139,8 +1139,9 @@ def has_url_connectivity(url_data: Dict[str, Any]) -> bool:
return True
-def network_validator(check_cb: Callable, address: str, **kwargs) -> bool:
- """Use a function to determine whether address meets criteria.
+def maybe_get_address(convert_to_address: Callable, address: str, **kwargs):
+ """Use a function to return an address. If conversion throws a ValueError
+ exception return False.
     :param convert_to_address:
         Conversion callable; raises ValueError when `address` is invalid.
@@ -1148,11 +1149,11 @@ def network_validator(check_cb: Callable, address: str, **kwargs) -> bool:
The string to test.
:return:
- A bool indicating if the string passed the test.
+ Address or False
"""
try:
- return bool(check_cb(address, **kwargs))
+ return convert_to_address(address, **kwargs)
except ValueError:
return False
@@ -1166,7 +1167,7 @@ def is_ip_address(address: str) -> bool:
:return:
A bool indicating if the string is an IP address or not.
"""
- return network_validator(ipaddress.ip_address, address)
+ return bool(maybe_get_address(ipaddress.ip_address, address))
def is_ipv4_address(address: str) -> bool:
@@ -1178,7 +1179,7 @@ def is_ipv4_address(address: str) -> bool:
:return:
A bool indicating if the string is an IPv4 address or not.
"""
- return network_validator(ipaddress.IPv4Address, address)
+ return bool(maybe_get_address(ipaddress.IPv4Address, address))
def is_ipv6_address(address: str) -> bool:
@@ -1190,7 +1191,7 @@ def is_ipv6_address(address: str) -> bool:
:return:
         A bool indicating if the string is an IPv6 address or not.
"""
- return network_validator(ipaddress.IPv6Address, address)
+ return bool(maybe_get_address(ipaddress.IPv6Address, address))
def is_ip_network(address: str) -> bool:
@@ -1202,7 +1203,7 @@ def is_ip_network(address: str) -> bool:
:return:
         A bool indicating if the string is an IP network or not.
"""
- return network_validator(ipaddress.ip_network, address, strict=False)
+ return bool(maybe_get_address(ipaddress.ip_network, address, strict=False))
def is_ipv4_network(address: str) -> bool:
@@ -1214,7 +1215,9 @@ def is_ipv4_network(address: str) -> bool:
:return:
         A bool indicating if the string is an IPv4 network or not.
"""
- return network_validator(ipaddress.IPv4Network, address, strict=False)
+ return bool(
+ maybe_get_address(ipaddress.IPv4Network, address, strict=False)
+ )
def is_ipv6_network(address: str) -> bool:
@@ -1226,7 +1229,9 @@ def is_ipv6_network(address: str) -> bool:
:return:
         A bool indicating if the string is an IPv6 network or not.
"""
- return network_validator(ipaddress.IPv6Network, address, strict=False)
+ return bool(
+ maybe_get_address(ipaddress.IPv6Network, address, strict=False)
+ )
def subnet_is_ipv6(subnet) -> bool:
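The rename reflects the helper's actual contract: it now returns the parsed address object (or False), so callers can reuse the parse rather than only a verdict, and the is_* predicates simply wrap it in bool(). A condensed sketch:

    import ipaddress
    from typing import Callable

    def maybe_get_address(convert_to_address: Callable, address: str, **kwargs):
        try:
            return convert_to_address(address, **kwargs)
        except ValueError:
            return False

    addr = maybe_get_address(ipaddress.ip_address, "10.0.0.5")
    print(bool(addr), addr.is_private if addr else None)     # True True
    print(maybe_get_address(ipaddress.ip_address, "bogus"))  # False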
@@ -1304,265 +1309,5 @@ def mask_and_ipv4_to_bcast_addr(mask: str, ip: str) -> str:
)
-class EphemeralIPv4Network(object):
- """Context manager which sets up temporary static network configuration.
-
- No operations are performed if the provided interface already has the
- specified configuration.
- This can be verified with the connectivity_url_data.
- If unconnected, bring up the interface with valid ip, prefix and broadcast.
- If router is provided setup a default route for that interface. Upon
- context exit, clean up the interface leaving no configuration behind.
- """
-
- def __init__(
- self,
- interface,
- ip,
- prefix_or_mask,
- broadcast,
- router=None,
- connectivity_url_data: Dict[str, Any] = None,
- static_routes=None,
- ):
- """Setup context manager and validate call signature.
-
- @param interface: Name of the network interface to bring up.
- @param ip: IP address to assign to the interface.
- @param prefix_or_mask: Either netmask of the format X.X.X.X or an int
- prefix.
- @param broadcast: Broadcast address for the IPv4 network.
- @param router: Optionally the default gateway IP.
- @param connectivity_url_data: Optionally, a URL to verify if a usable
- connection already exists.
- @param static_routes: Optionally a list of static routes from DHCP
- """
- if not all([interface, ip, prefix_or_mask, broadcast]):
- raise ValueError(
- "Cannot init network on {0} with {1}/{2} and bcast {3}".format(
- interface, ip, prefix_or_mask, broadcast
- )
- )
- try:
- self.prefix = ipv4_mask_to_net_prefix(prefix_or_mask)
- except ValueError as e:
- raise ValueError(
- "Cannot setup network, invalid prefix or "
- "netmask: {0}".format(e)
- ) from e
-
- self.connectivity_url_data = connectivity_url_data
- self.interface = interface
- self.ip = ip
- self.broadcast = broadcast
- self.router = router
- self.static_routes = static_routes
- self.cleanup_cmds = [] # List of commands to run to cleanup state.
-
- def __enter__(self):
- """Perform ephemeral network setup if interface is not connected."""
- if self.connectivity_url_data:
- if has_url_connectivity(self.connectivity_url_data):
- LOG.debug(
- "Skip ephemeral network setup, instance has connectivity"
- " to %s",
- self.connectivity_url_data["url"],
- )
- return
-
- self._bringup_device()
-
- # rfc3442 requires us to ignore the router config *if* classless static
- # routes are provided.
- #
- # https://tools.ietf.org/html/rfc3442
- #
- # If the DHCP server returns both a Classless Static Routes option and
- # a Router option, the DHCP client MUST ignore the Router option.
- #
- # Similarly, if the DHCP server returns both a Classless Static Routes
- # option and a Static Routes option, the DHCP client MUST ignore the
- # Static Routes option.
- if self.static_routes:
- self._bringup_static_routes()
- elif self.router:
- self._bringup_router()
-
- def __exit__(self, excp_type, excp_value, excp_traceback):
- """Teardown anything we set up."""
- for cmd in self.cleanup_cmds:
- subp.subp(cmd, capture=True)
-
- def _delete_address(self, address, prefix):
- """Perform the ip command to remove the specified address."""
- subp.subp(
- [
- "ip",
- "-family",
- "inet",
- "addr",
- "del",
- "%s/%s" % (address, prefix),
- "dev",
- self.interface,
- ],
- capture=True,
- )
-
- def _bringup_device(self):
- """Perform the ip comands to fully setup the device."""
- cidr = "{0}/{1}".format(self.ip, self.prefix)
- LOG.debug(
- "Attempting setup of ephemeral network on %s with %s brd %s",
- self.interface,
- cidr,
- self.broadcast,
- )
- try:
- subp.subp(
- [
- "ip",
- "-family",
- "inet",
- "addr",
- "add",
- cidr,
- "broadcast",
- self.broadcast,
- "dev",
- self.interface,
- ],
- capture=True,
- update_env={"LANG": "C"},
- )
- except subp.ProcessExecutionError as e:
- if "File exists" not in e.stderr:
- raise
- LOG.debug(
- "Skip ephemeral network setup, %s already has address %s",
- self.interface,
- self.ip,
- )
- else:
- # Address creation success, bring up device and queue cleanup
- subp.subp(
- [
- "ip",
- "-family",
- "inet",
- "link",
- "set",
- "dev",
- self.interface,
- "up",
- ],
- capture=True,
- )
- self.cleanup_cmds.append(
- [
- "ip",
- "-family",
- "inet",
- "link",
- "set",
- "dev",
- self.interface,
- "down",
- ]
- )
- self.cleanup_cmds.append(
- [
- "ip",
- "-family",
- "inet",
- "addr",
- "del",
- cidr,
- "dev",
- self.interface,
- ]
- )
-
- def _bringup_static_routes(self):
- # static_routes = [("169.254.169.254/32", "130.56.248.255"),
- # ("0.0.0.0/0", "130.56.240.1")]
- for net_address, gateway in self.static_routes:
- via_arg = []
- if gateway != "0.0.0.0":
- via_arg = ["via", gateway]
- subp.subp(
- ["ip", "-4", "route", "append", net_address]
- + via_arg
- + ["dev", self.interface],
- capture=True,
- )
- self.cleanup_cmds.insert(
- 0,
- ["ip", "-4", "route", "del", net_address]
- + via_arg
- + ["dev", self.interface],
- )
-
- def _bringup_router(self):
- """Perform the ip commands to fully setup the router if needed."""
- # Check if a default route exists and exit if it does
- out, _ = subp.subp(["ip", "route", "show", "0.0.0.0/0"], capture=True)
- if "default" in out:
- LOG.debug(
- "Skip ephemeral route setup. %s already has default route: %s",
- self.interface,
- out.strip(),
- )
- return
- subp.subp(
- [
- "ip",
- "-4",
- "route",
- "add",
- self.router,
- "dev",
- self.interface,
- "src",
- self.ip,
- ],
- capture=True,
- )
- self.cleanup_cmds.insert(
- 0,
- [
- "ip",
- "-4",
- "route",
- "del",
- self.router,
- "dev",
- self.interface,
- "src",
- self.ip,
- ],
- )
- subp.subp(
- [
- "ip",
- "-4",
- "route",
- "add",
- "default",
- "via",
- self.router,
- "dev",
- self.interface,
- ],
- capture=True,
- )
- self.cleanup_cmds.insert(
- 0, ["ip", "-4", "route", "del", "default", "dev", self.interface]
- )
-
-
class RendererNotFoundError(RuntimeError):
pass
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 53f8c686..fd1d4256 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -10,18 +10,11 @@ import re
import signal
import time
from io import StringIO
-from typing import Any, Dict
import configobj
from cloudinit import subp, temp_utils, util
-from cloudinit.net import (
- EphemeralIPv4Network,
- find_fallback_nic,
- get_devicelist,
- has_url_connectivity,
- mask_and_ipv4_to_bcast_addr,
-)
+from cloudinit.net import find_fallback_nic, get_devicelist
LOG = logging.getLogger(__name__)
@@ -48,111 +41,6 @@ class NoDHCPLeaseMissingDhclientError(NoDHCPLeaseError):
"""Raised when unable to find dhclient."""
-class EphemeralDHCPv4(object):
- def __init__(
- self,
- iface=None,
- connectivity_url_data: Dict[str, Any] = None,
- dhcp_log_func=None,
- ):
- self.iface = iface
- self._ephipv4 = None
- self.lease = None
- self.dhcp_log_func = dhcp_log_func
- self.connectivity_url_data = connectivity_url_data
-
- def __enter__(self):
- """Setup sandboxed dhcp context, unless connectivity_url can already be
- reached."""
- if self.connectivity_url_data:
- if has_url_connectivity(self.connectivity_url_data):
- LOG.debug(
- "Skip ephemeral DHCP setup, instance has connectivity"
- " to %s",
- self.connectivity_url_data,
- )
- return
- return self.obtain_lease()
-
- def __exit__(self, excp_type, excp_value, excp_traceback):
- """Teardown sandboxed dhcp context."""
- self.clean_network()
-
- def clean_network(self):
- """Exit _ephipv4 context to teardown of ip configuration performed."""
- if self.lease:
- self.lease = None
- if not self._ephipv4:
- return
- self._ephipv4.__exit__(None, None, None)
-
- def obtain_lease(self):
- """Perform dhcp discovery in a sandboxed environment if possible.
-
- @return: A dict representing dhcp options on the most recent lease
- obtained from the dhclient discovery if run, otherwise an error
- is raised.
-
- @raises: NoDHCPLeaseError if no leases could be obtained.
- """
- if self.lease:
- return self.lease
- leases = maybe_perform_dhcp_discovery(self.iface, self.dhcp_log_func)
- if not leases:
- raise NoDHCPLeaseError()
- self.lease = leases[-1]
- LOG.debug(
- "Received dhcp lease on %s for %s/%s",
- self.lease["interface"],
- self.lease["fixed-address"],
- self.lease["subnet-mask"],
- )
- nmap = {
- "interface": "interface",
- "ip": "fixed-address",
- "prefix_or_mask": "subnet-mask",
- "broadcast": "broadcast-address",
- "static_routes": [
- "rfc3442-classless-static-routes",
- "classless-static-routes",
- ],
- "router": "routers",
- }
- kwargs = self.extract_dhcp_options_mapping(nmap)
- if not kwargs["broadcast"]:
- kwargs["broadcast"] = mask_and_ipv4_to_bcast_addr(
- kwargs["prefix_or_mask"], kwargs["ip"]
- )
- if kwargs["static_routes"]:
- kwargs["static_routes"] = parse_static_routes(
- kwargs["static_routes"]
- )
- if self.connectivity_url_data:
- kwargs["connectivity_url_data"] = self.connectivity_url_data
- ephipv4 = EphemeralIPv4Network(**kwargs)
- ephipv4.__enter__()
- self._ephipv4 = ephipv4
- return self.lease
-
- def extract_dhcp_options_mapping(self, nmap):
- result = {}
- for internal_reference, lease_option_names in nmap.items():
- if isinstance(lease_option_names, list):
- self.get_first_option_value(
- internal_reference, lease_option_names, result
- )
- else:
- result[internal_reference] = self.lease.get(lease_option_names)
- return result
-
- def get_first_option_value(
- self, internal_mapping, lease_option_names, result
- ):
- for different_names in lease_option_names:
- if not result.get(internal_mapping):
- result[internal_mapping] = self.lease.get(different_names)
-
-
def maybe_perform_dhcp_discovery(nic=None, dhcp_log_func=None):
"""Perform dhcp discovery if nic valid and dhclient command exists.
diff --git a/cloudinit/net/ephemeral.py b/cloudinit/net/ephemeral.py
new file mode 100644
index 00000000..c0d83d29
--- /dev/null
+++ b/cloudinit/net/ephemeral.py
@@ -0,0 +1,445 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Module for ephemeral network context managers
+"""
+import contextlib
+import logging
+from typing import Any, Dict, List
+
+import cloudinit.net as net
+from cloudinit import subp
+from cloudinit.net.dhcp import (
+ NoDHCPLeaseError,
+ maybe_perform_dhcp_discovery,
+ parse_static_routes,
+)
+
+LOG = logging.getLogger(__name__)
+
+
+class EphemeralIPv4Network(object):
+ """Context manager which sets up temporary static network configuration.
+
+ No operations are performed if the provided interface already has the
+ specified configuration.
+ This can be verified with the connectivity_url_data.
+ If unconnected, bring up the interface with valid ip, prefix and broadcast.
+    If a router is provided, set up a default route for that interface. Upon
+    context exit, clean up the interface, leaving no configuration behind.
+ """
+
+ def __init__(
+ self,
+ interface,
+ ip,
+ prefix_or_mask,
+ broadcast,
+ router=None,
+ connectivity_url_data: Dict[str, Any] = None,
+ static_routes=None,
+ ):
+ """Setup context manager and validate call signature.
+
+ @param interface: Name of the network interface to bring up.
+ @param ip: IP address to assign to the interface.
+ @param prefix_or_mask: Either netmask of the format X.X.X.X or an int
+ prefix.
+ @param broadcast: Broadcast address for the IPv4 network.
+ @param router: Optionally the default gateway IP.
+ @param connectivity_url_data: Optionally, a URL to verify if a usable
+ connection already exists.
+ @param static_routes: Optionally a list of static routes from DHCP
+ """
+ if not all([interface, ip, prefix_or_mask, broadcast]):
+ raise ValueError(
+ "Cannot init network on {0} with {1}/{2} and bcast {3}".format(
+ interface, ip, prefix_or_mask, broadcast
+ )
+ )
+ try:
+ self.prefix = net.ipv4_mask_to_net_prefix(prefix_or_mask)
+ except ValueError as e:
+ raise ValueError(
+ "Cannot setup network, invalid prefix or "
+ "netmask: {0}".format(e)
+ ) from e
+
+ self.connectivity_url_data = connectivity_url_data
+ self.interface = interface
+ self.ip = ip
+ self.broadcast = broadcast
+ self.router = router
+ self.static_routes = static_routes
+        # List of commands to run to clean up state.
+        self.cleanup_cmds: List[List[str]] = []
+
+ def __enter__(self):
+ """Perform ephemeral network setup if interface is not connected."""
+ if self.connectivity_url_data:
+ if net.has_url_connectivity(self.connectivity_url_data):
+ LOG.debug(
+ "Skip ephemeral network setup, instance has connectivity"
+ " to %s",
+ self.connectivity_url_data["url"],
+ )
+ return
+
+ self._bringup_device()
+
+ # rfc3442 requires us to ignore the router config *if* classless static
+ # routes are provided.
+ #
+ # https://tools.ietf.org/html/rfc3442
+ #
+ # If the DHCP server returns both a Classless Static Routes option and
+ # a Router option, the DHCP client MUST ignore the Router option.
+ #
+ # Similarly, if the DHCP server returns both a Classless Static Routes
+ # option and a Static Routes option, the DHCP client MUST ignore the
+ # Static Routes option.
+ if self.static_routes:
+ self._bringup_static_routes()
+ elif self.router:
+ self._bringup_router()
+
+ def __exit__(self, excp_type, excp_value, excp_traceback):
+ """Teardown anything we set up."""
+ for cmd in self.cleanup_cmds:
+ subp.subp(cmd, capture=True)
+
+ def _delete_address(self, address, prefix):
+ """Perform the ip command to remove the specified address."""
+ subp.subp(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "del",
+ "%s/%s" % (address, prefix),
+ "dev",
+ self.interface,
+ ],
+ capture=True,
+ )
+
+ def _bringup_device(self):
+ """Perform the ip comands to fully setup the device."""
+ cidr = "{0}/{1}".format(self.ip, self.prefix)
+ LOG.debug(
+ "Attempting setup of ephemeral network on %s with %s brd %s",
+ self.interface,
+ cidr,
+ self.broadcast,
+ )
+ try:
+ subp.subp(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "add",
+ cidr,
+ "broadcast",
+ self.broadcast,
+ "dev",
+ self.interface,
+ ],
+ capture=True,
+ update_env={"LANG": "C"},
+ )
+ except subp.ProcessExecutionError as e:
+ if "File exists" not in str(e.stderr):
+ raise
+ LOG.debug(
+ "Skip ephemeral network setup, %s already has address %s",
+ self.interface,
+ self.ip,
+ )
+ else:
+ # Address creation success, bring up device and queue cleanup
+ subp.subp(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "link",
+ "set",
+ "dev",
+ self.interface,
+ "up",
+ ],
+ capture=True,
+ )
+ self.cleanup_cmds.append(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "link",
+ "set",
+ "dev",
+ self.interface,
+ "down",
+ ]
+ )
+ self.cleanup_cmds.append(
+ [
+ "ip",
+ "-family",
+ "inet",
+ "addr",
+ "del",
+ cidr,
+ "dev",
+ self.interface,
+ ]
+ )
+
+ def _bringup_static_routes(self):
+ # static_routes = [("169.254.169.254/32", "130.56.248.255"),
+ # ("0.0.0.0/0", "130.56.240.1")]
+ for net_address, gateway in self.static_routes:
+ via_arg = []
+ if gateway != "0.0.0.0":
+ via_arg = ["via", gateway]
+ subp.subp(
+ ["ip", "-4", "route", "append", net_address]
+ + via_arg
+ + ["dev", self.interface],
+ capture=True,
+ )
+ self.cleanup_cmds.insert(
+ 0,
+ ["ip", "-4", "route", "del", net_address]
+ + via_arg
+ + ["dev", self.interface],
+ )
+
+ def _bringup_router(self):
+ """Perform the ip commands to fully setup the router if needed."""
+ # Check if a default route exists and exit if it does
+ out, _ = subp.subp(["ip", "route", "show", "0.0.0.0/0"], capture=True)
+ if "default" in out:
+ LOG.debug(
+ "Skip ephemeral route setup. %s already has default route: %s",
+ self.interface,
+ out.strip(),
+ )
+ return
+ subp.subp(
+ [
+ "ip",
+ "-4",
+ "route",
+ "add",
+ self.router,
+ "dev",
+ self.interface,
+ "src",
+ self.ip,
+ ],
+ capture=True,
+ )
+ self.cleanup_cmds.insert(
+ 0,
+ [
+ "ip",
+ "-4",
+ "route",
+ "del",
+ self.router,
+ "dev",
+ self.interface,
+ "src",
+ self.ip,
+ ],
+ )
+ subp.subp(
+ [
+ "ip",
+ "-4",
+ "route",
+ "add",
+ "default",
+ "via",
+ self.router,
+ "dev",
+ self.interface,
+ ],
+ capture=True,
+ )
+ self.cleanup_cmds.insert(
+ 0, ["ip", "-4", "route", "del", "default", "dev", self.interface]
+ )
+
+
+class EphemeralIPv6Network:
+ """Context manager which sets up a ipv6 link local address
+
+ The linux kernel assigns link local addresses on link-up, which is
+ sufficient for link-local communication.
+ """
+
+ def __init__(self, interface):
+ """Setup context manager and validate call signature.
+
+ @param interface: Name of the network interface to bring up.
+ """
+ if not interface:
+ raise ValueError("Cannot init network on {0}".format(interface))
+
+ self.interface = interface
+
+ def __enter__(self):
+ """linux kernel does autoconfiguration even when autoconf=0
+
+ https://www.kernel.org/doc/html/latest/networking/ipv6.html
+ """
+ if net.read_sys_net(self.interface, "operstate") != "up":
+ subp.subp(
+ ["ip", "link", "set", "dev", self.interface, "up"],
+ capture=False,
+ )
+
+ def __exit__(self, *_args):
+ """No need to set the link to down state"""
+
+
+class EphemeralDHCPv4(object):
+ def __init__(
+ self,
+ iface=None,
+ connectivity_url_data: Dict[str, Any] = None,
+ dhcp_log_func=None,
+ ):
+ self.iface = iface
+ self._ephipv4 = None
+ self.lease = None
+ self.dhcp_log_func = dhcp_log_func
+ self.connectivity_url_data = connectivity_url_data
+
+ def __enter__(self):
+ """Setup sandboxed dhcp context, unless connectivity_url can already be
+ reached."""
+ if self.connectivity_url_data:
+ if net.has_url_connectivity(self.connectivity_url_data):
+ LOG.debug(
+ "Skip ephemeral DHCP setup, instance has connectivity"
+ " to %s",
+ self.connectivity_url_data,
+ )
+ return
+ return self.obtain_lease()
+
+ def __exit__(self, excp_type, excp_value, excp_traceback):
+ """Teardown sandboxed dhcp context."""
+ self.clean_network()
+
+ def clean_network(self):
+ """Exit _ephipv4 context to teardown of ip configuration performed."""
+ if self.lease:
+ self.lease = None
+ if not self._ephipv4:
+ return
+ self._ephipv4.__exit__(None, None, None)
+
+ def obtain_lease(self):
+ """Perform dhcp discovery in a sandboxed environment if possible.
+
+ @return: A dict representing dhcp options on the most recent lease
+ obtained from the dhclient discovery if run, otherwise an error
+ is raised.
+
+ @raises: NoDHCPLeaseError if no leases could be obtained.
+ """
+ if self.lease:
+ return self.lease
+ leases = maybe_perform_dhcp_discovery(self.iface, self.dhcp_log_func)
+ if not leases:
+ raise NoDHCPLeaseError()
+ self.lease = leases[-1]
+ LOG.debug(
+ "Received dhcp lease on %s for %s/%s",
+ self.lease["interface"],
+ self.lease["fixed-address"],
+ self.lease["subnet-mask"],
+ )
+ nmap = {
+ "interface": "interface",
+ "ip": "fixed-address",
+ "prefix_or_mask": "subnet-mask",
+ "broadcast": "broadcast-address",
+ "static_routes": [
+ "rfc3442-classless-static-routes",
+ "classless-static-routes",
+ ],
+ "router": "routers",
+ }
+ kwargs = self.extract_dhcp_options_mapping(nmap)
+ if not kwargs["broadcast"]:
+ kwargs["broadcast"] = net.mask_and_ipv4_to_bcast_addr(
+ kwargs["prefix_or_mask"], kwargs["ip"]
+ )
+ if kwargs["static_routes"]:
+ kwargs["static_routes"] = parse_static_routes(
+ kwargs["static_routes"]
+ )
+ if self.connectivity_url_data:
+ kwargs["connectivity_url_data"] = self.connectivity_url_data
+ ephipv4 = EphemeralIPv4Network(**kwargs)
+ ephipv4.__enter__()
+ self._ephipv4 = ephipv4
+ return self.lease
+
+ def extract_dhcp_options_mapping(self, nmap):
+ result = {}
+ for internal_reference, lease_option_names in nmap.items():
+ if isinstance(lease_option_names, list):
+ self.get_first_option_value(
+ internal_reference, lease_option_names, result
+ )
+ else:
+ result[internal_reference] = self.lease.get(lease_option_names)
+ return result
+
+ def get_first_option_value(
+ self, internal_mapping, lease_option_names, result
+ ):
+ for different_names in lease_option_names:
+ if not result.get(internal_mapping):
+ result[internal_mapping] = self.lease.get(different_names)
+
+
+class EphemeralIPNetwork:
+ """Marries together IPv4 and IPv6 ephemeral context managers"""
+
+ def __init__(self, interface, ipv6: bool = False, ipv4: bool = True):
+ self.interface = interface
+ self.ipv4 = ipv4
+ self.ipv6 = ipv6
+ self.stack = contextlib.ExitStack()
+ self.state_msg: str = ""
+
+ def __enter__(self):
+ # ipv6 dualstack might succeed when dhcp4 fails
+ # therefore catch exception unless only v4 is used
+ try:
+            if self.ipv4:
+                self.stack.enter_context(EphemeralDHCPv4(self.interface))
+            if self.ipv6:
+                self.stack.enter_context(EphemeralIPv6Network(self.interface))
+ # v6 link local might be usable
+ # caller may want to log network state
+ except NoDHCPLeaseError as e:
+ if self.ipv6:
+ self.state_msg = "using link-local ipv6"
+ else:
+ raise e
+ return self
+
+ def __exit__(self, *_args):
+ self.stack.close()
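A hypothetical caller of the consolidated module, assuming a root shell on a Linux host with a DHCP-capable NIC named eth0 (names illustrative only):

    from cloudinit.net.ephemeral import EphemeralIPNetwork

    with EphemeralIPNetwork("eth0", ipv4=True, ipv6=True) as ephemeral:
        if ephemeral.state_msg:
            print("degraded network:", ephemeral.state_msg)
        # ...fetch metadata over the temporary configuration here...
    # On exit, every address and route added above is torn down.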
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 2af0ee9b..66ad598f 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -2,6 +2,8 @@
import copy
import os
+import textwrap
+from typing import cast
from cloudinit import log as logging
from cloudinit import safeyaml, subp, util
@@ -293,7 +295,7 @@ class Renderer(renderer.Renderer):
)
ethernets = {}
- wifis = {}
+ wifis: dict = {}
bridges = {}
bonds = {}
vlans = {}
@@ -335,8 +337,11 @@ class Renderer(renderer.Renderer):
bond = {}
bond_config = {}
# extract bond params and drop the bond_ prefix as it's
- # redundent in v2 yaml format
- v2_bond_map = NET_CONFIG_TO_V2.get("bond")
+ # redundant in v2 yaml format
+ v2_bond_map = cast(dict, NET_CONFIG_TO_V2.get("bond"))
+            # The previous cast is needed to help mypy know that the key is
+ # present in `NET_CONFIG_TO_V2`. This could probably be removed
+ # by using `Literal` when supported.
for match in ["bond_", "bond-"]:
bond_params = _get_params_dict_by_match(ifcfg, match)
for (param, value) in bond_params.items():
@@ -348,7 +353,7 @@ class Renderer(renderer.Renderer):
if len(bond_config) > 0:
bond.update({"parameters": bond_config})
if ifcfg.get("mac_address"):
- bond["macaddress"] = ifcfg.get("mac_address").lower()
+ bond["macaddress"] = ifcfg["mac_address"].lower()
slave_interfaces = ifcfg.get("bond-slaves")
if slave_interfaces == "none":
_extract_bond_slaves_by_name(interfaces, bond, ifname)
@@ -357,19 +362,24 @@ class Renderer(renderer.Renderer):
elif if_type == "bridge":
# required_keys = ['name', 'bridge_ports']
- ports = sorted(copy.copy(ifcfg.get("bridge_ports")))
- bridge = {
+ bridge_ports = ifcfg.get("bridge_ports")
+            # mypy false positive; `copy.copy(None)` is supported:
+ ports = sorted(copy.copy(bridge_ports)) # type: ignore
+ bridge: dict = {
"interfaces": ports,
}
# extract bridge params and drop the bridge prefix as it's
- # redundent in v2 yaml format
+ # redundant in v2 yaml format
match_prefix = "bridge_"
params = _get_params_dict_by_match(ifcfg, match_prefix)
br_config = {}
# v2 yaml uses different names for the keys
# and at least one value format change
- v2_bridge_map = NET_CONFIG_TO_V2.get("bridge")
+ v2_bridge_map = cast(dict, NET_CONFIG_TO_V2.get("bridge"))
+            # The previous cast is needed to help mypy know that the key is
+ # present in `NET_CONFIG_TO_V2`. This could probably be removed
+ # by using `Literal` when supported.
for (param, value) in params.items():
newname = v2_bridge_map.get(param)
if newname is None:
@@ -386,7 +396,7 @@ class Renderer(renderer.Renderer):
if len(br_config) > 0:
bridge.update({"parameters": br_config})
if ifcfg.get("mac_address"):
- bridge["macaddress"] = ifcfg.get("mac_address").lower()
+ bridge["macaddress"] = ifcfg["mac_address"].lower()
_extract_addresses(ifcfg, bridge, ifname, self.features)
bridges.update({ifname: bridge})
@@ -421,7 +431,7 @@ class Renderer(renderer.Renderer):
explicit_end=False,
noalias=True,
)
- txt = util.indent(dump, " " * 4)
+ txt = textwrap.indent(dump, " " * 4)
return [txt]
return []
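As the inline comments note, `typing.cast` has no runtime effect; it simply tells mypy that the `.get()` cannot have returned None here. Illustrated standalone:

    from typing import Any, Dict, Optional, cast

    NET_CONFIG_TO_V2: Dict[str, Dict[str, Any]] = {"bond": {"bond-mode": "mode"}}

    maybe_map: Optional[Dict[str, Any]] = NET_CONFIG_TO_V2.get("bond")
    v2_bond_map = cast(dict, maybe_map)  # no runtime conversion, silences mypy
    print(v2_bond_map["bond-mode"])      # mode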
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 3c7ee5a3..2c64e492 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -7,6 +7,7 @@
import copy
import functools
import logging
+from typing import Any, Dict
from cloudinit import safeyaml, util
from cloudinit.net import (
@@ -44,7 +45,7 @@ NETWORK_V2_KEY_FILTER = [
"accept-ra",
]
-NET_CONFIG_TO_V2 = {
+NET_CONFIG_TO_V2: Dict[str, Dict[str, Any]] = {
"bond": {
"bond-ad-select": "ad-select",
"bond-arp-interval": "arp-interval",
@@ -56,7 +57,7 @@ NET_CONFIG_TO_V2 = {
"bond-miimon": "mii-monitor-interval",
"bond-min-links": "min-links",
"bond-mode": "mode",
- "bond-num-grat-arp": "gratuitious-arp",
+ "bond-num-grat-arp": "gratuitous-arp",
"bond-primary": "primary",
"bond-primary-reselect": "primary-reselect-policy",
"bond-updelay": "up-delay",
@@ -795,13 +796,12 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
for (key, value) in item_cfg.items()
if key not in NETWORK_V2_KEY_FILTER
)
- # we accept the fixed spelling, but write the old for compatibility
- # Xenial does not have an updated netplan which supports the
- # correct spelling. LP: #1756701
+ # We accept both spellings (as netplan does). LP: #1756701
+ # Normalize internally to the new spelling:
params = item_params.get("parameters", {})
- grat_value = params.pop("gratuitous-arp", None)
+ grat_value = params.pop("gratuitious-arp", None)
if grat_value:
- params["gratuitious-arp"] = grat_value
+ params["gratuitous-arp"] = grat_value
v1_cmd = {
"type": cmd_type,
diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py
index 3bbeb284..7d7d82c2 100644
--- a/cloudinit/net/networkd.py
+++ b/cloudinit/net/networkd.py
@@ -7,7 +7,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import os
from collections import OrderedDict
from cloudinit import log as logging
@@ -219,15 +218,15 @@ class Renderer(renderer.Renderer):
util.chownbyname(net_fn, net_fn_owner, net_fn_owner)
def render_network_state(self, network_state, templates=None, target=None):
- fp_nwkd = self.network_conf_dir
+ network_dir = self.network_conf_dir
if target:
- fp_nwkd = subp.target_path(target) + fp_nwkd
+ network_dir = subp.target_path(target) + network_dir
- util.ensure_dir(os.path.dirname(fp_nwkd))
+ util.ensure_dir(network_dir)
ret_dict = self._render_content(network_state)
for k, v in ret_dict.items():
- self.create_network_file(k, v, fp_nwkd)
+ self.create_network_file(k, v, network_dir)
def _render_content(self, ns):
ret_dict = {}
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 37c5d260..698724ab 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -28,6 +28,7 @@ KNOWN_DISTROS = [
"fedora",
"miraclelinux",
"openEuler",
+ "openmandriva",
"rhel",
"rocky",
"suse",
@@ -361,7 +362,7 @@ class Renderer(renderer.Renderer):
]
)
- templates = {}
+ templates: dict = {}
def __init__(self, config=None):
if not config:
diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
index 06b5b49f..b839eaae 100644
--- a/cloudinit/reporting/__init__.py
+++ b/cloudinit/reporting/__init__.py
@@ -9,8 +9,10 @@ The reporting framework is intended to allow all parts of cloud-init to
report events in a structured manner.
"""
-from ..registry import DictRegistry
-from .handlers import available_handlers
+from typing import Type
+
+from cloudinit.registry import DictRegistry
+from cloudinit.reporting.handlers import HandlerType, available_handlers
DEFAULT_CONFIG = {
"logging": {"type": "log"},
@@ -32,19 +34,19 @@ def update_configuration(config):
)
continue
handler_config = handler_config.copy()
- cls = available_handlers.registered_items[handler_config.pop("type")]
+ cls: Type[HandlerType] = available_handlers.registered_items[
+ handler_config.pop("type")
+ ]
instantiated_handler_registry.unregister_item(handler_name)
- instance = cls(**handler_config)
+ instance = cls(**handler_config) # pyright: ignore
instantiated_handler_registry.register_item(handler_name, instance)
def flush_events():
- for _, handler in instantiated_handler_registry.registered_items.items():
- if hasattr(handler, "flush"):
- handler.flush()
+ handler: HandlerType
+ for handler in instantiated_handler_registry.registered_items.values():
+ handler.flush()
instantiated_handler_registry = DictRegistry()
update_configuration(DEFAULT_CONFIG)
-
-# vi: ts=4 expandtab
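With flush() now defined on the handler base class, flush_events can call it unconditionally instead of probing with hasattr. A stand-in sketch of that flow (the real registry is cloudinit.registry.DictRegistry; names here are illustrative):

    class PrintSketchHandler:
        def publish_event(self, event):
            print("event:", event)

        def flush(self):  # a no-op on the base class in cloud-init
            print("flushed")

    registered_items = {"mylogger": PrintSketchHandler()}

    def flush_events():
        for handler in registered_items.values():
            handler.flush()  # safe: every handler defines flush()

    flush_events()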
diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
index e53186a3..34c3b875 100644
--- a/cloudinit/reporting/events.py
+++ b/cloudinit/reporting/events.py
@@ -11,6 +11,9 @@ They can be published to registered handlers with report_event.
import base64
import os.path
import time
+from typing import List
+
+from cloudinit.reporting.handlers import ReportingHandler
from . import available_handlers, instantiated_handler_registry
@@ -116,8 +119,10 @@ def report_event(event, excluded_handler_types=None):
if hndl_type in excluded_handler_types
}
- handlers = instantiated_handler_registry.registered_items.items()
- for _, handler in handlers:
+ handlers: List[ReportingHandler] = list(
+ instantiated_handler_registry.registered_items.values()
+ )
+ for handler in handlers:
if type(handler) in excluded_handler_classes:
continue # skip this excluded handler
handler.publish_event(event)
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index e163e168..d43b80b0 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -10,6 +10,8 @@ import threading
import time
import uuid
from datetime import datetime
+from threading import Event
+from typing import Union
from cloudinit import log as logging
from cloudinit import url_helper, util
@@ -81,34 +83,79 @@ class WebHookHandler(ReportingHandler):
super(WebHookHandler, self).__init__()
if any([consumer_key, token_key, token_secret, consumer_secret]):
- self.oauth_helper = url_helper.OauthUrlHelper(
+ oauth_helper = url_helper.OauthUrlHelper(
consumer_key=consumer_key,
token_key=token_key,
token_secret=token_secret,
consumer_secret=consumer_secret,
)
+ self.readurl = oauth_helper.readurl
else:
- self.oauth_helper = None
+ self.readurl = url_helper.readurl
self.endpoint = endpoint
self.timeout = timeout
self.retries = retries
self.ssl_details = util.fetch_ssl_details()
+ self.flush_requested = Event()
+ self.queue = queue.Queue()
+ self.event_processor = threading.Thread(target=self.process_requests)
+ self.event_processor.daemon = True
+ self.event_processor.start()
+
+ def process_requests(self):
+ consecutive_failed = 0
+ while True:
+ if self.flush_requested.is_set() and consecutive_failed > 2:
+ # At this point the main thread is waiting for the queue to
+ # drain. If we have a queue of events piled up and recent
+                # events have failed, let's not waste time trying to post
+ # the rest, especially since a long timeout could block
+ # cloud-init for quite a long time.
+ LOG.warning(
+ "Multiple consecutive failures in WebHookHandler. "
+ "Cancelling all queued events."
+ )
+ while not self.queue.empty():
+ self.queue.get_nowait()
+ self.queue.task_done()
+ consecutive_failed = 0
+ args = self.queue.get(block=True)
+ try:
+ self.readurl(
+ args[0],
+ data=args[1],
+ timeout=args[2],
+ retries=args[3],
+ ssl_details=args[4],
+ )
+ consecutive_failed = 0
+ except Exception as e:
+ LOG.warning(
+ "Failed posting event: %s. This was caused by: %s",
+ args[1],
+ e,
+ )
+ consecutive_failed += 1
+ finally:
+ self.queue.task_done()
+
def publish_event(self, event):
- if self.oauth_helper:
- readurl = self.oauth_helper.readurl
- else:
- readurl = url_helper.readurl
- try:
- return readurl(
+ self.queue.put(
+ (
self.endpoint,
- data=json.dumps(event.as_dict()),
- timeout=self.timeout,
- retries=self.retries,
- ssl_details=self.ssl_details,
+ json.dumps(event.as_dict()),
+ self.timeout,
+ self.retries,
+ self.ssl_details,
)
- except Exception:
- LOG.warning("failed posting event: %s", event.as_string())
+ )
+
+ def flush(self):
+ self.flush_requested.set()
+ LOG.debug("WebHookHandler flushing remaining events")
+ self.queue.join()
+ self.flush_requested.clear()
class HyperVKvpReportingHandler(ReportingHandler):
@@ -359,10 +406,18 @@ class HyperVKvpReportingHandler(ReportingHandler):
self.q.join()
+# Type[ReportingHandler] doesn't work here because each class has different
+# call args. Protocols in Python 3.8 can probably make this simpler.
+HandlerType = Union[
+ ReportingHandler,
+ LogHandler,
+ PrintHandler,
+ WebHookHandler,
+ HyperVKvpReportingHandler,
+]
+
available_handlers = DictRegistry()
available_handlers.register_item("log", LogHandler)
available_handlers.register_item("print", PrintHandler)
available_handlers.register_item("webhook", WebHookHandler)
available_handlers.register_item("hyperv", HyperVKvpReportingHandler)
-
-# vi: ts=4 expandtab
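The WebHookHandler rework moves posting onto a daemon thread fed by a queue: publish_event becomes a non-blocking put, and flush uses queue.join() to wait for the worker's task_done calls. A self-contained sketch of that shape (print stands in for readurl):

    import queue
    import threading

    class AsyncPoster:
        """Background poster mirroring WebHookHandler's shape (sketch)."""

        def __init__(self):
            self.queue: queue.Queue = queue.Queue()
            worker = threading.Thread(target=self._process, daemon=True)
            worker.start()

        def _process(self):
            while True:
                item = self.queue.get(block=True)
                try:
                    print("posting", item)  # stand-in for readurl(...)
                finally:
                    self.queue.task_done()  # lets join() observe completion

        def publish_event(self, event):
            self.queue.put(event)  # returns immediately; no network wait

        def flush(self):
            self.queue.join()  # blocks until every queued item is done

    poster = AsyncPoster()
    poster.publish_event({"name": "init"})
    poster.flush()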
diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py
index eeb6f82b..368ac861 100644
--- a/cloudinit/safeyaml.py
+++ b/cloudinit/safeyaml.py
@@ -57,7 +57,7 @@ class _CustomSafeLoaderWithMarks(yaml.SafeLoader):
def __init__(self, stream):
super().__init__(stream)
- self.schemamarks_by_line = {} # type: Dict[int, List[SchemaPathMarks]]
+ self.schemamarks_by_line: Dict[int, List[SchemaPathMarks]] = {}
def _get_nested_path_prefix(self, node):
if node.start_mark.line in self.schemamarks_by_line:
diff --git a/cloudinit/serial.py b/cloudinit/serial.py
deleted file mode 100644
index a6f710ef..00000000
--- a/cloudinit/serial.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-try:
- from serial import Serial
-except ImportError:
- # For older versions of python (ie 2.6) pyserial may not exist and/or
- # work and/or be installed, so make a dummy/fake serial that blows up
- # when used...
- class Serial(object):
- def __init__(self, *args, **kwargs):
- pass
-
- @staticmethod
- def isOpen():
- return False
-
- @staticmethod
- def write(data):
- raise IOError(
- "Unable to perform serial `write` operation,"
- " pyserial not installed."
- )
-
- @staticmethod
- def readline():
- raise IOError(
- "Unable to perform serial `readline` operation,"
- " pyserial not installed."
- )
-
- @staticmethod
- def flush():
- raise IOError(
- "Unable to perform serial `flush` operation,"
- " pyserial not installed."
- )
-
- @staticmethod
- def read(size=1):
- raise IOError(
- "Unable to perform serial `read` operation,"
- " pyserial not installed."
- )
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 37f512e3..6804274e 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -1,7 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from typing import List
+
from cloudinit import dmi, sources
from cloudinit.sources import DataSourceEc2 as EC2
+from cloudinit.sources import DataSourceHostname
ALIYUN_PRODUCT = "Alibaba Cloud ECS"
@@ -13,10 +16,15 @@ class DataSourceAliYun(EC2.DataSourceEc2):
# The minimum supported metadata_version from the ec2 metadata apis
min_metadata_version = "2016-01-01"
- extended_metadata_versions = []
+ extended_metadata_versions: List[str] = []
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
- return self.metadata.get("hostname", "localhost.localdomain")
+ hostname = self.metadata.get("hostname")
+ is_default = False
+ if hostname is None:
+ hostname = "localhost.localdomain"
+ is_default = True
+ return DataSourceHostname(hostname, is_default)
def get_public_ssh_keys(self):
return parse_public_keys(self.metadata.get("public-keys", {}))
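DataSourceHostname is a small named tuple pairing the hostname with an is_default flag, letting callers distinguish real metadata from the localhost fallback. A sketch assuming that two-field shape:

    from collections import namedtuple

    DataSourceHostname = namedtuple("DataSourceHostname", ["hostname", "is_default"])

    def get_hostname(metadata: dict) -> DataSourceHostname:
        hostname = metadata.get("hostname")
        if hostname is None:
            return DataSourceHostname("localhost.localdomain", True)
        return DataSourceHostname(hostname, False)

    print(get_hostname({}))                     # fallback, is_default=True
    print(get_hostname({"hostname": "web01"}))  # metadata value, is_default=False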
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index d1bec85c..e7a0407c 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -24,11 +24,11 @@ from cloudinit import net, sources, ssh_util, subp, util
from cloudinit.event import EventScope, EventType
from cloudinit.net import device_driver
from cloudinit.net.dhcp import (
- EphemeralDHCPv4,
NoDHCPLeaseError,
NoDHCPLeaseInterfaceError,
NoDHCPLeaseMissingDhclientError,
)
+from cloudinit.net.ephemeral import EphemeralDHCPv4
from cloudinit.reporting import events
from cloudinit.sources.helpers import netlink
from cloudinit.sources.helpers.azure import (
@@ -60,7 +60,6 @@ RESOURCE_DISK_PATH = "/dev/disk/cloud/azure_resource"
DEFAULT_FS = "ext4"
# DMI chassis-asset-tag is set static for all azure instances
AZURE_CHASSIS_ASSET_TAG = "7783-7084-3265-9085-8269-3286-77"
-REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
AGENT_SEED_DIR = "/var/lib/waagent"
DEFAULT_PROVISIONING_ISO_DEV = "/dev/sr0"
@@ -100,7 +99,7 @@ class PPSType(Enum):
PLATFORM_ENTROPY_SOURCE: Optional[str] = "/sys/firmware/acpi/tables/OEM0"
# List of static scripts and network config artifacts created by
-# stock ubuntu suported images.
+# stock ubuntu supported images.
UBUNTU_EXTENDED_NETWORK_SCRIPTS = [
"/etc/netplan/90-hotplug-azure.yaml",
"/usr/local/sbin/ephemeral_eth.sh",
@@ -208,7 +207,7 @@ def get_hv_netvsc_macs_normalized() -> List[str]:
def execute_or_debug(cmd, fail_ret=None) -> str:
try:
- return subp.subp(cmd).stdout # type: ignore
+ return subp.subp(cmd).stdout # pyright: ignore
except subp.ProcessExecutionError:
LOG.debug("Failed to execute: %s", " ".join(cmd))
return fail_ret
@@ -285,7 +284,6 @@ BUILTIN_DS_CONFIG = {
"disk_aliases": {"ephemeral0": RESOURCE_DISK_PATH},
"apply_network_config": True, # Use IMDS published network configuration
}
-# RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False
BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG = {
"disk_setup": {
@@ -332,6 +330,9 @@ class DataSourceAzure(sources.DataSource):
self._network_config = None
self._ephemeral_dhcp_ctx = None
self._wireserver_endpoint = DEFAULT_WIRESERVER_ENDPOINT
+ self._reported_ready_marker_file = os.path.join(
+ paths.cloud_dir, "data", "reported_ready"
+ )
def _unpickle(self, ci_pkl_version: int) -> None:
super()._unpickle(ci_pkl_version)
@@ -598,9 +599,9 @@ class DataSourceAzure(sources.DataSource):
if metadata_source == "IMDS" and not crawled_data["files"]:
try:
contents = build_minimal_ovf(
- username=imds_username, # type: ignore
- hostname=imds_hostname, # type: ignore
- disableSshPwd=imds_disable_password, # type: ignore
+ username=imds_username, # pyright: ignore
+ hostname=imds_hostname, # pyright: ignore
+ disableSshPwd=imds_disable_password, # pyright: ignore
)
crawled_data["files"] = {"ovf-env.xml": contents}
except Exception as e:
@@ -748,9 +749,6 @@ class DataSourceAzure(sources.DataSource):
)
self.userdata_raw = crawled_data["userdata_raw"]
- user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
- self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
-
# walinux agent writes files world readable, but expects
# the directory to be protected.
write_files(
@@ -948,7 +946,7 @@ class DataSourceAzure(sources.DataSource):
@azure_ds_telemetry_reporter
def _create_report_ready_marker(self):
- path = REPORTED_READY_MARKER_FILE
+ path = self._reported_ready_marker_file
LOG.info("Creating a marker file to report ready: %s", path)
util.write_file(
path, "{pid}: {time}\n".format(pid=os.getpid(), time=time())
@@ -1153,7 +1151,9 @@ class DataSourceAzure(sources.DataSource):
)
headers = {"Metadata": "true"}
nl_sock = None
- report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
+ report_ready = bool(
+ not os.path.isfile(self._reported_ready_marker_file)
+ )
self.imds_logging_threshold = 1
self.imds_poll_counter = 1
dhcp_attempts = 0
@@ -1383,7 +1383,7 @@ class DataSourceAzure(sources.DataSource):
def _determine_pps_type(self, ovf_cfg: dict, imds_md: dict) -> PPSType:
"""Determine PPS type using OVF, IMDS data, and reprovision marker."""
- if os.path.isfile(REPORTED_READY_MARKER_FILE):
+ if os.path.isfile(self._reported_ready_marker_file):
pps_type = PPSType.UNKNOWN
elif (
ovf_cfg.get("PreprovisionedVMType", None) == PPSType.SAVABLE.value
@@ -1441,12 +1441,14 @@ class DataSourceAzure(sources.DataSource):
def _cleanup_markers(self):
"""Cleanup any marker files."""
- util.del_file(REPORTED_READY_MARKER_FILE)
+ util.del_file(self._reported_ready_marker_file)
@azure_ds_telemetry_reporter
def activate(self, cfg, is_new_instance):
+ instance_dir = self.paths.get_ipath_cur()
try:
address_ephemeral_resize(
+ instance_dir,
is_new_instance=is_new_instance,
preserve_ntfs=self.ds_cfg.get(DS_CFG_KEY_PRESERVE_NTFS, False),
)
@@ -1462,22 +1464,42 @@ class DataSourceAzure(sources.DataSource):
.get("platformFaultDomain")
)
+ @azure_ds_telemetry_reporter
+ def _generate_network_config(self):
+ """Generate network configuration according to configuration."""
+ # Use IMDS network metadata, if configured.
+ if (
+ self._metadata_imds
+ and self._metadata_imds != sources.UNSET
+ and self.ds_cfg.get("apply_network_config")
+ ):
+ try:
+ return generate_network_config_from_instance_network_metadata(
+ self._metadata_imds["network"]
+ )
+ except Exception as e:
+ LOG.error(
+ "Failed generating network config "
+ "from IMDS network metadata: %s",
+ str(e),
+ )
+
+ # Generate fallback configuration.
+ try:
+ return _generate_network_config_from_fallback_config()
+ except Exception as e:
+ LOG.error("Failed generating fallback network config: %s", str(e))
+
+ return {}
+
@property
def network_config(self):
- """Generate a network config like net.generate_fallback_network() with
- the following exceptions.
+ """Provide network configuration v2 dictionary."""
+ # Use cached config, if present.
+ if self._network_config and self._network_config != sources.UNSET:
+ return self._network_config
- 1. Probe the drivers of the net-devices present and inject them in
- the network configuration under params: driver: <driver> value
- 2. Generate a fallback network config that does not include any of
- the blacklisted devices.
- """
- if not self._network_config or self._network_config == sources.UNSET:
- if self.ds_cfg.get("apply_network_config"):
- nc_src = self._metadata_imds
- else:
- nc_src = None
- self._network_config = parse_network_config(nc_src)
+ self._network_config = self._generate_network_config()
return self._network_config
@property
@@ -1710,7 +1732,10 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
@azure_ds_telemetry_reporter
def address_ephemeral_resize(
- devpath=RESOURCE_DISK_PATH, is_new_instance=False, preserve_ntfs=False
+ instance_dir: str,
+ devpath: str = RESOURCE_DISK_PATH,
+ is_new_instance: bool = False,
+ preserve_ntfs: bool = False,
):
if not os.path.exists(devpath):
report_diagnostic_event(
@@ -1736,14 +1761,13 @@ def address_ephemeral_resize(
return
for mod in ["disk_setup", "mounts"]:
- sempath = "/var/lib/cloud/instance/sem/config_" + mod
+ sempath = os.path.join(instance_dir, "sem", "config_" + mod)
bmsg = 'Marker "%s" for module "%s"' % (sempath, mod)
if os.path.exists(sempath):
try:
os.unlink(sempath)
LOG.debug("%s removed.", bmsg)
- except Exception as e:
- # python3 throws FileNotFoundError, python2 throws OSError
+ except FileNotFoundError as e:
LOG.warning("%s: remove failed! (%s)", bmsg, e)
else:
LOG.debug("%s did not exist.", bmsg)
@@ -1883,8 +1907,7 @@ def read_azure_ovf(contents):
if not lpcs.hasChildNodes():
raise BrokenAzureDataSource("no child nodes of configuration set")
- md_props = "seedfrom"
- md: Dict[str, Any] = {"azure_data": {}}
+ md: Dict[str, Any] = {}
cfg = {}
ud = ""
password = None
@@ -1896,45 +1919,25 @@ def read_azure_ovf(contents):
name = child.localName.lower()
- simple = False
value = ""
if (
len(child.childNodes) == 1
and child.childNodes[0].nodeType == dom.TEXT_NODE
):
- simple = True
value = child.childNodes[0].wholeText
- attrs = dict([(k, v) for k, v in child.attributes.items()])
-
- # we accept either UserData or CustomData. If both are present
- # then behavior is undefined.
- if name == "userdata" or name == "customdata":
- if attrs.get("encoding") in (None, "base64"):
- ud = base64.b64decode("".join(value.split()))
- else:
- ud = value
+ if name == "customdata":
+ ud = base64.b64decode("".join(value.split()))
elif name == "username":
username = value
elif name == "userpassword":
password = value
elif name == "hostname":
md["local-hostname"] = value
- elif name == "dscfg":
- if attrs.get("encoding") in (None, "base64"):
- dscfg = base64.b64decode("".join(value.split()))
- else:
- dscfg = value
- cfg["datasource"] = {DS_NAME: util.load_yaml(dscfg, default={})}
elif name == "ssh":
cfg["_pubkeys"] = load_azure_ovf_pubkeys(child)
elif name == "disablesshpasswordauthentication":
cfg["ssh_pwauth"] = util.is_false(value)
- elif simple:
- if name in md_props:
- md[name] = value
- else:
- md["azure_data"][name] = value
defuser = {}
if username:
@@ -2087,15 +2090,14 @@ def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE):
seed = util.load_file(source, quiet=True, decode=False)
# The seed generally contains non-Unicode characters. load_file puts
- # them into a str (in python 2) or bytes (in python 3). In python 2,
- # bad octets in a str cause util.json_dumps() to throw an exception. In
- # python 3, bytes is a non-serializable type, and the handler load_file
+ # them into bytes (in python 3).
+ # bytes is a non-serializable type, and the handler that load_file
# uses applies b64 encoding *again* to handle it. The simplest solution
# is to just b64encode the data and then decode it to a serializable
# string. Same number of bits of entropy, just with 25% more zeroes.
# There's no need to undo this base64-encoding when the random seed is
# actually used in cc_seed_random.py.
- return base64.b64encode(seed).decode() # type: ignore
+ return base64.b64encode(seed).decode() # pyright: ignore
@azure_ds_telemetry_reporter
@@ -2128,40 +2130,16 @@ def load_azure_ds_dir(source_dir):
@azure_ds_telemetry_reporter
-def parse_network_config(imds_metadata) -> dict:
- """Convert imds_metadata dictionary to network v2 configuration.
- Parses network configuration from imds metadata if present or generate
- fallback network config excluding mlx4_core devices.
-
- @param: imds_metadata: Dict of content read from IMDS network service.
- @return: Dictionary containing network version 2 standard configuration.
- """
- if imds_metadata != sources.UNSET and imds_metadata:
- try:
- return _generate_network_config_from_imds_metadata(imds_metadata)
- except Exception as e:
- LOG.error(
- "Failed generating network config "
- "from IMDS network metadata: %s",
- str(e),
- )
- try:
- return _generate_network_config_from_fallback_config()
- except Exception as e:
- LOG.error("Failed generating fallback network config: %s", str(e))
- return {}
-
+def generate_network_config_from_instance_network_metadata(
+ network_metadata: dict,
+) -> dict:
+ """Convert imds network metadata dictionary to network v2 configuration.
-@azure_ds_telemetry_reporter
-def _generate_network_config_from_imds_metadata(imds_metadata) -> dict:
- """Convert imds_metadata dictionary to network v2 configuration.
- Parses network configuration from imds metadata.
+ :param network_metadata: Dict of "network" key from instance metadata.
- @param: imds_metadata: Dict of content read from IMDS network service.
- @return: Dictionary containing network version 2 standard configuration.
+ :return: Dictionary containing network version 2 standard configuration.
"""
netconfig: Dict[str, Any] = {"version": 2, "ethernets": {}}
- network_metadata = imds_metadata["network"]
for idx, intf in enumerate(network_metadata["interface"]):
has_ip_address = False
# First IPv4 and/or IPv6 address will be obtained via DHCP.
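
The refactored network_config property above reduces to a small generate-then-cache pattern. A minimal standalone sketch, with illustrative names rather than the datasource's real API:

    from typing import Any, Dict

    UNSET = "_unset"  # stand-in for sources.UNSET

    class Example:
        def __init__(self) -> None:
            self._network_config: Any = UNSET

        def _generate_network_config(self) -> Dict:
            # Preferred source first; swallow errors and fall back to {}.
            try:
                return {"version": 2, "ethernets": {"eth0": {"dhcp4": True}}}
            except Exception:
                return {}

        @property
        def network_config(self) -> Dict:
            # Serve the cached config when one has been generated.
            if self._network_config and self._network_config != UNSET:
                return self._network_config
            self._network_config = self._generate_network_config()
            return self._network_config
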
diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py
index 426a762e..8d01b563 100644
--- a/cloudinit/sources/DataSourceBigstep.py
+++ b/cloudinit/sources/DataSourceBigstep.py
@@ -6,6 +6,7 @@
import errno
import json
+import os
from cloudinit import sources, url_helper, util
@@ -15,13 +16,13 @@ class DataSourceBigstep(sources.DataSource):
dsname = "Bigstep"
def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ super().__init__(sys_cfg, distro, paths)
self.metadata = {}
self.vendordata_raw = ""
self.userdata_raw = ""
- def _get_data(self, apply_filter=False):
- url = get_url_from_file()
+ def _get_data(self, apply_filter=False) -> bool:
+ url = self._get_url_from_file()
if url is None:
return False
response = url_helper.readurl(url)
@@ -31,22 +32,25 @@ class DataSourceBigstep(sources.DataSource):
self.userdata_raw = decoded["userdata_raw"]
return True
- def _get_subplatform(self):
+ def _get_subplatform(self) -> str:
"""Return the subplatform metadata source details."""
- return "metadata (%s)" % get_url_from_file()
-
-
-def get_url_from_file():
- try:
- content = util.load_file("/var/lib/cloud/data/seed/bigstep/url")
- except IOError as e:
- # If the file doesn't exist, then the server probably isn't a Bigstep
- # instance; otherwise, another problem exists which needs investigation
- if e.errno == errno.ENOENT:
- return None
- else:
- raise
- return content
+ return f"metadata ({self._get_url_from_file()})"
+
+ def _get_url_from_file(self):
+ url_file = os.path.join(
+ self.paths.cloud_dir, "data", "seed", "bigstep", "url"
+ )
+ try:
+ content = util.load_file(url_file)
+ except IOError as e:
+ # If the file doesn't exist, then the server probably isn't a
+ # Bigstep instance; otherwise, another problem exists which needs
+ # investigation
+ if e.errno == errno.ENOENT:
+ return None
+ else:
+ raise
+ return content
# Used to match classes to dependencies
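
The _get_url_from_file change above keeps the long-standing ENOENT convention: a missing seed file means "not a Bigstep instance", while any other IOError still surfaces. A self-contained sketch of that convention:

    import errno

    def read_optional(path):
        try:
            with open(path) as f:
                return f.read()
        except IOError as e:
            # Absent file: signal "no data" rather than failing.
            if e.errno == errno.ENOENT:
                return None
            raise

    print(read_optional("/nonexistent/path"))  # -> None
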
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index 7d702137..270a3a18 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -10,6 +10,7 @@ from base64 import b64decode
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit.sources import DataSourceHostname
from cloudinit.sources.helpers.cloudsigma import SERIAL_PORT, Cepko
LOG = logging.getLogger(__name__)
@@ -90,9 +91,10 @@ class DataSourceCloudSigma(sources.DataSource):
the first part from uuid is being used.
"""
if re.match(r"^[A-Za-z0-9 -_\.]+$", self.metadata["name"]):
- return self.metadata["name"][:61]
+ ret = self.metadata["name"][:61]
else:
- return self.metadata["uuid"].split("-")[0]
+ ret = self.metadata["uuid"].split("-")[0]
+ return DataSourceHostname(ret, False)
def get_public_ssh_keys(self):
return [self.ssh_public_key]
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 9c525d3d..32659848 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -11,6 +11,7 @@
import copy
import os
import time
+from typing import List
from cloudinit import dmi
from cloudinit import log as logging
@@ -18,7 +19,8 @@ from cloudinit import net, sources
from cloudinit import url_helper as uhelp
from cloudinit import util, warnings
from cloudinit.event import EventScope, EventType
-from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+from cloudinit.net.dhcp import NoDHCPLeaseError
+from cloudinit.net.ephemeral import EphemeralIPNetwork
from cloudinit.sources.helpers import ec2
LOG = logging.getLogger(__name__)
@@ -67,7 +69,11 @@ class DataSourceEc2(sources.DataSource):
# Priority ordered list of additional metadata versions which will be tried
# for extended metadata content. IPv6 support comes in 2016-09-02.
# Tags support comes in 2021-03-23.
- extended_metadata_versions = ["2021-03-23", "2018-09-24", "2016-09-02"]
+ extended_metadata_versions: List[str] = [
+ "2021-03-23",
+ "2018-09-24",
+ "2016-09-02",
+ ]
# Setup read_url parameters per get_url_params.
url_max_wait = 120
@@ -120,12 +126,16 @@ class DataSourceEc2(sources.DataSource):
LOG.debug("FreeBSD doesn't support running dhclient with -sf")
return False
try:
- with EphemeralDHCPv4(self.fallback_interface):
+ with EphemeralIPNetwork(
+ self.fallback_interface, ipv6=True
+ ) as netw:
+ state_msg = f" {netw.state_msg}" if netw.state_msg else ""
self._crawled_metadata = util.log_time(
logfunc=LOG.debug,
- msg="Crawl of metadata service",
+ msg=f"Crawl of metadata service{state_msg}",
func=self.crawl_metadata,
)
+
except NoDHCPLeaseError:
return False
else:
@@ -222,7 +232,7 @@ class DataSourceEc2(sources.DataSource):
else:
return self.metadata["instance-id"]
- def _maybe_fetch_api_token(self, mdurls, timeout=None, max_wait=None):
+ def _maybe_fetch_api_token(self, mdurls):
"""Get an API token for EC2 Instance Metadata Service.
On EC2, IMDS will always answer an API token, unless
@@ -472,12 +482,6 @@ class DataSourceEc2(sources.DataSource):
),
)
- # RELEASE_BLOCKER: xenial should drop the below if statement,
- # because the issue being addressed doesn't exist pre-netplan.
- # (This datasource doesn't implement check_instance_id() so the
- # datasource object is recreated every boot; this means we don't
- # need to modify update_events on cloud-init upgrade.)
-
# Non-VPC (aka Classic) Ec2 instances need to rewrite the
# network config file every boot due to MAC address change.
if self.is_classic_instance():
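
The EC2 hunk above swaps EphemeralDHCPv4 for EphemeralIPNetwork so the metadata crawl can come up over IPv4 and/or IPv6. A sketch of the usage shape, assuming only what the hunk shows (a context manager exposing a state_msg attribute):

    from cloudinit.net.ephemeral import EphemeralIPNetwork

    def crawl_with_ephemeral_net(nic, crawl):
        with EphemeralIPNetwork(nic, ipv6=True) as netw:
            # state_msg reports which address families were configured.
            state_msg = f" {netw.state_msg}" if netw.state_msg else ""
            return crawl(), state_msg
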
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index c470bea8..3691a706 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -11,7 +11,8 @@ from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import sources, url_helper, util
from cloudinit.distros import ug_util
-from cloudinit.net.dhcp import EphemeralDHCPv4
+from cloudinit.net.ephemeral import EphemeralDHCPv4
+from cloudinit.sources import DataSourceHostname
LOG = logging.getLogger(__name__)
@@ -122,7 +123,9 @@ class DataSourceGCE(sources.DataSource):
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
# GCE has long FQDNs and has asked for short hostnames.
- return self.metadata["local-hostname"].split(".")[0]
+ return DataSourceHostname(
+ self.metadata["local-hostname"].split(".")[0], False
+ )
@property
def availability_zone(self):
diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py
index 91a6f9c9..90531769 100644
--- a/cloudinit/sources/DataSourceHetzner.py
+++ b/cloudinit/sources/DataSourceHetzner.py
@@ -10,7 +10,8 @@ import cloudinit.sources.helpers.hetzner as hc_helper
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import net, sources, util
-from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+from cloudinit.net.dhcp import NoDHCPLeaseError
+from cloudinit.net.ephemeral import EphemeralDHCPv4
LOG = logging.getLogger(__name__)
diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py
index 640348f4..34e4e00e 100644
--- a/cloudinit/sources/DataSourceLXD.py
+++ b/cloudinit/sources/DataSourceLXD.py
@@ -13,22 +13,14 @@ import os
import socket
import stat
from json.decoder import JSONDecodeError
+from typing import Any, Dict, Union, cast
import requests
from requests.adapters import HTTPAdapter
-# pylint fails to import the two modules below.
-# These are imported via requests.packages rather than urllib3 because:
-# a.) the provider of the requests package should ensure that urllib3
-# contained in it is consistent/correct.
-# b.) cloud-init does not specifically have a dependency on urllib3
-#
-# For future reference, see:
-# https://github.com/kennethreitz/requests/pull/2375
-# https://github.com/requests/requests/issues/4104
-# pylint: disable=E0401
-from requests.packages.urllib3.connection import HTTPConnection
-from requests.packages.urllib3.connectionpool import HTTPConnectionPool
+# Note: `urllib3` is transitively installed by `requests`.
+from urllib3.connection import HTTPConnection
+from urllib3.connectionpool import HTTPConnectionPool
from cloudinit import log as logging
from cloudinit import sources, subp, util
@@ -51,7 +43,7 @@ CONFIG_KEY_ALIASES = {
def generate_fallback_network_config() -> dict:
"""Return network config V1 dict representing instance network config."""
- network_v1 = {
+ network_v1: Dict[str, Any] = {
"version": 1,
"config": [
{
@@ -142,8 +134,8 @@ class DataSourceLXD(sources.DataSource):
dsname = "LXD"
- _network_config = sources.UNSET
- _crawled_metadata = sources.UNSET
+ _network_config: Union[Dict, str] = sources.UNSET
+ _crawled_metadata: Union[Dict, str] = sources.UNSET
sensitive_metadata_keys = (
"merged_cfg",
@@ -211,13 +203,17 @@ class DataSourceLXD(sources.DataSource):
If none is present, then we generate fallback configuration.
"""
if self._network_config == sources.UNSET:
- if self._crawled_metadata.get("network-config"):
+ if self._crawled_metadata == sources.UNSET:
+ self._get_data()
+ if isinstance(
+ self._crawled_metadata, dict
+ ) and self._crawled_metadata.get("network-config"):
self._network_config = self._crawled_metadata.get(
- "network-config"
+ "network-config", {}
)
else:
self._network_config = generate_fallback_network_config()
- return self._network_config
+ return cast(dict, self._network_config)
def is_platform_viable() -> bool:
@@ -257,7 +253,7 @@ def read_metadata(
configuration keys and values provided to the container surfaced by
the socket under the /1.0/config/ route.
"""
- md = {}
+ md: dict = {}
lxd_url = "http://lxd"
version_url = lxd_url + "/" + api_version + "/"
with requests.Session() as session:
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 0df39824..05bf84c2 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -51,6 +51,10 @@ GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS = "enable-custom-scripts"
VMWARE_IMC_DIR = "/var/run/vmware-imc"
+class GuestCustScriptDisabled(Exception):
+ pass
+
+
class DataSourceOVF(sources.DataSource):
dsname = "OVF"
@@ -270,11 +274,20 @@ class DataSourceOVF(sources.DataSource):
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED,
)
- raise RuntimeError(msg)
+ raise GuestCustScriptDisabled(msg)
ccScriptsDir = os.path.join(
self.paths.get_cpath("scripts"), "per-instance"
)
+ except GuestCustScriptDisabled as e:
+ LOG.debug("GuestCustScriptDisabled")
+ _raise_error_status(
+ "Error parsing the customization Config File",
+ e,
+ GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED,
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf,
+ )
except Exception as e:
_raise_error_status(
"Error parsing the customization Config File",
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 6878528d..292e0efc 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -10,7 +10,8 @@ from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import sources, url_helper, util
from cloudinit.event import EventScope, EventType
-from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+from cloudinit.net.dhcp import NoDHCPLeaseError
+from cloudinit.net.ephemeral import EphemeralDHCPv4
from cloudinit.sources import DataSourceOracle as oracle
from cloudinit.sources.helpers import openstack
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
index 3fd8d753..bf7c0c3a 100644
--- a/cloudinit/sources/DataSourceOracle.py
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -15,15 +15,15 @@ Notes:
import base64
from collections import namedtuple
-from contextlib import suppress as noop
-from typing import Tuple
+from typing import Optional, Tuple
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import net, sources, util
+from cloudinit.distros.networking import NetworkConfig
from cloudinit.net import (
cmdline,
- dhcp,
+ ephemeral,
get_interfaces_by_mac,
is_netfail_master,
)
@@ -46,7 +46,19 @@ V2_HEADERS = {"Authorization": "Bearer Oracle"}
OpcMetadata = namedtuple("OpcMetadata", "version instance_data vnics_data")
-def _ensure_netfailover_safe(network_config):
+class KlibcOracleNetworkConfigSource(cmdline.KlibcNetworkConfigSource):
+ """Override super class to lower the applicability conditions.
+
+ If any `/run/net-*.cfg` files exist, then it is applicable. Even if
+ `/run/initramfs/open-iscsi.interface` does not exist.
+ """
+
+ def is_applicable(self) -> bool:
+ """Override is_applicable"""
+ return bool(self._files)
+
+
+def _ensure_netfailover_safe(network_config: NetworkConfig) -> None:
"""
Search network config physical interfaces to see if any of them are
a netfailover master. If found, we prevent matching by MAC as the other
@@ -110,7 +122,7 @@ class DataSourceOracle(sources.DataSource):
sources.NetworkConfigSource.SYSTEM_CFG,
)
- _network_config = sources.UNSET
+ _network_config: dict = {"config": [], "version": 1}
def __init__(self, sys_cfg, *args, **kwargs):
super(DataSourceOracle, self).__init__(sys_cfg, *args, **kwargs)
@@ -122,8 +134,12 @@ class DataSourceOracle(sources.DataSource):
BUILTIN_DS_CONFIG,
]
)
+ self._network_config_source = KlibcOracleNetworkConfigSource()
+
+ def _has_network_config(self) -> bool:
+ return bool(self._network_config.get("config", []))
- def _is_platform_viable(self):
+ def _is_platform_viable(self) -> bool:
"""Check platform environment to report if this datasource may run."""
return _is_platform_viable()
@@ -133,24 +149,21 @@ class DataSourceOracle(sources.DataSource):
self.system_uuid = _read_system_uuid()
- # network may be configured if iscsi root. If that is the case
- # then read_initramfs_config will return non-None.
- fetch_vnics_data = self.ds_cfg.get(
+ network_context = ephemeral.EphemeralDHCPv4(
+ iface=net.find_fallback_nic(),
+ connectivity_url_data={
+ "url": METADATA_PATTERN.format(version=2, path="instance"),
+ "headers": V2_HEADERS,
+ },
+ )
+ fetch_primary_nic = not self._is_iscsi_root()
+ fetch_secondary_nics = self.ds_cfg.get(
"configure_secondary_nics",
BUILTIN_DS_CONFIG["configure_secondary_nics"],
)
- network_context = noop()
- if not _is_iscsi_root():
- network_context = dhcp.EphemeralDHCPv4(
- iface=net.find_fallback_nic(),
- connectivity_url_data={
- "url": METADATA_PATTERN.format(version=2, path="instance"),
- "headers": V2_HEADERS,
- },
- )
with network_context:
fetched_metadata = read_opc_metadata(
- fetch_vnics_data=fetch_vnics_data
+ fetch_vnics_data=fetch_primary_nic or fetch_secondary_nics
)
data = self._crawled_metadata = fetched_metadata.instance_data
@@ -177,7 +190,7 @@ class DataSourceOracle(sources.DataSource):
return True
- def check_instance_id(self, sys_cfg):
+ def check_instance_id(self, sys_cfg) -> bool:
"""quickly check (local only) if self.instance_id is still valid
On Oracle, the dmi-provided system uuid differs from the instance-id
@@ -187,59 +200,75 @@ class DataSourceOracle(sources.DataSource):
def get_public_ssh_keys(self):
return sources.normalize_pubkey_data(self.metadata.get("public_keys"))
+ def _is_iscsi_root(self) -> bool:
+ """Return whether we are on a iscsi machine."""
+ return self._network_config_source.is_applicable()
+
+ def _get_iscsi_config(self) -> dict:
+ return self._network_config_source.render_config()
+
@property
def network_config(self):
"""Network config is read from initramfs provided files
+ Priority for primary network_config selection:
+ - iscsi
+ - imds
+
If none is present, then we fall back to fallback configuration.
"""
- if self._network_config == sources.UNSET:
- # this is v1
- self._network_config = cmdline.read_initramfs_config()
-
- if not self._network_config:
- # this is now v2
- self._network_config = self.distro.generate_fallback_config()
-
- if self.ds_cfg.get(
- "configure_secondary_nics",
- BUILTIN_DS_CONFIG["configure_secondary_nics"],
- ):
- try:
- # Mutate self._network_config to include secondary
- # VNICs
- self._add_network_config_from_opc_imds()
- except Exception:
- util.logexc(
- LOG, "Failed to parse secondary network configuration!"
- )
-
- # we need to verify that the nic selected is not a netfail over
- # device and, if it is a netfail master, then we need to avoid
- # emitting any match by mac
- _ensure_netfailover_safe(self._network_config)
+ if self._has_network_config():
+ return self._network_config
+
+ set_primary = False
+ # this is v1
+ if self._is_iscsi_root():
+ self._network_config = self._get_iscsi_config()
+ if not self._has_network_config():
+ LOG.warning(
+ "Could not obtain network configuration from initramfs. "
+ "Falling back to IMDS."
+ )
+ set_primary = True
+
+ set_secondary = self.ds_cfg.get(
+ "configure_secondary_nics",
+ BUILTIN_DS_CONFIG["configure_secondary_nics"],
+ )
+ if set_primary or set_secondary:
+ try:
+ # Mutate self._network_config to include primary and/or
+ # secondary VNICs
+ self._add_network_config_from_opc_imds(set_primary)
+ except Exception:
+ util.logexc(
+ LOG,
+ "Failed to parse IMDS network configuration!",
+ )
+
+ # we need to verify that the nic selected is not a netfail over
+ # device and, if it is a netfail master, then we need to avoid
+ # emitting any match by mac
+ _ensure_netfailover_safe(self._network_config)
return self._network_config
- def _add_network_config_from_opc_imds(self):
- """Generate secondary NIC config from IMDS and merge it.
+ def _add_network_config_from_opc_imds(self, set_primary: bool = False):
+ """Generate primary and/or secondary NIC config from IMDS and merge it.
- The primary NIC configuration should not be modified based on the IMDS
- values, as it should continue to be configured for DHCP. As such, this
- uses the instance's network config dict which is expected to have the
- primary NIC configuration already present.
It will mutate the network config to include the secondary VNICs.
+ :param set_primary: If True, set up the primary interface.
:raises:
Exceptions are not handled within this function. Likely
exceptions are KeyError/IndexError
(if the IMDS returns valid JSON with unexpected contents).
"""
if self._vnics_data is None:
- LOG.warning("Secondary NIC data is UNSET but should not be")
+ LOG.warning("NIC data is UNSET but should not be")
return
- if "nicIndex" in self._vnics_data[0]:
+ if not set_primary and ("nicIndex" in self._vnics_data[0]):
# TODO: Once configure_secondary_nics defaults to True, lower the
# level of this log message. (Currently, if we're running this
# code at all, someone has explicitly opted-in to secondary
@@ -255,14 +284,14 @@ class DataSourceOracle(sources.DataSource):
interfaces_by_mac = get_interfaces_by_mac()
- for vnic_dict in self._vnics_data[1:]:
- # We skip the first entry in the response because the primary
- # interface is already configured by iSCSI boot; applying
- # configuration from the IMDS is not required.
+ vnics_data = self._vnics_data if set_primary else self._vnics_data[1:]
+
+ for vnic_dict in vnics_data:
mac_address = vnic_dict["macAddr"].lower()
if mac_address not in interfaces_by_mac:
- LOG.debug(
- "Interface with MAC %s not found; skipping", mac_address
+ LOG.warning(
+ "Interface with MAC %s not found; skipping",
+ mac_address,
)
continue
name = interfaces_by_mac[mac_address]
@@ -291,21 +320,25 @@ class DataSourceOracle(sources.DataSource):
}
-def _read_system_uuid():
+def _read_system_uuid() -> Optional[str]:
sys_uuid = dmi.read_dmi_data("system-uuid")
return None if sys_uuid is None else sys_uuid.lower()
-def _is_platform_viable():
+def _is_platform_viable() -> bool:
asset_tag = dmi.read_dmi_data("chassis-asset-tag")
return asset_tag == CHASSIS_ASSET_TAG
-def _is_iscsi_root():
- return bool(cmdline.read_initramfs_config())
+def _fetch(metadata_version: int, path: str, retries: int = 2) -> dict:
+ return readurl(
+ url=METADATA_PATTERN.format(version=metadata_version, path=path),
+ headers=V2_HEADERS if metadata_version > 1 else None,
+ retries=retries,
+ )._response.json()
-def read_opc_metadata(*, fetch_vnics_data: bool = False):
+def read_opc_metadata(*, fetch_vnics_data: bool = False) -> OpcMetadata:
"""Fetch metadata from the /opc/ routes.
:return:
@@ -319,15 +352,6 @@ def read_opc_metadata(*, fetch_vnics_data: bool = False):
# Per Oracle, there are short windows (measured in milliseconds) throughout
# an instance's lifetime where the IMDS is being updated and may 404 as a
# result. To work around these windows, we retry a couple of times.
- retries = 2
-
- def _fetch(metadata_version: int, path: str) -> dict:
- return readurl(
- url=METADATA_PATTERN.format(version=metadata_version, path=path),
- headers=V2_HEADERS if metadata_version > 1 else None,
- retries=retries,
- )._response.json()
-
metadata_version = 2
try:
instance_data = _fetch(metadata_version, path="instance")
@@ -340,9 +364,7 @@ def read_opc_metadata(*, fetch_vnics_data: bool = False):
try:
vnics_data = _fetch(metadata_version, path="vnics")
except UrlError:
- util.logexc(
- LOG, "Failed to fetch secondary network configuration!"
- )
+ util.logexc(LOG, "Failed to fetch IMDS network configuration!")
return OpcMetadata(metadata_version, instance_data, vnics_data)
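
The module-level _fetch helper above is now reusable and testable on its own. A hedged sketch of the same shape (the endpoint pattern is shown for illustration; only readurl's url/headers/retries parameters are assumed):

    from cloudinit.url_helper import readurl

    METADATA_PATTERN = "http://169.254.169.254/opc/v{version}/{path}/"
    V2_HEADERS = {"Authorization": "Bearer Oracle"}

    def fetch(metadata_version: int, path: str, retries: int = 2) -> dict:
        # v2 requests need the Bearer header; retries ride out the short
        # IMDS update windows that can 404.
        return readurl(
            url=METADATA_PATTERN.format(version=metadata_version, path=path),
            headers=V2_HEADERS if metadata_version > 1 else None,
            retries=retries,
        )._response.json()
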
diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py
index 14ac77e4..6890562d 100644
--- a/cloudinit/sources/DataSourceRbxCloud.py
+++ b/cloudinit/sources/DataSourceRbxCloud.py
@@ -12,6 +12,8 @@ instance on rootbox / hyperone cloud platforms
import errno
import os
import os.path
+import typing
+from ipaddress import IPv4Address
from cloudinit import log as logging
from cloudinit import sources, subp, util
@@ -30,18 +32,21 @@ def get_manage_etc_hosts():
return True
-def ip2int(addr):
- parts = addr.split(".")
- return (
- (int(parts[0]) << 24)
- + (int(parts[1]) << 16)
- + (int(parts[2]) << 8)
- + int(parts[3])
- )
+def increment_ip(addr, inc: int) -> str:
+ return str(IPv4Address(int(IPv4Address(addr)) + inc))
+
+def get_three_ips(addr) -> typing.List[str]:
+ """Return a list of 3 IP addresses: [addr, addr + 2, addr + 3]
-def int2ip(addr):
- return ".".join([str(addr >> (i << 3) & 0xFF) for i in range(4)[::-1]])
+ @param addr: an object that is passed to IPv4Address
+ @return: list of strings
+ """
+ return [
+ addr,
+ increment_ip(addr, 2),
+ increment_ip(addr, 3),
+ ]
def _sub_arp(cmd):
@@ -178,11 +183,7 @@ def read_user_data_callback(mount_dir):
{"source": ip["address"], "destination": target}
for netadp in meta_data["netadp"]
for ip in netadp["ip"]
- for target in [
- netadp["network"]["gateway"],
- int2ip(ip2int(netadp["network"]["gateway"]) + 2),
- int2ip(ip2int(netadp["network"]["gateway"]) + 3),
- ]
+ for target in get_three_ips(netadp["network"]["gateway"])
],
"cfg": {
"ssh_pwauth": True,
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index c47a8bf5..0ba0dec3 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -12,24 +12,17 @@ import time
import requests
-# pylint fails to import the two modules below.
-# These are imported via requests.packages rather than urllib3 because:
-# a.) the provider of the requests package should ensure that urllib3
-# contained in it is consistent/correct.
-# b.) cloud-init does not specifically have a dependency on urllib3
-#
-# For future reference, see:
-# https://github.com/kennethreitz/requests/pull/2375
-# https://github.com/requests/requests/issues/4104
-# pylint: disable=E0401
-from requests.packages.urllib3.connection import HTTPConnection
-from requests.packages.urllib3.poolmanager import PoolManager
+# Note: `urllib3` is transitively installed by `requests`
+from urllib3.connection import HTTPConnection
+from urllib3.poolmanager import PoolManager
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import net, sources, url_helper, util
from cloudinit.event import EventScope, EventType
-from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+from cloudinit.net.dhcp import NoDHCPLeaseError
+from cloudinit.net.ephemeral import EphemeralDHCPv4
+from cloudinit.sources import DataSourceHostname
LOG = logging.getLogger(__name__)
@@ -288,7 +281,7 @@ class DataSourceScaleway(sources.DataSource):
return ssh_keys
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
- return self.metadata["hostname"]
+ return DataSourceHostname(self.metadata["hostname"], False)
@property
def availability_zone(self):
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 40f915fa..11168f6a 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -30,9 +30,11 @@ import random
import re
import socket
+import serial
+
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import serial, sources, subp, util
+from cloudinit import sources, subp, util
from cloudinit.event import EventScope, EventType
LOG = logging.getLogger(__name__)
@@ -711,8 +713,7 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
if self.is_b64_encoded(key):
try:
val = base64.b64decode(val.encode()).decode()
- # Bogus input produces different errors in Python 2 and 3
- except (TypeError, binascii.Error):
+ except binascii.Error:
LOG.warning("Failed base64 decoding key '%s': %s", key, val)
if strip:
@@ -1049,7 +1050,7 @@ if __name__ == "__main__":
return data[key]
- data = {}
+ data: dict = {}
for key in keys:
load_key(client=jmc, key=key, data=data)
diff --git a/cloudinit/sources/DataSourceUpCloud.py b/cloudinit/sources/DataSourceUpCloud.py
index f4b78da5..d6b74bc1 100644
--- a/cloudinit/sources/DataSourceUpCloud.py
+++ b/cloudinit/sources/DataSourceUpCloud.py
@@ -8,7 +8,8 @@
from cloudinit import log as logging
from cloudinit import net as cloudnet
from cloudinit import sources, util
-from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+from cloudinit.net.dhcp import NoDHCPLeaseError
+from cloudinit.net.ephemeral import EphemeralDHCPv4
from cloudinit.sources.helpers import upcloud as uc_helper
LOG = logging.getLogger(__name__)
diff --git a/cloudinit/sources/DataSourceVMware.py b/cloudinit/sources/DataSourceVMware.py
index 6ef7c9d5..80a01e89 100644
--- a/cloudinit/sources/DataSourceVMware.py
+++ b/cloudinit/sources/DataSourceVMware.py
@@ -73,7 +73,7 @@ import netifaces
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import sources, util
+from cloudinit import net, sources, util
from cloudinit.subp import ProcessExecutionError, subp, which
PRODUCT_UUID_FILE_PATH = "/sys/class/dmi/id/product_uuid"
@@ -685,20 +685,10 @@ def is_valid_ip_addr(val):
Returns false if the address is loopback, link local or unspecified;
otherwise true is returned.
"""
- # TODO(extend cloudinit.net.is_ip_addr exclude link_local/loopback etc)
- # TODO(migrate to use cloudinit.net.is_ip_addr)#
-
- addr = None
- try:
- addr = ipaddress.ip_address(val)
- except ipaddress.AddressValueError:
- addr = ipaddress.ip_address(str(val))
- except Exception:
- return None
-
- if addr.is_link_local or addr.is_loopback or addr.is_unspecified:
- return False
- return True
+ addr = net.maybe_get_address(ipaddress.ip_address, val)
+ return addr and not (
+ addr.is_link_local or addr.is_loopback or addr.is_unspecified
+ )
def get_host_info():
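
The rewritten is_valid_ip_addr above delegates parsing to cloudinit.net.maybe_get_address; a stdlib-only sketch with the same filtering behaviour (note the real hunk returns addr-or-False rather than a strict bool):

    import ipaddress

    def is_valid_ip_addr(val) -> bool:
        try:
            addr = ipaddress.ip_address(val)
        except ValueError:
            return False
        return not (
            addr.is_link_local or addr.is_loopback or addr.is_unspecified
        )

    assert is_valid_ip_addr("192.0.2.10")
    assert not is_valid_ip_addr("127.0.0.1")  # loopback
    assert not is_valid_ip_addr("fe80::1")    # link-local
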
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index fff760f1..b621fb6e 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -14,7 +14,7 @@ import json
import os
from collections import namedtuple
from enum import Enum, unique
-from typing import Dict, List, Tuple
+from typing import Any, Dict, List, Tuple
from cloudinit import dmi, importer
from cloudinit import log as logging
@@ -149,7 +149,7 @@ def redact_sensitive_keys(metadata, redact_value=REDACT_SENSITIVE_VALUE):
URLParams = namedtuple(
- "URLParms",
+ "URLParams",
[
"max_wait_seconds",
"timeout_seconds",
@@ -158,6 +158,11 @@ URLParams = namedtuple(
],
)
+DataSourceHostname = namedtuple(
+ "DataSourceHostname",
+ ["hostname", "is_default"],
+)
+
class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
@@ -228,7 +233,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
# N-tuple listing default values for any metadata-related class
# attributes cached on an instance by a process_data runs. These attribute
# values are reset via clear_cached_attrs during any update_metadata call.
- cached_attr_defaults = (
+ cached_attr_defaults: Tuple[Tuple[str, Any], ...] = (
("ec2_metadata", UNSET),
("network_json", UNSET),
("metadata", {}),
@@ -244,7 +249,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
# N-tuple of keypaths or keynames redact from instance-data.json for
# non-root users
- sensitive_metadata_keys = (
+ sensitive_metadata_keys: Tuple[str, ...] = (
"merged_cfg",
"security-credentials",
)
@@ -256,7 +261,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
self.distro = distro
self.paths = paths
self.userdata = None
- self.metadata = {}
+ self.metadata: dict = {}
self.userdata_raw = None
self.vendordata = None
self.vendordata2 = None
@@ -301,7 +306,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
def _get_standardized_metadata(self, instance_data):
"""Return a dictionary of standardized metadata keys."""
- local_hostname = self.get_hostname()
+ local_hostname = self.get_hostname().hostname
instance_id = self.get_instance_id()
availability_zone = self.availability_zone
# In the event of upgrade from existing cloudinit, pickled datasource
@@ -356,7 +361,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
if not attr_defaults:
self._dirty_cache = False
- def get_data(self):
+ def get_data(self) -> bool:
"""Datasources implement _get_data to setup metadata and userdata_raw.
Minimally, the datasource should return a boolean True on success.
@@ -437,7 +442,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
write_json(json_file, redact_sensitive_keys(processed_data))
return True
- def _get_data(self):
+ def _get_data(self) -> bool:
"""Walk metadata sources, process crawled data and save attributes."""
raise NotImplementedError(
"Subclasses of DataSource must implement _get_data which"
@@ -445,7 +450,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
)
def get_url_params(self):
- """Return the Datasource's prefered url_read parameters.
+ """Return the Datasource's preferred url_read parameters.
Subclasses may override url_max_wait, url_timeout, url_retries.
@@ -707,22 +712,33 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
@param metadata_only: Boolean, set True to avoid looking up hostname
if meta-data doesn't have local-hostname present.
- @return: hostname or qualified hostname. Optionally return None when
+ @return: a DataSourceHostname namedtuple
+ <hostname or qualified hostname>, <is_default> (str, bool).
+ is_default is True only if the hostname is "localhost" and was
+ returned by util.get_hostname() as a default; this distinguishes
+ it from a user-defined localhost hostname.
+ Optionally return (None, False) when
metadata_only is True and local-hostname data is not available.
"""
defdomain = "localdomain"
defhost = "localhost"
domain = defdomain
+ is_default = False
if not self.metadata or not self.metadata.get("local-hostname"):
if metadata_only:
- return None
+ return DataSourceHostname(None, is_default)
# this is somewhat questionable really.
# the cloud datasource was asked for a hostname
# and didn't have one. raising error might be more appropriate
# but instead, basically look up the existing hostname
toks = []
hostname = util.get_hostname()
+ if hostname == "localhost":
+ # default hostname provided by socket.gethostname()
+ is_default = True
hosts_fqdn = util.get_fqdn_from_hosts(hostname)
if hosts_fqdn and hosts_fqdn.find(".") > 0:
toks = str(hosts_fqdn).split(".")
@@ -755,15 +771,15 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
hostname = toks[0]
if fqdn and domain != defdomain:
- return "%s.%s" % (hostname, domain)
- else:
- return hostname
+ hostname = "%s.%s" % (hostname, domain)
+
+ return DataSourceHostname(hostname, is_default)
def get_package_mirror_info(self):
return self.distro.get_package_mirror_info(data_source=self)
def get_supported_events(self, source_event_types: List[EventType]):
- supported_events = {} # type: Dict[EventScope, set]
+ supported_events: Dict[EventScope, set] = {}
for event in source_event_types:
for (
update_scope,
@@ -970,7 +986,9 @@ def list_sources(cfg_list, depends, pkg_list):
return src_list
-def instance_id_matches_system_uuid(instance_id, field="system-uuid"):
+def instance_id_matches_system_uuid(
+ instance_id, field: str = "system-uuid"
+) -> bool:
# quickly check (local only) if self.instance_id is still valid
# we check kernel command line or files.
if not instance_id:
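
The DataSourceHostname namedtuple introduced above keeps tuple-unpacking working for existing callers while letting new code use named fields:

    from collections import namedtuple

    DataSourceHostname = namedtuple(
        "DataSourceHostname", ["hostname", "is_default"]
    )

    dsh = DataSourceHostname("myhost.localdomain", False)
    hostname, is_default = dsh      # positional unpacking still works
    assert hostname == dsh.hostname
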
diff --git a/cloudinit/sources/helpers/cloudsigma.py b/cloudinit/sources/helpers/cloudsigma.py
index 6db7e117..19fa1669 100644
--- a/cloudinit/sources/helpers/cloudsigma.py
+++ b/cloudinit/sources/helpers/cloudsigma.py
@@ -22,7 +22,7 @@ API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
import json
import platform
-from cloudinit import serial
+import serial
# these high timeouts are necessary as read may read a lot of data.
READ_TIMEOUT = 60
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index 845294ec..4def10f1 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -5,13 +5,9 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+import configparser
import logging
-try:
- import configparser
-except ImportError:
- import ConfigParser as configparser
-
from .config_source import ConfigSource
logger = logging.getLogger(__name__)
diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py
index c8fb8420..adbcfbe5 100644
--- a/cloudinit/sources/helpers/vultr.py
+++ b/cloudinit/sources/helpers/vultr.py
@@ -10,7 +10,8 @@ from requests import exceptions
from cloudinit import dmi
from cloudinit import log as log
from cloudinit import net, netinfo, subp, url_helper, util
-from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+from cloudinit.net.dhcp import NoDHCPLeaseError
+from cloudinit.net.ephemeral import EphemeralDHCPv4
# Get LOG
LOG = log.getLogger(__name__)
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 27af6055..66e12eed 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -576,7 +576,7 @@ class Init(object):
# Attempts to register any handler modules under the given path.
if not path or not os.path.isdir(path):
return
- potential_handlers = util.find_modules(path)
+ potential_handlers = util.get_modules_from_dir(path)
for (fname, mod_name) in potential_handlers.items():
try:
mod_locs, looked_locs = importer.find_module(
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index 298eaf6b..4d712829 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -10,31 +10,38 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+# noqa: E402
+
import collections
import re
import sys
+from typing import Type
+
+from cloudinit import log as logging
+from cloudinit import type_utils as tu
+from cloudinit import util
+from cloudinit.atomic_helper import write_file
+JUndefined: Type
try:
- from jinja2 import DebugUndefined as JUndefined
+ from jinja2 import DebugUndefined as _DebugUndefined
from jinja2 import Template as JTemplate
JINJA_AVAILABLE = True
+ JUndefined = _DebugUndefined
except (ImportError, AttributeError):
JINJA_AVAILABLE = False
JUndefined = object
-from cloudinit import log as logging
-from cloudinit import type_utils as tu
-from cloudinit import util
-from cloudinit.atomic_helper import write_file
-
LOG = logging.getLogger(__name__)
TYPE_MATCHER = re.compile(r"##\s*template:(.*)", re.I)
BASIC_MATCHER = re.compile(r"\$\{([A-Za-z0-9_.]+)\}|\$([A-Za-z0-9_.]+)")
MISSING_JINJA_PREFIX = "CI_MISSING_JINJA_VAR/"
-class UndefinedJinjaVariable(JUndefined):
+# Mypy, and the PEP 484 ecosystem in general, does not support creating
+# classes with dynamic base types: https://stackoverflow.com/a/59636248
+class UndefinedJinjaVariable(JUndefined): # type: ignore
"""Class used to represent any undefined jinja template variable."""
def __str__(self):
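
The templater import shuffle above exists so type checkers see a single JUndefined name whether or not jinja2 is importable. The pattern in isolation:

    from typing import Type

    Base: Type
    try:
        from jinja2 import DebugUndefined as _DebugUndefined
        Base = _DebugUndefined
    except ImportError:
        Base = object

    # PEP 484 tooling cannot model a dynamic base class, hence the ignore.
    class MyUndefined(Base):  # type: ignore
        pass
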
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 04643895..7dd98d95 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -443,7 +443,7 @@ def dual_stack(
"Timed out waiting for addresses: %s, "
"exception(s) raised while waiting: %s",
" ".join(addresses),
- " ".join(exceptions),
+ " ".join(exceptions), # type: ignore
)
finally:
executor.shutdown(wait=False)
@@ -460,7 +460,7 @@ def wait_for_url(
headers_redact=None,
sleep_time: int = 1,
exception_cb: Callable = None,
- sleep_time_cb: Callable = None,
+ sleep_time_cb: Callable[[Any, int], int] = None,
request_method: str = "",
connect_synchronously: bool = True,
async_delay: float = 0.150,
@@ -503,7 +503,7 @@ def wait_for_url(
A value of None for max_wait will retry indefinitely.
"""
- def default_sleep_time(_, loop_number: int):
+ def default_sleep_time(_, loop_number: int) -> int:
return int(loop_number / 5) + 1
def timeup(max_wait, start_time):
@@ -631,9 +631,7 @@ def wait_for_url(
read_url_serial if connect_synchronously else read_url_parallel
)
- calculate_sleep_time = (
- default_sleep_time if not sleep_time_cb else sleep_time_cb
- )
+ calculate_sleep_time = sleep_time_cb or default_sleep_time
loop_n: int = 0
response = None
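
With `calculate_sleep_time = sleep_time_cb or default_sleep_time`, the back-off used when no callback is supplied steps up one second every five loops:

    def default_sleep_time(_, loop_number: int) -> int:
        return int(loop_number / 5) + 1

    assert [default_sleep_time(None, n) for n in (0, 4, 5, 9, 10)] == [
        1, 1, 2, 2, 3,
    ]
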
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 2639478a..e3a891e4 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -32,10 +32,10 @@ import subprocess
import sys
import time
from base64 import b64decode, b64encode
-from collections import deque
+from collections import deque, namedtuple
from errno import EACCES, ENOENT
from functools import lru_cache
-from typing import List
+from typing import Callable, List, TypeVar
from urllib import parse
from cloudinit import importer
@@ -368,7 +368,7 @@ def extract_usergroup(ug_pair):
return (u, g)
-def find_modules(root_dir) -> dict:
+def get_modules_from_dir(root_dir: str) -> dict:
entries = dict()
for fname in glob.glob(os.path.join(root_dir, "*.py")):
if not os.path.isfile(fname):
@@ -601,6 +601,7 @@ def _get_variant(info):
"fedora",
"miraclelinux",
"openeuler",
+ "openmandriva",
"photon",
"rhel",
"rocky",
@@ -800,28 +801,6 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
os.dup2(new_fp.fileno(), o_err.fileno())
-def make_url(
- scheme, host, port=None, path="", params="", query="", fragment=""
-):
-
- pieces = [scheme or ""]
-
- netloc = ""
- if host:
- netloc = str(host)
-
- if port is not None:
- netloc += ":" + "%s" % (port)
-
- pieces.append(netloc or "")
- pieces.append(path or "")
- pieces.append(params or "")
- pieces.append(query or "")
- pieces.append(fragment or "")
-
- return parse.urlunparse(pieces)
-
-
def mergemanydict(srcs, reverse=False) -> dict:
if reverse:
srcs = reversed(srcs)
@@ -887,17 +866,16 @@ def read_optional_seed(fill, base="", ext="", timeout=5):
def fetch_ssl_details(paths=None):
ssl_details = {}
# Lookup in these locations for ssl key/cert files
- ssl_cert_paths = [
- "/var/lib/cloud/data/ssl",
- "/var/lib/cloud/instance/data/ssl",
- ]
- if paths:
- ssl_cert_paths.extend(
- [
- os.path.join(paths.get_ipath_cur("data"), "ssl"),
- os.path.join(paths.get_cpath("data"), "ssl"),
- ]
- )
+ if not paths:
+ ssl_cert_paths = [
+ "/var/lib/cloud/data/ssl",
+ "/var/lib/cloud/instance/data/ssl",
+ ]
+ else:
+ ssl_cert_paths = [
+ os.path.join(paths.get_ipath_cur("data"), "ssl"),
+ os.path.join(paths.get_cpath("data"), "ssl"),
+ ]
ssl_cert_paths = uniq_merge(ssl_cert_paths)
ssl_cert_paths = [d for d in ssl_cert_paths if d and os.path.isdir(d)]
cert_file = None
@@ -1103,6 +1081,12 @@ def dos2unix(contents):
return contents.replace("\r\n", "\n")
+HostnameFqdnInfo = namedtuple(
+ "HostnameFqdnInfo",
+ ["hostname", "fqdn", "is_default"],
+)
+
+
def get_hostname_fqdn(cfg, cloud, metadata_only=False):
"""Get hostname and fqdn from config if present and fallback to cloud.
@@ -1110,9 +1094,17 @@ def get_hostname_fqdn(cfg, cloud, metadata_only=False):
@param cloud: Cloud instance from init.cloudify().
@param metadata_only: Boolean, set True to only query cloud meta-data,
returning None if not present in meta-data.
- @return: a Tuple of strings <hostname>, <fqdn>. Values can be none when
+ @return: a namedtuple of
+ <hostname>, <fqdn>, <is_default> (str, str, bool).
+ Values can be None when
metadata_only is True and no cfg or metadata provides hostname info.
+ is_default is True only if the hostname is "localhost" and was
+ returned by util.get_hostname() as a default; this distinguishes
+ it from a user-defined localhost hostname.
"""
+ is_default = False
if "fqdn" in cfg:
# user specified a fqdn. Default hostname then is based off that
fqdn = cfg["fqdn"]
@@ -1126,12 +1118,16 @@ def get_hostname_fqdn(cfg, cloud, metadata_only=False):
else:
# no fqdn set, get fqdn from cloud.
# get hostname from cfg if available otherwise cloud
- fqdn = cloud.get_hostname(fqdn=True, metadata_only=metadata_only)
+ fqdn = cloud.get_hostname(
+ fqdn=True, metadata_only=metadata_only
+ ).hostname
if "hostname" in cfg:
hostname = cfg["hostname"]
else:
- hostname = cloud.get_hostname(metadata_only=metadata_only)
- return (hostname, fqdn)
+ hostname, is_default = cloud.get_hostname(
+ metadata_only=metadata_only
+ )
+ return HostnameFqdnInfo(hostname, fqdn, is_default)
def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
@@ -1723,37 +1719,15 @@ def json_serialize_default(_obj):
return "Warning: redacted unserializable type {0}".format(type(_obj))
-def json_preserialize_binary(data):
- """Preserialize any discovered binary values to avoid json.dumps issues.
-
- Used only on python 2.7 where default type handling is not honored for
- failure to encode binary data. LP: #1801364.
- TODO(Drop this function when py2.7 support is dropped from cloud-init)
- """
- data = obj_copy.deepcopy(data)
- for key, value in data.items():
- if isinstance(value, (dict)):
- data[key] = json_preserialize_binary(value)
- if isinstance(value, bytes):
- data[key] = "ci-b64:{0}".format(b64e(value))
- return data
-
-
def json_dumps(data):
"""Return data in nicely formatted json."""
- try:
- return json.dumps(
- data,
- indent=1,
- sort_keys=True,
- separators=(",", ": "),
- default=json_serialize_default,
- )
- except UnicodeDecodeError:
- if sys.version_info[:2] == (2, 7):
- data = json_preserialize_binary(data)
- return json.dumps(data)
- raise
+ return json.dumps(
+ data,
+ indent=1,
+ sort_keys=True,
+ separators=(",", ": "),
+ default=json_serialize_default,
+ )
def ensure_dir(path, mode=None):
@@ -2618,7 +2592,17 @@ def get_mount_info(path, log=LOG, get_mnt_opts=False):
return parse_mount(path)
-def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False):
+T = TypeVar("T")
+
+
+def log_time(
+ logfunc,
+ msg,
+ func: Callable[..., T],
+ args=None,
+ kwargs=None,
+ get_uptime=False,
+) -> T:
if args is None:
args = []
if kwargs is None:
@@ -2800,14 +2784,6 @@ def system_is_snappy():
return False
-def indent(text, prefix):
- """replacement for indent from textwrap that is not available in 2.7."""
- lines = []
- for line in text.splitlines(True):
- lines.append(prefix + line)
- return "".join(lines)
-
-
def rootdev_from_cmdline(cmdline):
found = None
for tok in cmdline.split():
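
The TypeVar added to log_time above lets type checkers propagate the wrapped function's return type to the caller. A reduced illustration (not log_time's full signature):

    from typing import Callable, TypeVar

    T = TypeVar("T")

    def timed(func: Callable[..., T]) -> T:
        return func()

    n: int = timed(lambda: 42)  # checkers infer int here
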
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index 6951a0e3..8c9b8398 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -33,7 +33,7 @@ disable_root: true
{% endif %}
{% if variant in ["almalinux", "alpine", "amazon", "centos", "cloudlinux", "eurolinux",
- "fedora", "miraclelinux", "openEuler", "rhel", "rocky", "virtuozzo"] %}
+ "fedora", "miraclelinux", "openEuler", "openmandriva", "rhel", "rocky", "virtuozzo"] %}
{% if variant == "rhel" %}
mount_default_fields: [~, ~, 'auto', 'defaults,nofail,x-systemd.requires=cloud-init.service,_netdev', '0', '2']
{% else %}
@@ -43,7 +43,7 @@ mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
resize_rootfs: noblock
{% endif %}
resize_rootfs_tmp: /dev
-ssh_pwauth: 0
+ssh_pwauth: false
{% endif %}
# This will cause the set+update hostname module to not operate (if true)
@@ -125,7 +125,7 @@ cloud_config_modules:
{% if variant in ["rhel"] %}
- rh_subscription
{% endif %}
-{% if variant in ["rhel", "fedora", "photon"] %}
+{% if variant in ["rhel", "fedora", "openmandriva", "photon"] %}
{% if variant not in ["photon"] %}
- spacewalk
{% endif %}
@@ -192,7 +192,7 @@ system_info:
# This will affect which distro class gets used
{% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "debian",
"eurolinux", "fedora", "freebsd", "gentoo", "netbsd", "miraclelinux", "openbsd", "openEuler",
- "photon", "rhel", "rocky", "suse", "ubuntu", "virtuozzo"] %}
+ "openmandriva", "photon", "rhel", "rocky", "suse", "ubuntu", "virtuozzo"] %}
distro: {{ variant }}
{% elif variant in ["dragonfly"] %}
distro: dragonflybsd
@@ -245,7 +245,7 @@ system_info:
security: http://ports.ubuntu.com/ubuntu-ports
ssh_svcname: ssh
{% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "eurolinux",
- "fedora", "gentoo", "miraclelinux", "openEuler", "rhel", "rocky", "suse", "virtuozzo"] %}
+ "fedora", "gentoo", "miraclelinux", "openEuler", "openmandriva", "rhel", "rocky", "suse", "virtuozzo"] %}
# Default user name + that default user's groups (if added/used)
default_user:
{% if variant == "amazon" %}
@@ -271,6 +271,8 @@ system_info:
groups: [adm, sudo]
{% elif variant == "arch" %}
groups: [wheel, users]
+{% elif variant == "openmandriva" %}
+ groups: [wheel, users, systemd-journal]
{% elif variant == "rhel" %}
groups: [adm, systemd-journal]
{% else %}
@@ -349,4 +351,10 @@ system_info:
{% elif variant in ["dragonfly"] %}
network:
renderers: ['freebsd']
+{% elif variant in ["rhel", "fedora"] %}
+ network:
+ renderers: ['netplan', 'network-manager', 'networkd', 'sysconfig', 'eni']
+{% elif variant == "openmandriva" %}
+ network:
+ renderers: ['network-manager', 'networkd']
{% endif %}
diff --git a/debian/changelog b/debian/changelog
index d3c97cb4..74fcfcef 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,103 @@
+cloud-init (22.2-64-g1fcd55d6-0ubuntu1~22.10.1) kinetic; urgency=medium
+
+ * d/control: add python3-debconf as Depends and Build-Depends
+ * d/gbp.conf d/gbp_format_changelog:
+ + git-buildpackage customization for debian/changelog generation
+ * New upstream snapshot.
+ + tests: mock dns lookup that causes long timeouts (#1555)
+ + tox: add unpinned env for do_format and check_format (#1554)
+ + cc_ssh_import_id: Substitute deprecated warn (#1553)
+ [Alberto Contreras]
+ + Remove schema errors from log (#1551) (LP: #1978422) (CVE-2022-2084)
+ + Update WebHookHandler to run as background thread (SC-456) (#1491)
+ (LP: #1910552)
+ + testing: Don't run custom cloud dir test on Bionic (#1542)
+ + bash completion: update schema command (#1543) (LP: #1979547)
+ + CI: add non-blocking run against the linters tip versions (#1531)
+ [Paride Legovini]
+ + Change groups within the users schema to support lists and strings
+ (#1545) [RedKrieg]
+ + make it clear which username should go in the contributing doc (#1546)
+ + Pin setuptools for Travis (SC-1136) (#1540)
+ + Fix LXD datasource crawl when BOOT enabled (#1537)
+ + testing: Fix wrong path in dual stack test (#1538)
+ + cloud-config: honor cloud_dir setting (#1523) (LP: #1976564)
+ [Alberto Contreras]
+ + Add python3-debconf to pkg-deps.json Build-Depends (#1535)
+ [Alberto Contreras]
+ + redhat spec: udev/rules.d lives under /usr/lib on rhel-based systems
+ (#1536)
+ + tests/azure: add test coverage for DisableSshPasswordAuthentication
+ (#1534) [Chris Patterson]
+ + summary: Add david-caro to the cla signers (#1527) [David Caro]
+ + Add support for OpenMandriva (https://openmandriva.org/) (#1520)
+ [Bernhard Rosenkraenzer]
+ + tests/azure: refactor ovf creation (#1533) [Chris Patterson]
+ + Improve DataSourceOVF error reporting when script disabled (#1525)
+ [rong]
+ + tox: integration-tests-jenkins: softfail if only some test failed
+ (#1528) [Paride Legovini]
+ + CI: drop linters from Travis CI (moved to GH Actions) (#1530)
+ [Paride Legovini]
+ + sources/azure: remove unused encoding support for customdata (#1526)
+ [Chris Patterson]
+ + sources/azure: remove unused metadata captured when parsing ovf
+ (#1524) [Chris Patterson]
+ + sources/azure: remove dscfg parsing from ovf-env.xml (#1522)
+ [Chris Patterson]
+ + Remove extra space from ec2 dual stack crawl message (#1521)
+ + tests/azure: use namespaces in generated ovf-env.xml documents (#1519)
+ [Chris Patterson]
+ + setup.py: adjust udev/rules default path (#1513)
+ [Emanuele Giuseppe Esposito]
+ + Add python3-deconf dependency (#1506) [Alberto Contreras]
+ + Change match macadress param for network v2 config (#1518)
+ [Henrique Caricatti Capozzi]
+ + sources/azure: remove unused userdata property from ovf (#1516)
+ [Chris Patterson]
+ + sources/azure: minor refactoring to network config generation (#1497)
+ [Chris Patterson]
+ + net: Implement link-local ephemeral ipv6
+ + Rename function to avoid confusion (#1501)
+ + Fix cc_phone_home requiring 'tries' (#1500) (LP: #1977952)
+ + datasources: replace networking functions with stdlib and
+ cloudinit.net code
+ + Remove xenial references (#1472) [Alberto Contreras]
+ + Oracle ds changes (#1474) (LP: #1967942) [Alberto Contreras]
+ + improve runcmd docs (#1498)
+ + add 3.11-dev to Travis CI (#1493)
+ + Only run github actions on pull request (#1496)
+ + Fix integration test client creation (#1494) [Alberto Contreras]
+ + tox: add link checker environment, fix links (#1480)
+ + cc_ubuntu_advantage: Fix doc (#1487) [Alberto Contreras]
+ + cc_yum_add_repo: Fix repo id canonicalization (#1489) (LP: #1975818)
+ [Alberto Contreras]
+ + Add linitio as contributor in the project (#1488) [Kevin Allioli]
+ + net-convert: use yaml.dump for debugging python NetworkState obj
+ (#1484) (LP: #1975907)
+ + test_schema: no relative $ref URLs, replace $ref with local path
+ (#1486)
+ + cc_set_hostname: do not write "localhost" when no hostname is given
+ (#1453) [Emanuele Giuseppe Esposito]
+ + Update .github-cla-signers (#1478) [rong]
+ + schema: write_files defaults, versions $ref full URL and add vscode
+ (#1479)
+ + docs: fix external links, add one more to the list (#1477)
+ + doc: Document how to change module frequency (#1481)
+ + tests: bump pycloudlib (#1482)
+ + tests: bump pycloudlib pinned commit for kinetic Azure (#1476)
+ + testing: fix test_status.py (#1475)
+ + integration tests: If KEEP_INSTANCE = True, log IP (#1473)
+ + Drop mypy excluded files (#1454) [Alberto Contreras]
+ + Docs additions (#1470)
+ + Add "formatting tests" to Github Actions
+ + Remove unused arguments in function signature (#1471)
+ + Changelog: correct errant classification of LP issues as GH (#1464)
+ + Use Network-Manager and Netplan as default renderers for RHEL and
+ Fedora (#1465) [Emanuele Giuseppe Esposito]
+
+ -- James Falcon <james.falcon@canonical.com> Fri, 01 Jul 2022 10:07:15 -0500
+
cloud-init (22.2-0ubuntu1~22.10.1) kinetic; urgency=medium
* d/control:
@@ -3627,7 +3727,7 @@ cloud-init (0.7.8-34-ga1cdebd-0ubuntu1) zesty; urgency=medium
* New upstream snapshot.
- net/cmdline: Further adjustments to ipv6 support [LaMont Jones]
- (LP: #1621615)
+ (LP: #1621615)
- Add coverage dependency to bddeb to fix package build.
- doc: improve HACKING.rst file
- dmidecode: Allow dmidecode to be used on aarch64 [Robert Schweikert]
diff --git a/debian/control b/debian/control
index 0ad1aee4..ead20100 100644
--- a/debian/control
+++ b/debian/control
@@ -10,6 +10,7 @@ Build-Depends: debhelper-compat (= 13),
po-debconf,
python3,
python3-configobj,
+ python3-debconf,
python3-httpretty,
python3-jinja2,
python3-jsonpatch,
@@ -38,6 +39,7 @@ Depends: cloud-guest-utils | cloud-utils,
netplan.io,
procps,
python3,
+ python3-debconf,
python3-netifaces,
python3-requests,
python3-serial,
diff --git a/debian/gbp.conf b/debian/gbp.conf
new file mode 100644
index 00000000..9eb215de
--- /dev/null
+++ b/debian/gbp.conf
@@ -0,0 +1,12 @@
+# Configuration file for "gbp <command>"
+
+# See default config settings at: /etc/git-buildpackage/gbp.conf
+
+# Options only affecting gbp dch
+[dch]
+# options passed to git-log:
+git-log = --no-merges --reverse
+# Customizatons can e.g. be used for line wrapping
+customizations=./debian/gbp_format_changelog
+multimaint-merge = False
+multimaint = False
diff --git a/debian/gbp_format_changelog b/debian/gbp_format_changelog
new file mode 100755
index 00000000..62a871a7
--- /dev/null
+++ b/debian/gbp_format_changelog
@@ -0,0 +1,84 @@
+#!/usr/bin/env python3
+# Simple changelog entry formatter
+#
+# It uses the built-in formatter and line-wraps the text.
+#
+# Use git-dch --customizations=/<uss_tableflip>/scripts/gbp_format_changelog
+# or set it via gbp.conf
+
+import re
+import textwrap
+
+import gbp.dch
+
+# FILTER_UPSTREAM_COMMITERS are authors for which to redact the
+# [author name] suffix per changelog message. We redact those names
+# because they would otherwise be repeated for most changelog entries.
+
+# To override this default filter behavior, copy this file into your project's
+# ./debian/ directory and adapt as needed.
+
+FILTER_UPSTREAM_COMMITERS = ( # cloud-init upstream author names
+ "Chad Smith",
+ "James Falcon",
+ "Brett Holman",
+)
+
+FILTER_NOISY_COMMIT_REGEX = (
+ r"update changelog.*",
+ r"refresh patches against.*",
+)
+
+# To filter Jira tickets from the "SC" project out of changelog comments
+JIRA_PROJECT_KEY = "SC"
+
+UNWRAPPABLE_DELIMITERS = {
+ "]": "[",
+ ")": "(",
+}
+
+
+def _wrap_on_delimiter(text, prefix="", max_length=70) -> list:
+ """Break lines at specific UNWRAPPABLE_DELIMITERS.
+
+    When a line ends with either (LP: #XXX) or [Author Name], avoid
+    textwrap.wrap, which breaks at the last whitespace.
+
+    Instead, break at the leading ( or [ delimiter so that the entire
+    author name or LP bug reference remains on the same line.
+
+    Fall back to textwrap.wrap if these special conditions don't apply.
+
+ Return a list of individual lines.
+ """
+ if len(text) <= max_length:
+ return [prefix + text]
+ if text[-1] in UNWRAPPABLE_DELIMITERS:
+ delimiter = UNWRAPPABLE_DELIMITERS[text[-1]]
+ part1, sep, part2 = text.rpartition(delimiter)
+ lines = []
+ for part in (part1.rstrip(), f"{sep}{part2}"):
+ if lines:
+            if len(lines[-1] + " " + part) < max_length:
+ # Then the previous part plus current part should be joined
+ part = lines.pop() + " " + part
+ part = f" {part}"
+ lines.extend(_wrap_on_delimiter(part, prefix="" if lines else "+"))
+ return lines
+ return textwrap.wrap(prefix + text)
+
+
+def format_changelog_entry(commit_info, options, last_commit=False):
+ entry = gbp.dch.format_changelog_entry(commit_info, options, last_commit)
+ if re.search(rf"\({JIRA_PROJECT_KEY}-\d+\)", entry[0]):
+ # Remove JIRA card references from debian/changelog comments
+        entry[0] = re.sub(rf"\({JIRA_PROJECT_KEY}-\d+\)", "", entry[0])
+ if commit_info["author"].name not in FILTER_UPSTREAM_COMMITERS:
+ # Only append non-upstream authors since most committers are upstream
+ entry.append(f"[{commit_info['author'].name}]")
+ if entry:
+ combined_entry = " ".join(entry)
+ for filter_re in FILTER_NOISY_COMMIT_REGEX:
+ if re.match(filter_re, combined_entry):
+ return None
+ return _wrap_on_delimiter(combined_entry, prefix="+")
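+
+
+# A minimal usage sketch (assumptions: gbp is importable, e.g. from the
+# python3-gbp package, and this file is run directly rather than via
+# git-dch). It shows _wrap_on_delimiter keeping an (LP: #XXX) reference
+# whole instead of splitting it at the last whitespace:
+if __name__ == "__main__":
+    entry = (
+        "Update WebHookHandler to run as background thread"
+        " (SC-456) (#1491) (LP: #1910552)"
+    )
+    for line in _wrap_on_delimiter(entry, prefix="+"):
+        print(line)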
diff --git a/doc/examples/cloud-config-reporting.txt b/doc/examples/cloud-config-reporting.txt
index 80bde303..a4ebabfd 100644
--- a/doc/examples/cloud-config-reporting.txt
+++ b/doc/examples/cloud-config-reporting.txt
@@ -2,7 +2,6 @@
##
## The following sets up 2 reporting end points.
## A 'webhook' and a 'log' type.
-## It also disables the built in default 'log'
reporting:
smtest:
type: webhook
@@ -14,4 +13,3 @@ reporting:
smlogger:
type: log
level: WARN
- log: null
diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst
index 77232269..d30e1bb6 100644
--- a/doc/rtd/topics/datasources/ec2.rst
+++ b/doc/rtd/topics/datasources/ec2.rst
@@ -89,7 +89,8 @@ The settings that may be configured are:
* **metadata_urls**: This list of urls will be searched for an EC2
metadata service. The first entry that successfully returns a 200 response
for <url>/<version>/meta-data/instance-id will be selected.
- (default: ['http://169.254.169.254', 'http://instance-data:8773']).
+ (default: ['http://169.254.169.254', 'http://[fd00:ec2::254]',
+ 'http://instance-data:8773']).
* **max_wait**: the maximum amount of clock time in seconds that should be
spent searching metadata_urls. A value less than zero will result in only
one request being made, to the first in the list. (default: 120)
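+
+The selection behavior described above for ``metadata_urls`` can be
+sketched as follows (illustrative only; the function name is
+hypothetical, not part of cloud-init's API):
+
+.. code-block:: python
+
+    import urllib.request
+
+    def pick_metadata_url(urls, version="latest", timeout=5):
+        """Return the first URL whose instance-id endpoint returns 200."""
+        for url in urls:
+            probe = f"{url}/{version}/meta-data/instance-id"
+            try:
+                with urllib.request.urlopen(probe, timeout=timeout) as resp:
+                    if resp.status == 200:
+                        return url
+            except OSError:
+                continue  # unreachable or non-200; try the next candidate
+        return None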
diff --git a/doc/rtd/topics/datasources/gce.rst b/doc/rtd/topics/datasources/gce.rst
index 70aefea2..3aeb9afc 100644
--- a/doc/rtd/topics/datasources/gce.rst
+++ b/doc/rtd/topics/datasources/gce.rst
@@ -37,6 +37,6 @@ An example configuration with the default values is provided below:
retries: 5
sec_between_retries: 1
-.. _GCE metadata docs: https://cloud.google.com/compute/docs/storing-retrieving-metadata#querying
+.. _GCE metadata docs: https://cloud.google.com/compute/docs/storing-retrieving-metadata
.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
index 8ce656af..6080d288 100644
--- a/doc/rtd/topics/datasources/nocloud.rst
+++ b/doc/rtd/topics/datasources/nocloud.rst
@@ -149,7 +149,7 @@ be network configuration based on the filename.
ethernets:
interface0:
match:
- mac_address: "52:54:00:12:34:00"
+ macaddress: "52:54:00:12:34:00"
set-name: interface0
addresses:
- 192.168.1.10/255.255.255.0
diff --git a/doc/rtd/topics/datasources/vmware.rst b/doc/rtd/topics/datasources/vmware.rst
index f1f48117..59cfc2f8 100644
--- a/doc/rtd/topics/datasources/vmware.rst
+++ b/doc/rtd/topics/datasources/vmware.rst
@@ -7,7 +7,7 @@ This datasource is for use with systems running on a VMware platform such as
vSphere and currently supports the following data transports:
-* `GuestInfo <https://github.com/vmware/govmomi/blob/master/govc/USAGE.md#vmchange>`_ keys
+* `GuestInfo <https://github.com/vmware/govmomi/blob/master/govc/USAGE.md>`_ keys
Configuration
-------------
diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst
index 0f77fb15..2815f492 100644
--- a/doc/rtd/topics/faq.rst
+++ b/doc/rtd/topics/faq.rst
@@ -151,6 +151,82 @@ provided to the system:
As launching instances in the cloud can cost money and take a bit longer,
sometimes it is easier to launch instances locally using Multipass or LXD:
+Why did cloud-init never complete?
+==================================
+
+To check whether cloud-init is still running, run:
+
+.. code-block:: shell-session
+
+ $ cloud-init status
+
+To wait for cloud-init to complete, run:
+
+.. code-block:: shell-session
+
+ $ cloud-init status --wait
+
+There are a number of reasons that cloud-init might never complete. This list
+is not exhaustive, but attempts to enumerate potential causes:
+
+External reasons:
+-----------------
+- failed dependent services during boot
+- bugs in the kernel or drivers
+- bugs in external userspace tools that are called by cloud-init
+
+Internal reasons:
+-----------------
+- a command in ``bootcmd`` or ``runcmd`` that never completes (e.g., running
+  ``cloud-init status --wait`` will wait forever on itself and never complete)
+- nonstandard configurations that disable timeouts or set extremely high
+ values ("never" is used in a loose sense here)
+
+How can I make a module run on every boot?
+==========================================
+Modules have a default frequency that can be overridden. This is done
+by modifying the module list in ``/etc/cloud/cloud.cfg``.
+
+1. Change the module from a string (default) to a list.
+2. Set the first list item to the module name and the second item to the
+ frequency.
+
+Example
+-------
+The following example demonstrates how to log boot times to a file every boot.
+
+Update ``/etc/cloud/cloud.cfg``:
+
+.. code-block:: yaml
+ :name: /etc/cloud/cloud.cfg
+ :emphasize-lines: 3
+
+ cloud_final_modules:
+ # list shortened for brevity
+ - [phone-home, always]
+ - final-message
+ - power-state-change
+
+
+
+Your userdata could then be:
+
+.. code-block:: yaml
+
+ #cloud-config
+ phone_home:
+ url: http://example.com/$INSTANCE_ID/
+ post: all
+
+
+
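+A minimal sketch (not cloud-init's actual implementation) of how such an
+entry may be interpreted: a plain string keeps the module's default
+frequency, while a ``[name, frequency]`` list overrides it.
+
+.. code-block:: python
+
+    def normalize_entry(entry, default="once-per-instance"):
+        # String entries run at the module's default frequency;
+        # list entries carry an explicit frequency override.
+        if isinstance(entry, str):
+            return entry, default
+        name, frequency = entry  # e.g. ["phone-home", "always"]
+        return name, frequency
+
+    assert normalize_entry("final-message") == (
+        "final-message",
+        "once-per-instance",
+    )
+    assert normalize_entry(["phone-home", "always"]) == (
+        "phone-home",
+        "always",
+    )
+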
+How can I test cloud-init locally before deploying to the cloud?
+================================================================
+
+Several different virtual machine and containerization tools can be used for
+testing locally. Multipass, LXD, and QEMU are described in this section.
+
+
Multipass
---------
@@ -212,17 +288,15 @@ launch this multiple times:
The above examples all show how to pass user data. To pass other types of
configuration data use the config option specified below:
-+----------------+---------------------+
-| Data | Config Option |
-+================+=====================+
-| user data | user.user-data |
-+----------------+---------------------+
-| vendor data | user.vendor-data |
-+----------------+---------------------+
-| metadata | user.meta-data |
-+----------------+---------------------+
-| network config | user.network-config |
-+----------------+---------------------+
++----------------+---------------------------+
+| Data | Config Option |
++================+===========================+
+| user data | cloud-init.user-data |
++----------------+---------------------------+
+| vendor data | cloud-init.vendor-data |
++----------------+---------------------------+
+| network config | cloud-init.network-config |
++----------------+---------------------------+
See the LXD `Instance Configuration`_ docs for more info about configuration
values or the LXD `Custom Network Configuration`_ document for more about
@@ -232,8 +306,8 @@ custom network config.
.. _Instance Configuration: https://linuxcontainers.org/lxd/docs/master/instances
.. _Custom Network Configuration: https://linuxcontainers.org/lxd/docs/master/cloud-init
-cloud-localds
--------------
+QEMU
+----
The ``cloud-localds`` command from the `cloud-utils`_ package generates a disk
with user-supplied data. The NoCloud datasource allows users to provide their
@@ -283,37 +357,56 @@ check out the :ref:`datasource_nocloud` page.
.. _cloud-utils: https://github.com/canonical/cloud-utils/
Where can I learn more?
-========================================
+=======================
Below are some videos, blog posts, and white papers about cloud-init from a
variety of sources.
+Videos:
+
- `cloud-init - The Good Parts`_
-- `cloud-init Summit 2019`_
-- `Utilising cloud-init on Microsoft Azure (Whitepaper)`_
-- `Cloud Instance Initialization with cloud-init (Whitepaper)`_
-- `cloud-init Summit 2018`_
-- `cloud-init - The cross-cloud Magic Sauce (PDF)`_
-- `cloud-init Summit 2017`_
+- `Perfect Proxmox Template with Cloud Image and Cloud Init [proxmox, cloud-init, template]`_
- `cloud-init - Building clouds one Linux box at a time (Video)`_
-- `cloud-init - Building clouds one Linux box at a time (PDF)`_
- `Metadata and cloud-init`_
-- `The beauty of cloud-init`_
- `Introduction to cloud-init`_
+Blog Posts:
+
+- `cloud-init - The cross-cloud Magic Sauce (PDF)`_
+- `cloud-init - Building clouds one Linux box at a time (PDF)`_
+- `The beauty of cloud-init`_
+- `Cloud-init Getting Started [fedora, libvirt, cloud-init]`_
+- `Build Azure Devops Agents With Linux cloud-init for Dotnet Development [terraform, azure, devops, docker, dotnet, cloud-init]`_
+- `Setup Neovim cloud-init Completion [neovim, yaml, Language Server Protocol, jsonschema, cloud-init]`_
+
+Events:
+
+- `cloud-init Summit 2019`_
+- `cloud-init Summit 2018`_
+- `cloud-init Summit 2017`_
+
+
+Whitepapers:
+
+- `Utilising cloud-init on Microsoft Azure (Whitepaper)`_
+- `Cloud Instance Initialization with cloud-init (Whitepaper)`_
+
.. _cloud-init - The Good Parts: https://www.youtube.com/watch?v=2_m6EUo6VOI
-.. _cloud-init Summit 2019: https://powersj.io/post/cloud-init-summit19/
.. _Utilising cloud-init on Microsoft Azure (Whitepaper): https://ubuntu.com/engage/azure-cloud-init-whitepaper
.. _Cloud Instance Initialization with cloud-init (Whitepaper): https://ubuntu.com/blog/cloud-instance-initialisation-with-cloud-init
-.. _cloud-init Summit 2018: https://powersj.io/post/cloud-init-summit18/
+
.. _cloud-init - The cross-cloud Magic Sauce (PDF): https://events.linuxfoundation.org/wp-content/uploads/2017/12/cloud-init-The-cross-cloud-Magic-Sauce-Scott-Moser-Chad-Smith-Canonical.pdf
-.. _cloud-init Summit 2017: https://powersj.io/post/cloud-init-summit17/
.. _cloud-init - Building clouds one Linux box at a time (Video): https://www.youtube.com/watch?v=1joQfUZQcPg
-.. _cloud-init - Building clouds one Linux box at a time (PDF): https://annex.debconf.org/debconf-share/debconf17/slides/164-cloud-init_Building_clouds_one_Linux_box_at_a_time.pdf
+.. _cloud-init - Building clouds one Linux box at a time (PDF): https://web.archive.org/web/20181111020605/https://annex.debconf.org/debconf-share/debconf17/slides/164-cloud-init_Building_clouds_one_Linux_box_at_a_time.pdf
.. _Metadata and cloud-init: https://www.youtube.com/watch?v=RHVhIWifVqU
-.. _The beauty of cloud-init: http://brandon.fuller.name/archives/2011/05/02/06.40.57/
+.. _The beauty of cloud-init: https://web.archive.org/web/20180830161317/http://brandon.fuller.name/archives/2011/05/02/06.40.57/
.. _Introduction to cloud-init: http://www.youtube.com/watch?v=-zL3BdbKyGY
-.. Blog Post: [terraform, azure, devops, docker, dotnet, cloud-init] https://codingsoul.org/2022/04/25/build-azure-devops-agents-with-linux-cloud-init-for-dotnet-development/
-.. Youtube: [proxmox, cloud-init, template] https://www.youtube.com/watch?v=shiIi38cJe4
+.. _Build Azure Devops Agents With Linux cloud-init for Dotnet Development [terraform, azure, devops, docker, dotnet, cloud-init]: https://codingsoul.org/2022/04/25/build-azure-devops-agents-with-linux-cloud-init-for-dotnet-development/
+.. _Perfect Proxmox Template with Cloud Image and Cloud Init [proxmox, cloud-init, template]: https://www.youtube.com/watch?v=shiIi38cJe4
+.. _Cloud-init Getting Started [fedora, libvirt, cloud-init]: https://blog.while-true-do.io/cloud-init-getting-started/
+.. _Setup Neovim cloud-init Completion [neovim, yaml, Language Server Protocol, jsonschema, cloud-init]: https://phoenix-labs.xyz/blog/setup-neovim-cloud-init-completion/
-.. vi: textwidth=79
+.. _cloud-init Summit 2019: https://powersj.io/post/cloud-init-summit19/
+.. _cloud-init Summit 2018: https://powersj.io/post/cloud-init-summit18/
+.. _cloud-init Summit 2017: https://powersj.io/post/cloud-init-summit17/
diff --git a/doc/rtd/topics/logging.rst b/doc/rtd/topics/logging.rst
index 744e9bd4..f72b77c1 100644
--- a/doc/rtd/topics/logging.rst
+++ b/doc/rtd/topics/logging.rst
@@ -1,52 +1,15 @@
*******
Logging
*******
-Cloud-init supports both local and remote logging configurable through python's
-built-in logging configuration and through the cloud-init rsyslog module.
+Cloud-init supports both local and remote logging, configurable through
+several mechanisms:
-Command Output
-==============
-Cloud-init can redirect its stdout and stderr based on config given under the
-``output`` config key. The output of any commands run by cloud-init and any
-user or vendor scripts provided will also be included here. The ``output`` key
-accepts a dictionary for configuration. Output files may be specified
-individually for each stage (``init``, ``config``, and ``final``), or a single
-key ``all`` may be used to specify output for all stages.
-
-The output for each stage may be specified as a dictionary of ``output`` and
-``error`` keys, for stdout and stderr respectively, as a tuple with stdout
-first and stderr second, or as a single string to use for both. The strings
-passed to all of these keys are handled by the system shell, so any form of
-redirection that can be used in bash is valid, including piping cloud-init's
-output to ``tee``, or ``logger``. If only a filename is provided, cloud-init
-will append its output to the file as though ``>>`` was specified.
-
-By default, cloud-init loads its output configuration from
-``/etc/cloud/cloud.cfg.d/05_logging.cfg``. The default config directs both
-stdout and stderr from all cloud-init stages to
-``/var/log/cloud-init-output.log``. The default config is given as ::
-
- output: { all: "| tee -a /var/log/cloud-init-output.log" }
-
-For a more complex example, the following configuration would output the init
-stage to ``/var/log/cloud-init.out`` and ``/var/log/cloud-init.err``, for
-stdout and stderr respectively, replacing anything that was previously there.
-For the config stage, it would pipe both stdout and stderr through ``tee -a
-/var/log/cloud-config.log``. For the final stage it would append the output of
-stdout and stderr to ``/var/log/cloud-final.out`` and
-``/var/log/cloud-final.err`` respectively. ::
-
- output:
- init:
- output: "> /var/log/cloud-init.out"
- error: "> /var/log/cloud-init.err"
- config: "tee -a /var/log/cloud-config.log"
- final:
- - ">> /var/log/cloud-final.out"
- - "/var/log/cloud-final.err"
+- Python's built-in logging configuration
+- Cloud-init's event reporting system
+- The cloud-init rsyslog module
Python Logging
---------------
+==============
Cloud-init uses the python logging module, and can accept config for this
module using the standard python fileConfig format. Cloud-init looks for
config for the logging module under the ``logcfg`` key.
@@ -135,8 +98,131 @@ the default format string ``%(message)s``::
For additional information about configuring python's logging module, please
see the documentation for `python logging config`_.
-Rsyslog Module
+Command Output
--------------
+Cloud-init can redirect its stdout and stderr based on config given under the
+``output`` config key. The output of any commands run by cloud-init and any
+user or vendor scripts provided will also be included here. The ``output`` key
+accepts a dictionary for configuration. Output files may be specified
+individually for each stage (``init``, ``config``, and ``final``), or a single
+key ``all`` may be used to specify output for all stages.
+
+The output for each stage may be specified as a dictionary of ``output`` and
+``error`` keys, for stdout and stderr respectively, as a tuple with stdout
+first and stderr second, or as a single string to use for both. The strings
+passed to all of these keys are handled by the system shell, so any form of
+redirection that can be used in bash is valid, including piping cloud-init's
+output to ``tee``, or ``logger``. If only a filename is provided, cloud-init
+will append its output to the file as though ``>>`` was specified.
+
+By default, cloud-init loads its output configuration from
+``/etc/cloud/cloud.cfg.d/05_logging.cfg``. The default config directs both
+stdout and stderr from all cloud-init stages to
+``/var/log/cloud-init-output.log``. The default config is given as ::
+
+ output: { all: "| tee -a /var/log/cloud-init-output.log" }
+
+For a more complex example, the following configuration would output the init
+stage to ``/var/log/cloud-init.out`` and ``/var/log/cloud-init.err``, for
+stdout and stderr respectively, replacing anything that was previously there.
+For the config stage, it would pipe both stdout and stderr through ``tee -a
+/var/log/cloud-config.log``. For the final stage it would append the output of
+stdout and stderr to ``/var/log/cloud-final.out`` and
+``/var/log/cloud-final.err`` respectively. ::
+
+ output:
+ init:
+ output: "> /var/log/cloud-init.out"
+ error: "> /var/log/cloud-init.err"
+ config: "tee -a /var/log/cloud-config.log"
+ final:
+ - ">> /var/log/cloud-final.out"
+ - "/var/log/cloud-final.err"
+
+Event Reporting
+===============
+Cloud-init contains an eventing system that allows events to be emitted
+to a variety of destinations.
+
+Three configurations are available for reporting events:
+
+- **webhook**: POST to a web server
+- **log**: Write to the cloud-init log at a configurable log level
+- **print**: Print to stdout
+
+The default configuration is to emit events to the cloud-init log file
+at ``DEBUG`` level.
+
+Event reporting can be configured using the ``reporting`` key in
+cloud-config userdata.
+
+Configuration
+-------------
+
+**webhook**
+
+.. code-block:: yaml
+
+ reporting:
+ <user-defined name>:
+ type: webhook
+ endpoint: <url>
+ timeout: <timeout in seconds>
+ retries: <number of retries>
+ consumer_key: <OAuth consumer key>
+ token_key: <OAuth token key>
+ token_secret: <OAuth token secret>
+ consumer_secret: <OAuth consumer secret>
+
+``endpoint`` is the only additional required key when specifying
+``type: webhook``.
+
+**log**
+
+.. code-block:: yaml
+
+ reporting:
+ <user-defined name>:
+ type: log
+ level: <DEBUG|INFO|WARN|ERROR|FATAL>
+
+``level`` is optional and defaults to "DEBUG".
+
+**print**
+
+.. code-block:: yaml
+
+ reporting:
+ <user-defined name>:
+ type: print
+
+
+Example
+^^^^^^^
+
+The following example shows configuration for all three reporter types:
+
+.. code-block:: yaml
+
+ #cloud-config
+ reporting:
+ webserver:
+ type: webhook
+ endpoint: "http://10.0.0.1:55555/asdf"
+ timeout: 5
+ retries: 3
+ consumer_key: <consumer_key>
+ token_key: <token_key>
+ token_secret: <token_secret>
+ consumer_secret: <consumer_secret>
+ info_log:
+ type: log
+ level: WARN
+ stdout:
+ type: print
+
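+To observe webhook events locally, a minimal receiver can be sketched as
+follows (illustrative only; any HTTP server that accepts POST requests
+and returns 200 will do). It assumes the configured ``endpoint`` points
+at this host and port:
+
+.. code-block:: python
+
+    from http.server import BaseHTTPRequestHandler, HTTPServer
+
+    class ReportingHandler(BaseHTTPRequestHandler):
+        def do_POST(self):
+            # Each reported event arrives as one POST body
+            length = int(self.headers.get("Content-Length", 0))
+            print(self.rfile.read(length).decode())
+            self.send_response(200)
+            self.end_headers()
+
+    HTTPServer(("", 55555), ReportingHandler).serve_forever()
+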
+Rsyslog Module
+==============
Cloud-init's ``cc_rsyslog`` module allows for fully customizable rsyslog
configuration under the ``rsyslog`` config key. The simplest way to
use the rsyslog module is by specifying remote servers under the ``remotes``
diff --git a/doc/rtd/topics/module_creation.rst b/doc/rtd/topics/module_creation.rst
index b09cd2cc..070d411f 100644
--- a/doc/rtd/topics/module_creation.rst
+++ b/doc/rtd/topics/module_creation.rst
@@ -111,7 +111,7 @@ in the ``cloud_final_modules`` section before the ``final-message`` module.
.. _MetaSchema: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/config/schema.py#L58
.. _OSFAMILIES: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/distros/__init__.py#L35
.. _settings.py: https://github.com/canonical/cloud-init/blob/3bcffacb216d683241cf955e4f7f3e89431c1491/cloudinit/settings.py#L66
-.. _cloud-init-schema.json: https://github.com/canonical/cloud-init/blob/main/cloudinit/config/cloud-init-schema.json
+.. _cloud-init-schema.json: https://github.com/canonical/cloud-init/blob/main/cloudinit/config/schemas/versions.schema.cloud-config.json
.. _cloud.cfg.tmpl: https://github.com/canonical/cloud-init/blob/main/config/cloud.cfg.tmpl
.. _cloud_init_modules: https://github.com/canonical/cloud-init/blob/b4746b6aed7660510071395e70b2d6233fbdc3ab/config/cloud.cfg.tmpl#L70
.. _cloud_config_modules: https://github.com/canonical/cloud-init/blob/b4746b6aed7660510071395e70b2d6233fbdc3ab/config/cloud.cfg.tmpl#L101
diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst
index c1bf05d1..3080c6d4 100644
--- a/doc/rtd/topics/network-config-format-v2.rst
+++ b/doc/rtd/topics/network-config-format-v2.rst
@@ -338,7 +338,7 @@ Set whether to set all slaves to the same MAC address when adding
them to the bond, or how else the system should handle MAC addresses.
The possible values are ``none``, ``active``, and ``follow``.
-**gratuitious-arp**: <*(scalar)>*
+**gratuitous-arp**: <*(scalar)>*
Specify how many ARP packets to send after failover. Once a link is
up on a new slave, a notification is sent and possibly repeated if
diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
index c461a3fe..682637c4 100644
--- a/doc/rtd/topics/network-config.rst
+++ b/doc/rtd/topics/network-config.rst
@@ -188,6 +188,15 @@ generated configuration into an internal network configuration state. From
this state `Cloud-init`_ delegates rendering of the configuration to Distro
supported formats. The following ``renderers`` are supported in cloud-init:
+- **NetworkManager**
+
+`NetworkManager <https://networkmanager.dev>`_ is the standard Linux network
+configuration tool suite. It supports a wide range of networking setups.
+Configuration is typically stored in ``/etc/NetworkManager``.
+
+It is the default for a number of Linux distributions, notably Fedora,
+CentOS/RHEL, and their derivatives.
+
- **ENI**
/etc/network/interfaces or ``ENI`` is supported by the ``ifupdown`` package
@@ -215,6 +224,7 @@ is as follows:
- ENI
- Sysconfig
- Netplan
+- NetworkManager
When applying the policy, `Cloud-init`_ checks if the current instance has the
correct binaries and paths to support the renderer. The first renderer that
@@ -223,7 +233,7 @@ supplying an updated configuration in cloud-config. ::
system_info:
network:
- renderers: ['netplan', 'eni', 'sysconfig', 'freebsd', 'netbsd', 'openbsd']
+ renderers: ['netplan', 'network-manager', 'eni', 'sysconfig', 'freebsd', 'netbsd', 'openbsd']
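+
+The search policy above can be sketched as follows (illustrative only;
+the function and the availability checks are hypothetical, not
+cloud-init's internal API):
+
+.. code-block:: python
+
+    def select_renderer(priority, available):
+        # Return the first renderer whose binaries/paths are present.
+        for name in priority:
+            if available.get(name):
+                return name
+        raise RuntimeError("No available network renderers found")
+
+    priority = ["netplan", "network-manager", "eni", "sysconfig"]
+    available = {"netplan": False, "network-manager": True}
+    assert select_renderer(priority, available) == "network-manager"
+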
Network Configuration Tools
@@ -280,7 +290,7 @@ Example output converting V2 to sysconfig:
.. _Cloud-init: https://launchpad.net/cloud-init
-.. _DigitalOcean JSON metadata: https://developers.digitalocean.com/documentation/metadata/#network-interfaces-index
+.. _DigitalOcean JSON metadata: https://developers.digitalocean.com/documentation/metadata/
.. _OpenStack Metadata Service Network: https://specs.openstack.org/openstack/nova-specs/specs/liberty/implemented/metadata-service-network-info.html
.. _SmartOS JSON Metadata: https://eng.joyent.com/mdata/datadict.html
.. _UpCloud JSON metadata: https://developers.upcloud.com/1.3/8-servers/#metadata-service
diff --git a/integration-requirements.txt b/integration-requirements.txt
index 102553cb..cd10c540 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -1,5 +1,5 @@
# PyPI requirements for cloud-init integration testing
# https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html
#
-pycloudlib @ git+https://github.com/canonical/pycloudlib.git@675dffdc14224a03f8f0ba7212ecb3ca2a8a7083
+pycloudlib @ git+https://github.com/canonical/pycloudlib.git@6eee33c9c4f630bc9c13b6e48f9ab36e7fb79ca6
pytest
diff --git a/packages/bddeb b/packages/bddeb
index b009021a..fdb541d4 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -34,7 +34,13 @@ DEBUILD_ARGS = ["-S", "-d"]
def get_release_suffix(release):
- """Given ubuntu release (xenial), return a suffix for package (~16.04.1)"""
+    """Given an Ubuntu release, return a suffix for the package
+
+ Examples:
+ ---------
+ >>> get_release_suffix("jammy")
+ '~22.04.1'
+ """
csv_path = "/usr/share/distro-info/ubuntu.csv"
rels = {}
# fields are version, codename, series, created, release, eol, eol-server
@@ -150,10 +156,6 @@ def get_parser():
default=False,
action='store_true')
- parser.add_argument("--python2", dest="python2",
- help=("build debs for python2 rather than python3"),
- default=False, action='store_true')
-
parser.add_argument("--init-system", dest="init_system",
help=("build deb with INIT_SYSTEM=xxx"
" (default: %(default)s"),
diff --git a/packages/debian/control.in b/packages/debian/control.in
index 5bb915a9..30cf406b 100644
--- a/packages/debian/control.in
+++ b/packages/debian/control.in
@@ -12,7 +12,8 @@ Architecture: all
Depends: ${misc:Depends},
${python3:Depends},
iproute2,
- isc-dhcp-client
+ isc-dhcp-client,
+ python3-debconf
Recommends: eatmydata, sudo, software-properties-common, gdisk
Suggests: ssh-import-id, openssh-server
Description: Init scripts for cloud instances
diff --git a/packages/pkg-deps.json b/packages/pkg-deps.json
index 36e6b38f..8ba27e85 100644
--- a/packages/pkg-deps.json
+++ b/packages/pkg-deps.json
@@ -3,7 +3,8 @@
"build-requires" : [
"debhelper",
"dh-python",
- "dh-systemd"
+ "dh-systemd",
+ "python3-debconf"
],
"renames" : {
"pyyaml" : "python3-yaml",
diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in
index 1491822b..0ea782b9 100644
--- a/packages/redhat/cloud-init.spec.in
+++ b/packages/redhat/cloud-init.spec.in
@@ -48,11 +48,6 @@ BuildRequires: {{r}}
Requires: dmidecode
%endif
-# python2.6 needs argparse
-%if "%{?el6}" == "1"
-Requires: python-argparse
-%endif
-
# Install 'dynamic' runtime reqs from *requirements.txt and pkg-deps.json.
# Install them as BuildRequires too as they're used for testing.
@@ -171,7 +166,7 @@ fi
%files
-/lib/udev/rules.d/66-azure-ephemeral.rules
+%{_udevrulesdir}/66-azure-ephemeral.rules
%if "%{init_system}" == "systemd"
/usr/lib/systemd/system-generators/cloud-init-generator
diff --git a/pyproject.toml b/pyproject.toml
index 1aac03a8..2ee26121 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -9,92 +9,25 @@ skip = ["cloudinit/cmd/main.py", ".tox", "packages", "tools"]
[tool.mypy]
follow_imports = "silent"
-exclude=[
- '^cloudinit/apport\.py$',
- '^cloudinit/cmd/query\.py$',
- '^cloudinit/config/cc_chef\.py$',
- '^cloudinit/config/cc_keyboard\.py$',
- '^cloudinit/config/cc_landscape\.py$',
- '^cloudinit/config/cc_mcollective\.py$',
- '^cloudinit/config/cc_rsyslog\.py$',
- '^cloudinit/config/cc_write_files_deferred\.py$',
- '^cloudinit/config/cc_zypper_add_repo\.py$',
- '^cloudinit/config/schema\.py$',
- '^cloudinit/distros/bsd\.py$',
- '^cloudinit/distros/freebsd\.py$',
- '^cloudinit/distros/parsers/networkmanager_conf\.py$',
- '^cloudinit/distros/parsers/resolv_conf\.py$',
- '^cloudinit/distros/parsers/sys_conf\.py$',
- '^cloudinit/dmi\.py$',
- '^cloudinit/features\.py$',
- '^cloudinit/handlers/cloud_config\.py$',
- '^cloudinit/handlers/jinja_template\.py$',
- '^cloudinit/net/__init__\.py$',
- '^cloudinit/net/dhcp\.py$',
- '^cloudinit/net/netplan\.py$',
- '^cloudinit/net/sysconfig\.py$',
- '^cloudinit/serial\.py$',
- '^cloudinit/sources/DataSourceAliYun\.py$',
- '^cloudinit/sources/DataSourceLXD\.py$',
- '^cloudinit/sources/DataSourceOracle\.py$',
- '^cloudinit/sources/DataSourceScaleway\.py$',
- '^cloudinit/sources/DataSourceSmartOS\.py$',
- '^cloudinit/sources/DataSourceVMware\.py$',
- '^cloudinit/sources/__init__\.py$',
- '^cloudinit/sources/helpers/vmware/imc/config_file\.py$',
- '^cloudinit/templater\.py$',
- '^cloudinit/url_helper\.py$',
- '^conftest\.py$',
- '^doc/rtd/conf\.py$',
- '^setup\.py$',
- '^tests/integration_tests/clouds\.py$',
- '^tests/integration_tests/conftest\.py$',
- '^tests/integration_tests/instances\.py$',
- '^tests/integration_tests/integration_settings\.py$',
- '^tests/integration_tests/modules/test_disk_setup\.py$',
- '^tests/integration_tests/modules/test_growpart\.py$',
- '^tests/integration_tests/modules/test_ssh_keysfile\.py$',
- '^tests/unittests/__init__\.py$',
- '^tests/unittests/cmd/test_clean\.py$',
- '^tests/unittests/cmd/test_cloud_id\.py$',
- '^tests/unittests/cmd/test_main\.py$',
- '^tests/unittests/config/test_cc_chef\.py$',
- '^tests/unittests/config/test_cc_landscape\.py$',
- '^tests/unittests/config/test_cc_locale\.py$',
- '^tests/unittests/config/test_cc_mcollective\.py$',
- '^tests/unittests/config/test_cc_rh_subscription\.py$',
- '^tests/unittests/config/test_cc_set_hostname\.py$',
- '^tests/unittests/config/test_cc_snap\.py$',
- '^tests/unittests/config/test_cc_timezone\.py$',
- '^tests/unittests/config/test_cc_ubuntu_advantage\.py$',
- '^tests/unittests/config/test_cc_ubuntu_drivers\.py$',
- '^tests/unittests/config/test_schema\.py$',
- '^tests/unittests/helpers\.py$',
- '^tests/unittests/net/test_dhcp\.py$',
- '^tests/unittests/net/test_init\.py$',
- '^tests/unittests/sources/test_aliyun\.py$',
- '^tests/unittests/sources/test_ec2\.py$',
- '^tests/unittests/sources/test_exoscale\.py$',
- '^tests/unittests/sources/test_gce\.py$',
- '^tests/unittests/sources/test_lxd\.py$',
- '^tests/unittests/sources/test_opennebula\.py$',
- '^tests/unittests/sources/test_openstack\.py$',
- '^tests/unittests/sources/test_rbx\.py$',
- '^tests/unittests/sources/test_scaleway\.py$',
- '^tests/unittests/sources/test_smartos\.py$',
- '^tests/unittests/test_data\.py$',
- '^tests/unittests/test_ds_identify\.py$',
- '^tests/unittests/test_ec2_util\.py$',
- '^tests/unittests/test_net\.py$',
- '^tests/unittests/test_net_activators\.py$',
- '^tests/unittests/test_persistence\.py$',
- '^tests/unittests/test_sshutil\.py$',
- '^tests/unittests/test_subp\.py$',
- '^tests/unittests/test_templating\.py$',
- '^tests/unittests/test_url_helper\.py$',
- '^tools/mock-meta\.py$',
-]
+warn_unused_ignores = "true"
+warn_redundant_casts = "true"
+exclude=[]
[[tool.mypy.overrides]]
-module = [ "httpretty", "pycloudlib.*" ]
+module = [
+ "apport.*",
+ "BaseHTTPServer",
+ "configobj",
+ "cloudinit.feature_overrides",
+ "debconf",
+ "httpretty",
+ "httplib",
+ "jsonpatch",
+ "netifaces",
+ "paramiko.*",
+ "pycloudlib.*",
+ "responses",
+ "serial",
+ "tests.integration_tests.user_settings"
+]
ignore_missing_imports = true
diff --git a/requirements.txt b/requirements.txt
index c4adc455..edec46a7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,10 +10,7 @@ oauthlib
# This one is currently used only by the CloudSigma and SmartOS datasources.
# If these datasources are removed, this is no longer needed.
#
-# This will not work in py2.6 so it is only optionally installed on
-# python 2.7 and later.
-#
-# pyserial
+pyserial
# This is only needed for places where we need to support configs in a manner
# that the built-in config parser is not sufficient (i.e.
diff --git a/setup.py b/setup.py
index 7ba0ee8c..53ebcb9b 100644
--- a/setup.py
+++ b/setup.py
@@ -91,9 +91,8 @@ def render_tmpl(template, mode=None):
in that file if user had something there. b.) debuild will complain
that files are different outside of the debian directory."""
- # older versions of tox use bdist (xenial), and then install from there.
# newer versions just use install.
- if not (sys.argv[1] == "install" or sys.argv[1].startswith("bdist*")):
+ if not (sys.argv[1] == "install"):
return template
tmpl_ext = ".tmpl"
@@ -303,6 +302,11 @@ data_files = [
),
]
if not platform.system().endswith("BSD"):
+
+ RULES_PATH = LIB
+ if os.path.isfile("/etc/redhat-release"):
+ RULES_PATH = "/usr/lib"
+
data_files.extend(
[
(
@@ -310,7 +314,7 @@ if not platform.system().endswith("BSD"):
["tools/hook-network-manager"],
),
(ETC + "/dhcp/dhclient-exit-hooks.d/", ["tools/hook-dhclient"]),
- (LIB + "/udev/rules.d", [f for f in glob("udev/*.rules")]),
+ (RULES_PATH + "/udev/rules.d", [f for f in glob("udev/*.rules")]),
(
ETC + "/systemd/system/sshd-keygen@.service.d/",
["systemd/disable-sshd-keygen-if-cloud-init-active.conf"],
diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl
index 66893098..f8121e99 100644
--- a/systemd/cloud-init-generator.tmpl
+++ b/systemd/cloud-init-generator.tmpl
@@ -21,7 +21,7 @@ CLOUD_SYSTEM_TARGET="/usr/lib/systemd/system/cloud-init.target"
CLOUD_SYSTEM_TARGET="/lib/systemd/system/cloud-init.target"
{% endif %}
{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora",
- "miraclelinux", "openEuler", "rhel", "rocky", "virtuozzo"] %}
+ "miraclelinux", "openEuler", "openmandriva", "rhel", "rocky", "virtuozzo"] %}
dsidentify="/usr/libexec/cloud-init/ds-identify"
{% else %}
dsidentify="/usr/lib/cloud-init/ds-identify"
diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl
index c170aef7..a9e180ee 100644
--- a/systemd/cloud-init.service.tmpl
+++ b/systemd/cloud-init.service.tmpl
@@ -13,7 +13,7 @@ After=systemd-networkd-wait-online.service
After=networking.service
{% endif %}
{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora",
- "miraclelinux", "openEuler", "rhel", "rocky", "virtuozzo"] %}
+ "miraclelinux", "openEuler", "openmandriva", "rhel", "rocky", "virtuozzo"] %}
After=network.service
After=NetworkManager.service
{% endif %}
diff --git a/tests/integration_tests/bugs/test_lp1835584.py b/tests/integration_tests/bugs/test_lp1835584.py
index 765d73ef..8ecb1246 100644
--- a/tests/integration_tests/bugs/test_lp1835584.py
+++ b/tests/integration_tests/bugs/test_lp1835584.py
@@ -12,8 +12,6 @@ In cases where product_uuid changes case, ensure cloud-init doesn't
recreate ssh hostkeys across reboot (due to detecting an instance_id change).
This currently only affects linux-azure-fips -> linux-azure on Bionic.
-This test won't run on Xenial because both linux-azure-fips and linux-azure
-report uppercase product_uuids.
The test will launch a specific Bionic Ubuntu PRO FIPS image which has a
linux-azure-fips kernel known to report product_uuid as uppercase. Then upgrade
diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
index 0e2e1deb..eb9bd9cc 100644
--- a/tests/integration_tests/clouds.py
+++ b/tests/integration_tests/clouds.py
@@ -20,7 +20,7 @@ from pycloudlib import (
)
from pycloudlib.cloud import BaseCloud
from pycloudlib.lxd.cloud import _BaseLXD
-from pycloudlib.lxd.instance import LXDInstance
+from pycloudlib.lxd.instance import BaseInstance, LXDInstance
import cloudinit
from cloudinit.subp import ProcessExecutionError, subp
@@ -126,7 +126,7 @@ class IntegrationCloud(ABC):
except (ValueError, IndexError):
return image.image_id
- def _perform_launch(self, launch_kwargs, **kwargs):
+ def _perform_launch(self, launch_kwargs, **kwargs) -> BaseInstance:
pycloudlib_instance = self.cloud_instance.launch(**launch_kwargs)
return pycloudlib_instance
@@ -145,10 +145,11 @@ class IntegrationCloud(ABC):
"Instance id: %s",
self.settings.EXISTING_INSTANCE_ID,
)
- self.instance = self.cloud_instance.get_instance(
+ pycloudlib_instance = self.cloud_instance.get_instance(
self.settings.EXISTING_INSTANCE_ID
)
- return self.instance
+ instance = self.get_instance(pycloudlib_instance, settings)
+ return instance
default_launch_kwargs = {
"image_id": self.image_id,
"user_data": user_data,
@@ -174,7 +175,9 @@ class IntegrationCloud(ABC):
log.info("image serial: %s", serial.split()[1])
return instance
- def get_instance(self, cloud_instance, settings=integration_settings):
+ def get_instance(
+ self, cloud_instance, settings=integration_settings
+ ) -> IntegrationInstance:
return IntegrationInstance(self, cloud_instance, settings)
def destroy(self):
diff --git a/tests/integration_tests/cmd/test_status.py b/tests/integration_tests/cmd/test_status.py
index ced883fd..f5a2d39c 100644
--- a/tests/integration_tests/cmd/test_status.py
+++ b/tests/integration_tests/cmd/test_status.py
@@ -53,7 +53,7 @@ def test_wait_when_no_datasource(session_cloud: IntegrationCloud, setup_image):
}
) as client:
# We know this will be an LXD instance due to our pytest mark
- client.instance.execute_via_ssh = False # type: ignore
+ client.instance.execute_via_ssh = False # pyright: ignore
# No ubuntu user if cloud-init didn't run
client.instance.username = "root"
# Jammy and above will use LXD datasource by default
@@ -65,5 +65,4 @@ def test_wait_when_no_datasource(session_cloud: IntegrationCloud, setup_image):
_remove_nocloud_dir_and_reboot(client)
status_out = _wait_for_cloud_init(client).stdout.strip()
assert "status: disabled" in status_out
- assert "Cloud-init disabled by cloud-init-generator" in status_out
assert client.execute("cloud-init status --wait").ok
diff --git a/tests/integration_tests/datasources/test_ec2_ipv6.py b/tests/integration_tests/datasources/test_ec2_ipv6.py
index 8cde4dc9..7bb45b40 100644
--- a/tests/integration_tests/datasources/test_ec2_ipv6.py
+++ b/tests/integration_tests/datasources/test_ec2_ipv6.py
@@ -10,9 +10,7 @@ def _test_crawl(client, ip):
assert client.execute("cloud-init init --local").ok
log = client.read_from_file("/var/log/cloud-init.log")
assert f"Using metadata source: '{ip}'" in log
- result = re.findall(
- r"Crawl of metadata service took (\d+.\d+) seconds", log
- )
+ result = re.findall(r"Crawl of metadata service.* (\d+.\d+) seconds", log)
if len(result) != 1:
pytest.fail(f"Expected 1 metadata crawl time, got {result}")
# 20 would still be a crazy long time for metadata service to crawl,
@@ -41,3 +39,11 @@ def test_dual_stack(client: IntegrationInstance):
# Block IPv6 requests
assert client.execute("ip6tables -I OUTPUT -d fd00:ec2::254 -j REJECT").ok
_test_crawl(client, "http://169.254.169.254")
+
+ # Force NoDHCPLeaseError (by removing dhclient) and assert ipv6 still works
+ # Destructive test goes last
+    # dhclient is at /sbin/dhclient on bionic but /usr/sbin/dhclient elsewhere
+ assert client.execute("rm $(which dhclient)").ok
+ client.restart()
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "Crawl of metadata service using link-local ipv6 took" in log
diff --git a/tests/integration_tests/datasources/test_lxd_discovery.py b/tests/integration_tests/datasources/test_lxd_discovery.py
index f72b1b4b..feae52a9 100644
--- a/tests/integration_tests/datasources/test_lxd_discovery.py
+++ b/tests/integration_tests/datasources/test_lxd_discovery.py
@@ -8,7 +8,7 @@ from tests.integration_tests.instances import IntegrationInstance
from tests.integration_tests.util import verify_clean_log
-def _customize_envionment(client: IntegrationInstance):
+def _customize_environment(client: IntegrationInstance):
# Assert our platform can detect LXD during systemd generator timeframe.
ds_id_log = client.execute("cat /run/cloud-init/ds-identify.log").stdout
assert "check for 'LXD' returned found" in ds_id_log
@@ -54,7 +54,7 @@ def _customize_envionment(client: IntegrationInstance):
def test_lxd_datasource_discovery(client: IntegrationInstance):
"""Test that DataSourceLXD is detected instead of NoCloud."""
- _customize_envionment(client)
+ _customize_environment(client)
result = client.execute("cloud-init status --wait --long")
if not result.ok:
raise AssertionError("cloud-init failed:\n%s", result.stderr)
diff --git a/tests/integration_tests/datasources/test_network_dependency.py b/tests/integration_tests/datasources/test_network_dependency.py
index 32ac7053..bd7fe658 100644
--- a/tests/integration_tests/datasources/test_network_dependency.py
+++ b/tests/integration_tests/datasources/test_network_dependency.py
@@ -3,7 +3,7 @@ import pytest
from tests.integration_tests.instances import IntegrationInstance
-def _customize_envionment(client: IntegrationInstance):
+def _customize_environment(client: IntegrationInstance):
# Insert our "disable_network_activation" file here
client.write_to_file(
"/etc/cloud/cloud.cfg.d/99-disable-network-activation.cfg",
@@ -19,7 +19,7 @@ def _customize_envionment(client: IntegrationInstance):
@pytest.mark.ubuntu # Because netplan
def test_network_activation_disabled(client: IntegrationInstance):
"""Test that the network is not activated during init mode."""
- _customize_envionment(client)
+ _customize_environment(client)
result = client.execute("systemctl status google-guest-agent.service")
if not result.ok:
raise AssertionError(
diff --git a/tests/integration_tests/datasources/test_oci_networking.py b/tests/integration_tests/datasources/test_oci_networking.py
new file mode 100644
index 00000000..f569650e
--- /dev/null
+++ b/tests/integration_tests/datasources/test_oci_networking.py
@@ -0,0 +1,118 @@
+import re
+from typing import Iterator, Set
+
+import pytest
+import yaml
+
+from tests.integration_tests.clouds import IntegrationCloud
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
+
+DS_CFG = """\
+datasource:
+ Oracle:
+ configure_secondary_nics: {configure_secondary_nics}
+"""
+
+
+def customize_environment(
+ client: IntegrationInstance,
+ tmpdir,
+ configure_secondary_nics: bool = False,
+):
+ cfg = tmpdir.join("01_oracle_datasource.cfg")
+ with open(cfg, "w") as f:
+ f.write(
+ DS_CFG.format(configure_secondary_nics=configure_secondary_nics)
+ )
+ client.push_file(cfg, "/etc/cloud/cloud.cfg.d/01_oracle_datasource.cfg")
+
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+
+def extract_interface_names(network_config: dict) -> Set[str]:
+ if network_config["version"] == 1:
+ interfaces = map(lambda conf: conf["name"], network_config["config"])
+ elif network_config["version"] == 2:
+ interfaces = network_config["ethernets"].keys()
+ else:
+ raise NotImplementedError(
+ f'Implement me for version={network_config["version"]}'
+ )
+ return set(interfaces)
+
+
+@pytest.mark.oci
+def test_oci_networking_iscsi_instance(client: IntegrationInstance, tmpdir):
+ customize_environment(client, tmpdir, configure_secondary_nics=False)
+ result_net_files = client.execute("ls /run/net-*.conf")
+ assert result_net_files.ok, "No net files found under /run"
+
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+
+ assert (
+ "opc/v2/vnics/" not in log
+ ), "vnic data was fetched and it should not have been"
+
+ netplan_yaml = client.read_from_file("/etc/netplan/50-cloud-init.yaml")
+ netplan_cfg = yaml.safe_load(netplan_yaml)
+ configured_interfaces = extract_interface_names(netplan_cfg["network"])
+ assert 1 <= len(
+ configured_interfaces
+ ), "Expected at least 1 primary network configuration."
+
+ expected_interfaces = set(
+ re.findall(r"/run/net-(.+)\.conf", result_net_files.stdout)
+ )
+ for expected_interface in expected_interfaces:
+ assert (
+ f"Reading from /run/net-{expected_interface}.conf" in log
+        ), f"Expected {expected_interface} not found in: {log}"
+
+ not_found_interfaces = expected_interfaces.difference(
+ configured_interfaces
+ )
+ assert not not_found_interfaces, (
+ f"Interfaces, {not_found_interfaces}, expected to be configured in"
+ f" {netplan_cfg['network']}"
+ )
+ assert client.execute("ping -c 2 canonical.com").ok
+
+
+@pytest.fixture(scope="function")
+def client_with_secondary_vnic(
+ session_cloud: IntegrationCloud,
+) -> Iterator[IntegrationInstance]:
+ """Create an instance client and attach a temporary vnic"""
+ with session_cloud.launch() as client:
+ ip_address = client.instance.add_network_interface()
+ yield client
+ client.instance.remove_network_interface(ip_address)
+
+
+@pytest.mark.oci
+def test_oci_networking_iscsi_instance_secondary_vnics(
+ client_with_secondary_vnic: IntegrationInstance, tmpdir
+):
+ client = client_with_secondary_vnic
+ customize_environment(client, tmpdir, configure_secondary_nics=True)
+
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+
+ assert "opc/v2/vnics/" in log, f"vnics data not fetched in {log}"
+ netplan_yaml = client.read_from_file("/etc/netplan/50-cloud-init.yaml")
+ netplan_cfg = yaml.safe_load(netplan_yaml)
+ configured_interfaces = extract_interface_names(netplan_cfg["network"])
+ assert 2 <= len(
+ configured_interfaces
+    ), "Expected at least 1 primary and 1 secondary network configuration"
+
+ result_net_files = client.execute("ls /run/net-*.conf")
+ expected_interfaces = set(
+ re.findall(r"/run/net-(.+)\.conf", result_net_files.stdout)
+ )
+ assert len(expected_interfaces) + 1 == len(configured_interfaces)
+ assert client.execute("ping -c 2 canonical.com").ok
diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py
index 65cd977a..bd807cef 100644
--- a/tests/integration_tests/instances.py
+++ b/tests/integration_tests/instances.py
@@ -60,6 +60,7 @@ class IntegrationInstance:
self.cloud = cloud
self.instance = instance
self.settings = settings
+ self._ip = ""
def destroy(self):
self.instance.delete()
@@ -193,9 +194,20 @@ class IntegrationInstance:
assert self.execute("apt-get update -q").ok
assert self.execute("apt-get install -qy cloud-init").ok
+ def ip(self) -> str:
+ if self._ip:
+ return self._ip
+ try:
+ self._ip = self.instance.ip
+ except NotImplementedError:
+ self._ip = "Unknown"
+ return self._ip
+
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.settings.KEEP_INSTANCE:
self.destroy()
+ else:
+            log.info("Keeping Instance, public ip: %s", self.ip())
diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py
index f27e4f12..abc70fe4 100644
--- a/tests/integration_tests/integration_settings.py
+++ b/tests/integration_tests/integration_settings.py
@@ -1,5 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
+from typing import Optional
from cloudinit.util import is_false, is_true
@@ -26,7 +27,7 @@ PLATFORM = "lxd_container"
# The cloud-specific instance type to run. E.g., a1.medium on AWS
# If the pycloudlib instance provides a default, this can be left None
-INSTANCE_TYPE = None
+INSTANCE_TYPE: Optional[str] = None
# Determines the base image to use or generate new images from.
#
@@ -38,7 +39,7 @@ OS_IMAGE = "focal"
# Populate if you want to use a pre-launched instance instead of
# creating a new one. The exact contents will be platform dependent
-EXISTING_INSTANCE_ID = None
+EXISTING_INSTANCE_ID: Optional[str] = None
##################################################################
# IMAGE GENERATION SETTINGS
diff --git a/tests/integration_tests/modules/test_cli.py b/tests/integration_tests/modules/test_cli.py
index e878176f..4b8f53a8 100644
--- a/tests/integration_tests/modules/test_cli.py
+++ b/tests/integration_tests/modules/test_cli.py
@@ -18,11 +18,18 @@ runcmd:
- echo 'hi' > /var/tmp/test
"""
+# The '-' in 'hashed-password' fails schema validation
INVALID_USER_DATA_SCHEMA = """\
#cloud-config
-updates:
- notnetwork: -1
-apt_pipelining: bogus
+users:
+ - default
+ - name: newsuper
+ gecos: Big Stuff
+ groups: users, admin
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ hashed-password: asdfasdf
+ shell: /bin/bash
+ lock_passwd: true
"""
@@ -69,11 +76,12 @@ def test_invalid_userdata_schema(client: IntegrationInstance):
assert result.ok
log = client.read_from_file("/var/log/cloud-init.log")
warning = (
- "[WARNING]: Invalid cloud-config provided:\napt_pipelining: 'bogus'"
- " is not valid under any of the given schemas\nupdates: Additional"
- " properties are not allowed ('notnetwork' was unexpected)"
+ "[WARNING]: Invalid cloud-config provided: Please run "
+ "'sudo cloud-init schema --system' to see the schema errors."
)
assert warning in log
+ assert "asdfasdf" not in log
+
result = client.execute("cloud-init status --long")
if not result.ok:
raise AssertionError(
diff --git a/tests/integration_tests/modules/test_ubuntu_drivers.py b/tests/integration_tests/modules/test_ubuntu_drivers.py
new file mode 100644
index 00000000..4fbfba3c
--- /dev/null
+++ b/tests/integration_tests/modules/test_ubuntu_drivers.py
@@ -0,0 +1,37 @@
+import re
+
+import pytest
+
+from tests.integration_tests.clouds import IntegrationCloud
+from tests.integration_tests.util import verify_clean_log
+
+USER_DATA = """\
+#cloud-config
+drivers:
+ nvidia:
+ license-accepted: true
+"""
+
+# NOTE(VM.GPU2.1 is not in all availability_domains: use qIZq:US-ASHBURN-AD-1)
+
+
+@pytest.mark.adhoc # Expensive instance type
+@pytest.mark.oci
+def test_ubuntu_drivers_installed(session_cloud: IntegrationCloud):
+ with session_cloud.launch(
+ launch_kwargs={"instance_type": "VM.GPU2.1"}, user_data=USER_DATA
+ ) as client:
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+ assert 1 == log.count(
+ "Installing and activating NVIDIA drivers "
+ "(nvidia/license-accepted=True, version=latest)"
+ )
+ result = client.execute("dpkg -l | grep nvidia")
+ assert result.ok, "No nvidia packages found"
+ assert re.search(
+ r"ii\s+linux-modules-nvidia-\d+-server", result.stdout
+ ), (
+ f"Did not find specific nvidia drivers packages in:"
+ f" {result.stdout}"
+ )
diff --git a/tests/integration_tests/test_paths.py b/tests/integration_tests/test_paths.py
new file mode 100644
index 00000000..20392e35
--- /dev/null
+++ b/tests/integration_tests/test_paths.py
@@ -0,0 +1,66 @@
+import re
+from typing import Iterator
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
+
+DEFAULT_CLOUD_DIR = "/var/lib/cloud"
+NEW_CLOUD_DIR = "/new-cloud-dir"
+CUSTOM_CLOUD_DIR = f"""\
+system_info:
+ paths:
+ cloud_dir: {NEW_CLOUD_DIR}
+"""
+CUSTOM_CLOUD_DIR_FN = "95-custom-cloud-dir.cfg"
+
+
+@pytest.fixture
+def custom_client(
+ client: IntegrationInstance, tmpdir
+) -> Iterator[IntegrationInstance]:
+ client.write_to_file(
+ f"/etc/cloud/cloud.cfg.d/{CUSTOM_CLOUD_DIR_FN}", CUSTOM_CLOUD_DIR
+ )
+ client.execute(f"rm -rf {DEFAULT_CLOUD_DIR}") # Remove previous cloud_dir
+ client.execute("cloud-init clean --logs")
+ client.restart()
+ yield client
+
+
+class TestHonorCloudDir:
+ def verify_log_and_files(self, custom_client):
+ log_content = custom_client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log_content)
+ assert NEW_CLOUD_DIR in log_content
+ assert DEFAULT_CLOUD_DIR not in log_content
+ assert custom_client.execute(f"test ! -d {DEFAULT_CLOUD_DIR}").ok
+
+ def collect_logs(self, custom_client: IntegrationInstance):
+ help_result = custom_client.execute("cloud-init collect-logs -h")
+ assert help_result.ok, help_result.stderr
+ assert f"{NEW_CLOUD_DIR}/instance/user-data.txt" in re.sub(
+ r"\s+", "", help_result.stdout
+        ), "user-data file not correctly rendered in collect-logs -h"
+ collect_logs_result = custom_client.execute(
+ "cloud-init collect-logs --include-userdata"
+ )
+ assert (
+ collect_logs_result.ok
+ ), f"collect-logs error: {collect_logs_result.stderr}"
+
+ # LXD inserts some agent setup code into VMs on Bionic under
+ # /var/lib/cloud. The inserted script will cause this test to fail
+ # because the test ensures nothing is running under /var/lib/cloud.
+    # Since LXD, not cloud-init, is responsible, simply skip this test
+    # on Bionic.
+ @pytest.mark.not_bionic
+ def test_honor_cloud_dir(self, custom_client: IntegrationInstance):
+ """Integration test for LP: #1976564
+
+ cloud-init must honor the cloud-dir configured in
+ /etc/cloud/cloud.cfg.d
+ """
+ self.verify_log_and_files(custom_client)
+ self.collect_logs(custom_client)
diff --git a/tests/unittests/cmd/devel/test_logs.py b/tests/unittests/cmd/devel/test_logs.py
index 73ed3c65..c916c19f 100644
--- a/tests/unittests/cmd/devel/test_logs.py
+++ b/tests/unittests/cmd/devel/test_logs.py
@@ -1,55 +1,52 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
+import re
from datetime import datetime
from io import StringIO
from cloudinit.cmd.devel import logs
from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
from cloudinit.subp import subp
-from cloudinit.util import ensure_dir, load_file, write_file
-from tests.unittests.helpers import (
- FilesystemMockingTestCase,
- mock,
- wrap_and_call,
-)
+from cloudinit.util import load_file, write_file
+from tests.unittests.helpers import mock
+M_PATH = "cloudinit.cmd.devel.logs."
-@mock.patch("cloudinit.cmd.devel.logs.os.getuid")
-class TestCollectLogs(FilesystemMockingTestCase):
- def setUp(self):
- super(TestCollectLogs, self).setUp()
- self.new_root = self.tmp_dir()
- self.run_dir = self.tmp_path("run", self.new_root)
- def test_collect_logs_with_userdata_requires_root_user(self, m_getuid):
+@mock.patch("cloudinit.cmd.devel.logs.os.getuid")
+class TestCollectLogs:
+ def test_collect_logs_with_userdata_requires_root_user(
+ self, m_getuid, tmpdir
+ ):
"""collect-logs errors when non-root user collects userdata ."""
m_getuid.return_value = 100 # non-root
- output_tarfile = self.tmp_path("logs.tgz")
+ output_tarfile = tmpdir.join("logs.tgz")
with mock.patch("sys.stderr", new_callable=StringIO) as m_stderr:
- self.assertEqual(
- 1, logs.collect_logs(output_tarfile, include_userdata=True)
+ assert 1 == logs.collect_logs(
+ output_tarfile, include_userdata=True
)
- self.assertEqual(
+ assert (
"To include userdata, root user is required."
- " Try sudo cloud-init collect-logs\n",
- m_stderr.getvalue(),
+ " Try sudo cloud-init collect-logs\n" == m_stderr.getvalue()
)
- def test_collect_logs_creates_tarfile(self, m_getuid):
+ def test_collect_logs_creates_tarfile(self, m_getuid, mocker, tmpdir):
"""collect-logs creates a tarfile with all related cloud-init info."""
m_getuid.return_value = 100
- log1 = self.tmp_path("cloud-init.log", self.new_root)
+ log1 = tmpdir.join("cloud-init.log")
write_file(log1, "cloud-init-log")
- log2 = self.tmp_path("cloud-init-output.log", self.new_root)
+ log2 = tmpdir.join("cloud-init-output.log")
write_file(log2, "cloud-init-output-log")
- ensure_dir(self.run_dir)
- write_file(self.tmp_path("results.json", self.run_dir), "results")
+ run_dir = tmpdir.join("run")
+ write_file(run_dir.join("results.json"), "results")
write_file(
- self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir),
+ run_dir.join(
+ INSTANCE_JSON_SENSITIVE_FILE,
+ ),
"sensitive",
)
- output_tarfile = self.tmp_path("logs.tgz")
+ output_tarfile = str(tmpdir.join("logs.tgz"))
date = datetime.utcnow().date().strftime("%Y-%m-%d")
date_logdir = "cloud-init-logs-{0}".format(date)
@@ -80,76 +77,63 @@ class TestCollectLogs(FilesystemMockingTestCase):
fake_stderr = mock.MagicMock()
- wrap_and_call(
- "cloudinit.cmd.devel.logs",
- {
- "subp": {"side_effect": fake_subp},
- "sys.stderr": {"new": fake_stderr},
- "CLOUDINIT_LOGS": {"new": [log1, log2]},
- "CLOUDINIT_RUN_DIR": {"new": self.run_dir},
- },
- logs.collect_logs,
- output_tarfile,
- include_userdata=False,
- )
+ mocker.patch(M_PATH + "subp", side_effect=fake_subp)
+ mocker.patch(M_PATH + "sys.stderr", fake_stderr)
+ mocker.patch(M_PATH + "CLOUDINIT_LOGS", [log1, log2])
+ mocker.patch(M_PATH + "CLOUDINIT_RUN_DIR", run_dir)
+ logs.collect_logs(output_tarfile, include_userdata=False)
# unpack the tarfile and check file contents
- subp(["tar", "zxvf", output_tarfile, "-C", self.new_root])
- out_logdir = self.tmp_path(date_logdir, self.new_root)
- self.assertFalse(
- os.path.exists(
- os.path.join(
- out_logdir,
- "run",
- "cloud-init",
- INSTANCE_JSON_SENSITIVE_FILE,
- )
- ),
- "Unexpected file found: %s" % INSTANCE_JSON_SENSITIVE_FILE,
- )
- self.assertEqual(
- "0.7fake\n", load_file(os.path.join(out_logdir, "dpkg-version"))
+ subp(["tar", "zxvf", output_tarfile, "-C", str(tmpdir)])
+ out_logdir = tmpdir.join(date_logdir)
+ assert not os.path.exists(
+ os.path.join(
+ out_logdir,
+ "run",
+ "cloud-init",
+ INSTANCE_JSON_SENSITIVE_FILE,
+ )
+ ), (
+ "Unexpected file found: %s" % INSTANCE_JSON_SENSITIVE_FILE
)
- self.assertEqual(
- version_out, load_file(os.path.join(out_logdir, "version"))
+ assert "0.7fake\n" == load_file(
+ os.path.join(out_logdir, "dpkg-version")
)
- self.assertEqual(
- "cloud-init-log",
- load_file(os.path.join(out_logdir, "cloud-init.log")),
+ assert version_out == load_file(os.path.join(out_logdir, "version"))
+ assert "cloud-init-log" == load_file(
+ os.path.join(out_logdir, "cloud-init.log")
)
- self.assertEqual(
- "cloud-init-output-log",
- load_file(os.path.join(out_logdir, "cloud-init-output.log")),
+ assert "cloud-init-output-log" == load_file(
+ os.path.join(out_logdir, "cloud-init-output.log")
)
- self.assertEqual(
- "dmesg-out\n", load_file(os.path.join(out_logdir, "dmesg.txt"))
+ assert "dmesg-out\n" == load_file(
+ os.path.join(out_logdir, "dmesg.txt")
)
- self.assertEqual(
- "journal-out\n", load_file(os.path.join(out_logdir, "journal.txt"))
+ assert "journal-out\n" == load_file(
+ os.path.join(out_logdir, "journal.txt")
)
- self.assertEqual(
- "results",
- load_file(
- os.path.join(out_logdir, "run", "cloud-init", "results.json")
- ),
+ assert "results" == load_file(
+ os.path.join(out_logdir, "run", "cloud-init", "results.json")
)
fake_stderr.write.assert_any_call("Wrote %s\n" % output_tarfile)
- def test_collect_logs_includes_optional_userdata(self, m_getuid):
+ def test_collect_logs_includes_optional_userdata(
+ self, m_getuid, mocker, tmpdir
+ ):
"""collect-logs include userdata when --include-userdata is set."""
m_getuid.return_value = 0
- log1 = self.tmp_path("cloud-init.log", self.new_root)
+ log1 = tmpdir.join("cloud-init.log")
write_file(log1, "cloud-init-log")
- log2 = self.tmp_path("cloud-init-output.log", self.new_root)
+ log2 = tmpdir.join("cloud-init-output.log")
write_file(log2, "cloud-init-output-log")
- userdata = self.tmp_path("user-data.txt", self.new_root)
+ userdata = tmpdir.join("user-data.txt")
write_file(userdata, "user-data")
- ensure_dir(self.run_dir)
- write_file(self.tmp_path("results.json", self.run_dir), "results")
+ run_dir = tmpdir.join("run")
+ write_file(run_dir.join("results.json"), "results")
write_file(
- self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, self.run_dir),
+ run_dir.join(INSTANCE_JSON_SENSITIVE_FILE),
"sensitive",
)
- output_tarfile = self.tmp_path("logs.tgz")
+ output_tarfile = str(tmpdir.join("logs.tgz"))
date = datetime.utcnow().date().strftime("%Y-%m-%d")
date_logdir = "cloud-init-logs-{0}".format(date)
@@ -180,34 +164,31 @@ class TestCollectLogs(FilesystemMockingTestCase):
fake_stderr = mock.MagicMock()
- wrap_and_call(
- "cloudinit.cmd.devel.logs",
- {
- "subp": {"side_effect": fake_subp},
- "sys.stderr": {"new": fake_stderr},
- "CLOUDINIT_LOGS": {"new": [log1, log2]},
- "CLOUDINIT_RUN_DIR": {"new": self.run_dir},
- "USER_DATA_FILE": {"new": userdata},
- },
- logs.collect_logs,
- output_tarfile,
- include_userdata=True,
- )
+ mocker.patch(M_PATH + "subp", side_effect=fake_subp)
+ mocker.patch(M_PATH + "sys.stderr", fake_stderr)
+ mocker.patch(M_PATH + "CLOUDINIT_LOGS", [log1, log2])
+ mocker.patch(M_PATH + "CLOUDINIT_RUN_DIR", run_dir)
+ mocker.patch(M_PATH + "_get_user_data_file", return_value=userdata)
+ logs.collect_logs(output_tarfile, include_userdata=True)
# unpack the tarfile and check file contents
- subp(["tar", "zxvf", output_tarfile, "-C", self.new_root])
- out_logdir = self.tmp_path(date_logdir, self.new_root)
- self.assertEqual(
- "user-data", load_file(os.path.join(out_logdir, "user-data.txt"))
+ subp(["tar", "zxvf", output_tarfile, "-C", str(tmpdir)])
+ out_logdir = tmpdir.join(date_logdir)
+ assert "user-data" == load_file(
+ os.path.join(out_logdir, "user-data.txt")
)
- self.assertEqual(
- "sensitive",
- load_file(
- os.path.join(
- out_logdir,
- "run",
- "cloud-init",
- INSTANCE_JSON_SENSITIVE_FILE,
- )
- ),
+ assert "sensitive" == load_file(
+ os.path.join(
+ out_logdir,
+ "run",
+ "cloud-init",
+ INSTANCE_JSON_SENSITIVE_FILE,
+ )
)
fake_stderr.write.assert_any_call("Wrote %s\n" % output_tarfile)
+
+
+class TestParser:
+ def test_parser_help_has_userdata_file(self, mocker, tmpdir):
+ userdata = str(tmpdir.join("user-data.txt"))
+ mocker.patch(M_PATH + "_get_user_data_file", return_value=userdata)
+ assert userdata in re.sub(r"\s+", "", logs.get_parser().format_help())
diff --git a/tests/unittests/cmd/devel/test_net_convert.py b/tests/unittests/cmd/devel/test_net_convert.py
new file mode 100644
index 00000000..60acb1a6
--- /dev/null
+++ b/tests/unittests/cmd/devel/test_net_convert.py
@@ -0,0 +1,187 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import itertools
+
+import pytest
+
+from cloudinit.cmd.devel import net_convert
+from cloudinit.distros.debian import NETWORK_FILE_HEADER
+from tests.unittests.helpers import mock
+
+M_PATH = "cloudinit.cmd.devel.net_convert."
+
+
+required_args = [
+ "--directory",
+ "--network-data",
+ "--distro=ubuntu",
+ "--kind=eni",
+ "--output-kind=eni",
+]
+
+
+SAMPLE_NET_V1 = """\
+network:
+ version: 1
+ config:
+ - type: physical
+ name: eth0
+ subnets:
+ - type: dhcp
+"""
+
+
+SAMPLE_NETPLAN_CONTENT = f"""\
+{NETWORK_FILE_HEADER}network:
+ version: 2
+ ethernets:
+ eth0:
+ dhcp4: true
+"""
+
+SAMPLE_ENI_CONTENT = f"""\
+{NETWORK_FILE_HEADER}auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet dhcp
+"""
+
+SAMPLE_NETWORKD_CONTENT = """\
+[Match]
+Name=eth0
+
+[Network]
+DHCP=ipv4
+
+"""
+
+SAMPLE_SYSCONFIG_CONTENT = """\
+# Created by cloud-init on instance boot automatically, do not edit.
+#
+BOOTPROTO=dhcp
+DEVICE=eth0
+NM_CONTROLLED=no
+ONBOOT=yes
+TYPE=Ethernet
+USERCTL=no
+"""
+
+SAMPLE_NETWORK_MANAGER_CONTENT = """\
+# Generated by cloud-init. Changes will be lost.
+
+[connection]
+id=cloud-init eth0
+uuid=1dd9a779-d327-56e1-8454-c65e2556c12c
+type=ethernet
+interface-name=eth0
+
+[user]
+org.freedesktop.NetworkManager.origin=cloud-init
+
+[ethernet]
+
+[ipv4]
+method=auto
+may-fail=false
+
+"""
+
+
+class TestNetConvert:
+
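+    # Each entry below is the full required-argument list with exactly one
+    # flag dropped, so argparse should reject every combination.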
+ missing_required_args = itertools.combinations(
+ required_args, len(required_args) - 1
+ )
+
+ def _replace_path_args(self, cmd, tmpdir):
+ """Inject tmpdir replacements for parameterize args."""
+ updated_cmd = []
+ for arg in cmd:
+ if arg == "--network-data":
+ net_file = tmpdir.join("net")
+ net_file.write("")
+ updated_cmd.append(f"--network-data={net_file}")
+ elif arg == "--directory":
+ updated_cmd.append(f"--directory={tmpdir.strpath}")
+ else:
+ updated_cmd.append(arg)
+ return updated_cmd
+
+ @pytest.mark.parametrize("cmdargs", missing_required_args)
+ def test_argparse_error_on_missing_args(self, cmdargs, capsys, tmpdir):
+ """Log the appropriate error when required args are missing."""
+ params = self._replace_path_args(cmdargs, tmpdir)
+ with mock.patch("sys.argv", ["net-convert"] + params):
+ with pytest.raises(SystemExit):
+ net_convert.get_parser().parse_args()
+ _out, err = capsys.readouterr()
+ assert "the following arguments are required" in err
+
+ @pytest.mark.parametrize("debug", (False, True))
+ @pytest.mark.parametrize(
+ "output_kind,outfile_content",
+ (
+ (
+ "netplan",
+ {"etc/netplan/50-cloud-init.yaml": SAMPLE_NETPLAN_CONTENT},
+ ),
+ (
+ "eni",
+ {
+ "etc/network/interfaces.d/50-cloud-init.cfg": SAMPLE_ENI_CONTENT # noqa: E501
+ },
+ ),
+ (
+ "networkd",
+ {
+ "etc/systemd/network/10-cloud-init-eth0.network": SAMPLE_NETWORKD_CONTENT # noqa: E501
+ },
+ ),
+ (
+ "sysconfig",
+ {
+ "etc/sysconfig/network-scripts/ifcfg-eth0": SAMPLE_SYSCONFIG_CONTENT # noqa: E501
+ },
+ ),
+ (
+ "network-manager",
+ {
+ "etc/NetworkManager/system-connections/cloud-init-eth0.nmconnection": SAMPLE_NETWORK_MANAGER_CONTENT # noqa: E501
+ },
+ ),
+ ),
+ )
+ def test_convert_output_kind_artifacts(
+ self, output_kind, outfile_content, debug, capsys, tmpdir
+ ):
+ """Assert proper output-kind artifacts are written."""
+ network_data = tmpdir.join("network_data")
+ network_data.write(SAMPLE_NET_V1)
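+        # Ubuntu doesn't use the sysconfig renderer, so switch the distro
+        # under test to centos for that output kind.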
+ distro = "centos" if output_kind == "sysconfig" else "ubuntu"
+ args = [
+ f"--directory={tmpdir.strpath}",
+ f"--network-data={network_data.strpath}",
+ f"--distro={distro}",
+ "--kind=yaml",
+ f"--output-kind={output_kind}",
+ ]
+ if debug:
+ args.append("--debug")
+ params = self._replace_path_args(args, tmpdir)
+ with mock.patch("sys.argv", ["net-convert"] + params):
+ args = net_convert.get_parser().parse_args()
+ with mock.patch("cloudinit.util.chownbyname") as chown:
+ net_convert.handle_args("somename", args)
+ for path in outfile_content:
+ outfile = tmpdir.join(path)
+ assert outfile_content[path] == outfile.read()
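+            # Only the networkd renderer chowns its artifacts (to the
+            # systemd-network user), so assert on the chown mock here.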
+ if output_kind == "networkd":
+ assert [
+ mock.call(
+ outfile.strpath, "systemd-network", "systemd-network"
+ )
+ ] == chown.call_args_list
+
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/cmd/test_clean.py b/tests/unittests/cmd/test_clean.py
index 7d12017e..71f541c6 100644
--- a/tests/unittests/cmd/test_clean.py
+++ b/tests/unittests/cmd/test_clean.py
@@ -8,7 +8,7 @@ from cloudinit.cmd import clean
from cloudinit.util import ensure_dir, sym_link, write_file
from tests.unittests.helpers import CiTestCase, mock, wrap_and_call
-mypaths = namedtuple("MyPaths", "cloud_dir")
+MyPaths = namedtuple("MyPaths", "cloud_dir")
class TestClean(CiTestCase):
@@ -25,7 +25,7 @@ class TestClean(CiTestCase):
"output": {"all": "|tee -a {0}".format(self.log2)},
}
# Ensure cloud_dir has a trailing slash, to match real behaviour
- paths = mypaths(cloud_dir="{}/".format(self.artifact_dir))
+ paths = MyPaths(cloud_dir="{}/".format(self.artifact_dir))
def __init__(self, ds_deps):
pass
diff --git a/tests/unittests/cmd/test_cloud_id.py b/tests/unittests/cmd/test_cloud_id.py
index 907297a6..37f1df2c 100644
--- a/tests/unittests/cmd/test_cloud_id.py
+++ b/tests/unittests/cmd/test_cloud_id.py
@@ -2,8 +2,6 @@
"""Tests for cloud-id command line utility."""
-from collections import namedtuple
-
import pytest
from cloudinit import util
@@ -14,9 +12,6 @@ M_PATH = "cloudinit.cmd.cloud_id."
class TestCloudId:
-
- args = namedtuple("cloudidargs", "instance_data json long")
-
def test_cloud_id_arg_parser_defaults(self):
"""Validate the argument defaults when not provided by the end-user."""
cmd = ["cloud-id"]
diff --git a/tests/unittests/cmd/test_main.py b/tests/unittests/cmd/test_main.py
index 2f7a1fb1..e9ad0bb8 100644
--- a/tests/unittests/cmd/test_main.py
+++ b/tests/unittests/cmd/test_main.py
@@ -13,8 +13,7 @@ from cloudinit.cmd import main
from cloudinit.util import ensure_dir, load_file, write_file
from tests.unittests.helpers import FilesystemMockingTestCase, wrap_and_call
-mypaths = namedtuple("MyPaths", "run_dir")
-myargs = namedtuple("MyArgs", "debug files force local reporter subcommand")
+MyArgs = namedtuple("MyArgs", "debug files force local reporter subcommand")
class TestMain(FilesystemMockingTestCase):
@@ -58,7 +57,7 @@ class TestMain(FilesystemMockingTestCase):
def test_main_init_run_net_runs_modules(self):
"""Modules like write_files are run in 'net' mode."""
- cmdargs = myargs(
+ cmdargs = MyArgs(
debug=False,
files=None,
force=False,
@@ -104,7 +103,7 @@ class TestMain(FilesystemMockingTestCase):
}
cloud_cfg = safeyaml.dumps(self.cfg)
write_file(self.cloud_cfg_file, cloud_cfg)
- cmdargs = myargs(
+ cmdargs = MyArgs(
debug=False,
files=None,
force=False,
diff --git a/tests/unittests/config/test_cc_ntp.py b/tests/unittests/config/test_cc_ntp.py
index c2bce2a3..41b5fb9b 100644
--- a/tests/unittests/config/test_cc_ntp.py
+++ b/tests/unittests/config/test_cc_ntp.py
@@ -499,15 +499,6 @@ class TestNtp(FilesystemMockingTestCase):
expected_client = mycloud.distro.preferred_ntp_clients[0]
self.assertEqual("chrony", expected_client)
- @mock.patch("cloudinit.util.system_info")
- def test_ubuntu_xenial_picks_ntp(self, m_sysinfo):
- """Test Ubuntu picks ntp on xenial release"""
-
- m_sysinfo.return_value = {"dist": ("Ubuntu", "16.04", "xenial")}
- mycloud = self._get_cloud("ubuntu")
- expected_client = mycloud.distro.preferred_ntp_clients[0]
- self.assertEqual("ntp", expected_client)
-
@mock.patch("cloudinit.config.cc_ntp.subp.which")
def test_snappy_system_picks_timesyncd(self, m_which):
"""Test snappy systems prefer installed clients"""
diff --git a/tests/unittests/config/test_cc_phone_home.py b/tests/unittests/config/test_cc_phone_home.py
index 7264dda1..7964705d 100644
--- a/tests/unittests/config/test_cc_phone_home.py
+++ b/tests/unittests/config/test_cc_phone_home.py
@@ -1,11 +1,98 @@
+import logging
+from functools import partial
+from itertools import count
+from unittest import mock
+
import pytest
+from cloudinit.config.cc_phone_home import POST_LIST_ALL, handle
from cloudinit.config.schema import (
SchemaValidationError,
get_schema,
validate_cloudconfig_schema,
)
from tests.unittests.helpers import skipUnlessJsonSchema
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger("TestNoConfig")
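+# Pre-bind every handle() argument except cfg so each test below only
+# supplies its phone_home config.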
+phone_home = partial(handle, name="test", cloud=get_cloud(), log=LOG, args=[])
+
+
+@pytest.fixture(autouse=True)
+def common_mocks(mocker):
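+    # Each load_file call (one per SSH public key) returns the next integer
+    # from count(), matching the "0".."3" values expected in
+    # test_default_call once the handler stringifies them.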
+ mocker.patch("cloudinit.util.load_file", side_effect=count())
+
+
+@mock.patch("cloudinit.url_helper.readurl")
+class TestPhoneHome:
+ def test_default_call(self, m_readurl):
+ cfg = {"phone_home": {"url": "myurl"}}
+ phone_home(cfg=cfg)
+ assert m_readurl.call_args == mock.call(
+ "myurl",
+ data={
+ "pub_key_dsa": "0",
+ "pub_key_rsa": "1",
+ "pub_key_ecdsa": "2",
+ "pub_key_ed25519": "3",
+ "instance_id": "iid-datasource-none",
+ "hostname": "hostname",
+ "fqdn": "hostname",
+ },
+ retries=9,
+ sec_between=3,
+ ssl_details={},
+ )
+
+ def test_no_url(self, m_readurl, caplog):
+ cfg = {"phone_home": {}}
+ phone_home(cfg=cfg)
+ assert "Skipping module named" in caplog.text
+ assert m_readurl.call_count == 0
+
+ @pytest.mark.parametrize(
+ "tries, expected_retries",
+ [
+ (-1, -2),
+ (0, -1),
+ (1, 0),
+ (2, 1),
+ ("2", 1),
+ ("two", 9),
+ (None, 9),
+ ({}, 9),
+ ],
+ )
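+    # retries should be tries - 1 when tries parses as an int; any value
+    # that fails int() falls back to the default of 10 tries (9 retries).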
+ def test_tries(self, m_readurl, tries, expected_retries, caplog):
+ cfg = {"phone_home": {"url": "dontcare"}}
+ if tries is not None:
+ cfg["phone_home"]["tries"] = tries
+ phone_home(cfg=cfg)
+ assert m_readurl.call_args[1]["retries"] == expected_retries
+
+ def test_post_all(self, m_readurl):
+ cfg = {"phone_home": {"url": "test", "post": "all"}}
+ phone_home(cfg=cfg)
+ for key in POST_LIST_ALL:
+ assert key in m_readurl.call_args[1]["data"]
+
+ def test_custom_post_list(self, m_readurl):
+ post_list = ["pub_key_rsa, hostname"]
+ cfg = {"phone_home": {"url": "test", "post": post_list}}
+ phone_home(cfg=cfg)
+ for key in post_list:
+ assert key in m_readurl.call_args[1]["data"]
+ assert len(m_readurl.call_args[1]["data"]) == len(post_list)
+
+ def test_invalid_post(self, m_readurl, caplog):
+ post_list = ["spam", "hostname"]
+ cfg = {"phone_home": {"url": "test", "post": post_list}}
+ phone_home(cfg=cfg)
+ assert "hostname" in m_readurl.call_args[1]["data"]
+ assert m_readurl.call_args[1]["data"]["spam"] == "N/A"
+ assert (
+ "spam from 'post' configuration list not available" in caplog.text
+ )
class TestPhoneHomeSchema:
diff --git a/tests/unittests/config/test_cc_rh_subscription.py b/tests/unittests/config/test_cc_rh_subscription.py
index 57313361..1a9c1579 100644
--- a/tests/unittests/config/test_cc_rh_subscription.py
+++ b/tests/unittests/config/test_cc_rh_subscription.py
@@ -149,7 +149,7 @@ class TestBadInput(CiTestCase):
name = "cc_rh_subscription"
cloud_init = None
log = logging.getLogger("bad_tests")
- args = []
+ args: list = []
SM = cc_rh_subscription.SubscriptionManager
reg = (
"The system has been registered with ID:"
diff --git a/tests/unittests/config/test_cc_set_hostname.py b/tests/unittests/config/test_cc_set_hostname.py
index fd994c4e..3d1d86ee 100644
--- a/tests/unittests/config/test_cc_set_hostname.py
+++ b/tests/unittests/config/test_cc_set_hostname.py
@@ -11,6 +11,7 @@ from configobj import ConfigObj
from cloudinit import cloud, distros, helpers, util
from cloudinit.config import cc_set_hostname
+from cloudinit.sources import DataSourceNone
from tests.unittests import helpers as t_help
LOG = logging.getLogger(__name__)
@@ -153,7 +154,8 @@ class TestHostname(t_help.FilesystemMockingTestCase):
)
] not in m_subp.call_args_list
- def test_multiple_calls_skips_unchanged_hostname(self):
+ @mock.patch("cloudinit.util.get_hostname", return_value="localhost")
+ def test_multiple_calls_skips_unchanged_hostname(self, get_hostname):
"""Only new hostname or fqdn values will generate a hostname call."""
distro = self._fetch_distro("debian")
paths = helpers.Paths({"cloud_dir": self.tmp})
@@ -182,6 +184,42 @@ class TestHostname(t_help.FilesystemMockingTestCase):
self.logs.getvalue(),
)
+ @mock.patch("cloudinit.util.get_hostname", return_value="localhost")
+ def test_localhost_default_hostname(self, get_hostname):
+ """
+        No hostname set. The default value returned is localhost,
+        but we shouldn't write it to /etc/hostname.
+ """
+ distro = self._fetch_distro("debian")
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = DataSourceNone.DataSourceNone({}, None, paths)
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+
+ util.write_file("/etc/hostname", "")
+ cc_set_hostname.handle("cc_set_hostname", {}, cc, LOG, [])
+ contents = util.load_file("/etc/hostname")
+ self.assertEqual("", contents.strip())
+
+ @mock.patch("cloudinit.util.get_hostname", return_value="localhost")
+ def test_localhost_user_given_hostname(self, get_hostname):
+ """
+        The user-set hostname is localhost. We should write it to /etc/hostname.
+ """
+ distro = self._fetch_distro("debian")
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = DataSourceNone.DataSourceNone({}, None, paths)
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+
+ # user-provided localhost should not be ignored
+ util.write_file("/etc/hostname", "")
+ cc_set_hostname.handle(
+ "cc_set_hostname", {"hostname": "localhost"}, cc, LOG, []
+ )
+ contents = util.load_file("/etc/hostname")
+ self.assertEqual("localhost", contents.strip())
+
def test_error_on_distro_set_hostname_errors(self):
"""Raise SetHostnameError on exceptions from distro.set_hostname."""
distro = self._fetch_distro("debian")
diff --git a/tests/unittests/config/test_cc_snap.py b/tests/unittests/config/test_cc_snap.py
index 855c23fc..432f72ce 100644
--- a/tests/unittests/config/test_cc_snap.py
+++ b/tests/unittests/config/test_cc_snap.py
@@ -1,28 +1,24 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import os
import re
from io import StringIO
import pytest
-from cloudinit import util
-from cloudinit.config.cc_snap import (
- ASSERTIONS_FILE,
- add_assertions,
- handle,
- run_commands,
-)
+from cloudinit import helpers, util
+from cloudinit.config.cc_snap import add_assertions, handle, run_commands
from cloudinit.config.schema import (
SchemaValidationError,
get_schema,
validate_cloudconfig_schema,
)
-from tests.unittests.helpers import (
- CiTestCase,
- mock,
- skipUnlessJsonSchema,
- wrap_and_call,
-)
+from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
+from tests.unittests.util import get_cloud
+
+M_PATH = "cloudinit.config.cc_snap."
+ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions"
SYSTEM_USER_ASSERTION = """\
type: system-user
@@ -91,98 +87,81 @@ IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3t
oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k"""
-class FakeCloud(object):
- def __init__(self, distro):
- self.distro = distro
-
-
-class TestAddAssertions(CiTestCase):
-
- with_logs = True
+@pytest.fixture()
+def fake_cloud(tmpdir):
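+    # These Paths feed cloud.paths.get_ipath_cur(), which TestHandle uses
+    # to locate the instance's snapd.assertions file under tmpdir.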
+ paths = helpers.Paths(
+ {
+ "cloud_dir": tmpdir.join("cloud"),
+ "run_dir": tmpdir.join("cloud-init"),
+ "templates_dir": tmpdir.join("templates"),
+ }
+ )
+ cloud = get_cloud(paths=paths)
+ yield cloud
- def setUp(self):
- super(TestAddAssertions, self).setUp()
- self.tmp = self.tmp_dir()
+class TestAddAssertions:
@mock.patch("cloudinit.config.cc_snap.subp.subp")
- def test_add_assertions_on_empty_list(self, m_subp):
+ def test_add_assertions_on_empty_list(self, m_subp, caplog, tmpdir):
"""When provided with an empty list, add_assertions does nothing."""
- add_assertions([])
- self.assertEqual("", self.logs.getvalue())
- m_subp.assert_not_called()
+ assert_file = tmpdir.join("snapd.assertions")
+ add_assertions([], assert_file)
+ assert not caplog.text
+ assert 0 == m_subp.call_count
- def test_add_assertions_on_non_list_or_dict(self):
+ def test_add_assertions_on_non_list_or_dict(self, tmpdir):
"""When provided an invalid type, add_assertions raises an error."""
- with self.assertRaises(TypeError) as context_manager:
- add_assertions(assertions="I'm Not Valid")
- self.assertEqual(
- "assertion parameter was not a list or dict: I'm Not Valid",
- str(context_manager.exception),
- )
+ assert_file = tmpdir.join("snapd.assertions")
+ with pytest.raises(
+ TypeError,
+ match="assertion parameter was not a list or dict: I'm Not Valid",
+ ):
+ add_assertions("I'm Not Valid", assert_file)
@mock.patch("cloudinit.config.cc_snap.subp.subp")
- def test_add_assertions_adds_assertions_as_list(self, m_subp):
+ def test_add_assertions_adds_assertions_as_list(
+ self, m_subp, caplog, tmpdir
+ ):
"""When provided with a list, add_assertions adds all assertions."""
- self.assertEqual(
- ASSERTIONS_FILE, "/var/lib/cloud/instance/snapd.assertions"
- )
- assert_file = self.tmp_path("snapd.assertions", dir=self.tmp)
+ assert_file = tmpdir.join("snapd.assertions")
assertions = [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]
- wrap_and_call(
- "cloudinit.config.cc_snap",
- {"ASSERTIONS_FILE": {"new": assert_file}},
- add_assertions,
- assertions,
- )
- self.assertIn(
- "Importing user-provided snap assertions", self.logs.getvalue()
- )
- self.assertIn("sertions", self.logs.getvalue())
- self.assertEqual(
- [mock.call(["snap", "ack", assert_file], capture=True)],
- m_subp.call_args_list,
- )
- compare_file = self.tmp_path("comparison", dir=self.tmp)
+ add_assertions(assertions, assert_file)
+ assert "Importing user-provided snap assertions" in caplog.text
+ assert "sertions" in caplog.text
+ assert [
+ mock.call(["snap", "ack", assert_file], capture=True)
+ ] == m_subp.call_args_list
+ compare_file = tmpdir.join("comparison")
util.write_file(compare_file, "\n".join(assertions).encode("utf-8"))
- self.assertEqual(
- util.load_file(compare_file), util.load_file(assert_file)
- )
+ assert util.load_file(compare_file) == util.load_file(assert_file)
@mock.patch("cloudinit.config.cc_snap.subp.subp")
- def test_add_assertions_adds_assertions_as_dict(self, m_subp):
+ def test_add_assertions_adds_assertions_as_dict(
+ self, m_subp, caplog, tmpdir
+ ):
"""When provided with a dict, add_assertions adds all assertions."""
- self.assertEqual(
- ASSERTIONS_FILE, "/var/lib/cloud/instance/snapd.assertions"
- )
- assert_file = self.tmp_path("snapd.assertions", dir=self.tmp)
+ assert_file = tmpdir.join("snapd.assertions")
assertions = {"00": SYSTEM_USER_ASSERTION, "01": ACCOUNT_ASSERTION}
- wrap_and_call(
- "cloudinit.config.cc_snap",
- {"ASSERTIONS_FILE": {"new": assert_file}},
- add_assertions,
- assertions,
- )
- self.assertIn(
- "Importing user-provided snap assertions", self.logs.getvalue()
- )
- self.assertIn(
- "DEBUG: Snap acking: ['type: system-user', 'authority-id: Lqv",
- self.logs.getvalue(),
- )
- self.assertIn(
- "DEBUG: Snap acking: ['type: account-key', 'authority-id: canonic",
- self.logs.getvalue(),
- )
- self.assertEqual(
- [mock.call(["snap", "ack", assert_file], capture=True)],
- m_subp.call_args_list,
- )
- compare_file = self.tmp_path("comparison", dir=self.tmp)
+ add_assertions(assertions, assert_file)
+ assert "Importing user-provided snap assertions" in caplog.text
+ assert (
+ M_PATH[:-1],
+ logging.DEBUG,
+ "Snap acking: ['type: system-user', 'authority-id: "
+ "LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp']",
+ ) in caplog.record_tuples
+ assert (
+ M_PATH[:-1],
+ logging.DEBUG,
+ "Snap acking: ['type: account-key', 'authority-id: canonical']",
+ ) in caplog.record_tuples
+ assert [
+ mock.call(["snap", "ack", assert_file], capture=True)
+ ] == m_subp.call_args_list
+ compare_file = tmpdir.join("comparison")
combined = "\n".join(assertions.values())
util.write_file(compare_file, combined.encode("utf-8"))
- self.assertEqual(
- util.load_file(compare_file), util.load_file(assert_file)
- )
+ assert util.load_file(compare_file) == util.load_file(assert_file)
class TestRunCommands(CiTestCase):
@@ -339,37 +318,21 @@ class TestSnapSchema:
validate_cloudconfig_schema(config, get_schema(), strict=True)
-class TestHandle(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestHandle, self).setUp()
- self.tmp = self.tmp_dir()
-
+class TestHandle:
@mock.patch("cloudinit.config.cc_snap.subp.subp")
- def test_handle_adds_assertions(self, m_subp):
+ def test_handle_adds_assertions(self, m_subp, fake_cloud, tmpdir):
"""Any configured snap assertions are provided to add_assertions."""
- assert_file = self.tmp_path("snapd.assertions", dir=self.tmp)
- compare_file = self.tmp_path("comparison", dir=self.tmp)
+ assert_file = os.path.join(
+ fake_cloud.paths.get_ipath_cur(), "snapd.assertions"
+ )
+ compare_file = tmpdir.join("comparison")
cfg = {
"snap": {"assertions": [SYSTEM_USER_ASSERTION, ACCOUNT_ASSERTION]}
}
- wrap_and_call(
- "cloudinit.config.cc_snap",
- {"ASSERTIONS_FILE": {"new": assert_file}},
- handle,
- "snap",
- cfg=cfg,
- cloud=None,
- log=self.logger,
- args=None,
- )
+ handle("snap", cfg=cfg, cloud=fake_cloud, log=mock.Mock(), args=None)
content = "\n".join(cfg["snap"]["assertions"])
util.write_file(compare_file, content.encode("utf-8"))
- self.assertEqual(
- util.load_file(compare_file), util.load_file(assert_file)
- )
+ assert util.load_file(compare_file) == util.load_file(assert_file)
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_ubuntu_drivers.py b/tests/unittests/config/test_cc_ubuntu_drivers.py
index 3cbde8b2..9d54467e 100644
--- a/tests/unittests/config/test_cc_ubuntu_drivers.py
+++ b/tests/unittests/config/test_cc_ubuntu_drivers.py
@@ -6,6 +6,7 @@ import re
import pytest
+from cloudinit import log
from cloudinit.config import cc_ubuntu_drivers as drivers
from cloudinit.config.schema import (
SchemaValidationError,
@@ -13,7 +14,7 @@ from cloudinit.config.schema import (
validate_cloudconfig_schema,
)
from cloudinit.subp import ProcessExecutionError
-from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
+from tests.unittests.helpers import mock, skipUnlessJsonSchema
MPATH = "cloudinit.config.cc_ubuntu_drivers."
M_TMP_PATH = MPATH + "temp_utils.mkdtemp"
@@ -31,223 +32,286 @@ OLD_UBUNTU_DRIVERS_ERROR_STDERR = (
# pylint: disable=no-value-for-parameter
-class AnyTempScriptAndDebconfFile(object):
- def __init__(self, tmp_dir, debconf_file):
- self.tmp_dir = tmp_dir
- self.debconf_file = debconf_file
-
- def __eq__(self, cmd):
- if not len(cmd) == 2:
- return False
- script, debconf_file = cmd
- if bool(script.startswith(self.tmp_dir) and script.endswith(".sh")):
- return debconf_file == self.debconf_file
- return False
-
-
-class TestUbuntuDrivers(CiTestCase):
- cfg_accepted = {"drivers": {"nvidia": {"license-accepted": True}}}
+@pytest.mark.parametrize(
+ "cfg_accepted,install_gpgpu",
+ [
+ pytest.param(
+ {"drivers": {"nvidia": {"license-accepted": True}}},
+ ["ubuntu-drivers", "install", "--gpgpu", "nvidia"],
+ id="without_version",
+ ),
+ pytest.param(
+ {
+ "drivers": {
+ "nvidia": {"license-accepted": True, "version": "123"}
+ }
+ },
+ ["ubuntu-drivers", "install", "--gpgpu", "nvidia:123"],
+ id="with_version",
+ ),
+ ],
+)
+@mock.patch(MPATH + "debconf")
+@mock.patch(MPATH + "HAS_DEBCONF", True)
+class TestUbuntuDrivers:
install_gpgpu = ["ubuntu-drivers", "install", "--gpgpu", "nvidia"]
- with_logs = True
-
+ @pytest.mark.parametrize(
+ "true_value",
+ [
+ True,
+ "yes",
+ "true",
+ "on",
+ "1",
+ ],
+ )
@mock.patch(M_TMP_PATH)
@mock.patch(MPATH + "subp.subp", return_value=("", ""))
@mock.patch(MPATH + "subp.which", return_value=False)
- def _assert_happy_path_taken(self, config, m_which, m_subp, m_tmp):
+ def test_happy_path_taken(
+ self,
+ m_which,
+ m_subp,
+ m_tmp,
+ m_debconf,
+ tmpdir,
+ cfg_accepted,
+ install_gpgpu,
+ true_value,
+ ):
"""Positive path test through handle. Package should be installed."""
- tdir = self.tmp_dir()
- debconf_file = os.path.join(tdir, "nvidia.template")
+ new_config: dict = copy.deepcopy(cfg_accepted)
+ new_config["drivers"]["nvidia"]["license-accepted"] = true_value
+
+ tdir = tmpdir
+ debconf_file = tdir.join("nvidia.template")
m_tmp.return_value = tdir
myCloud = mock.MagicMock()
- drivers.handle("ubuntu_drivers", config, myCloud, None, None)
- self.assertEqual(
- [mock.call(["ubuntu-drivers-common"])],
- myCloud.distro.install_packages.call_args_list,
- )
- self.assertEqual(
- [
- mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
- mock.call(self.install_gpgpu),
- ],
- m_subp.call_args_list,
- )
-
- def test_handle_does_package_install(self):
- self._assert_happy_path_taken(self.cfg_accepted)
-
- def test_trueish_strings_are_considered_approval(self):
- for true_value in ["yes", "true", "on", "1"]:
- new_config = copy.deepcopy(self.cfg_accepted)
- new_config["drivers"]["nvidia"]["license-accepted"] = true_value
- self._assert_happy_path_taken(new_config)
+ drivers.handle("ubuntu_drivers", new_config, myCloud, None, None)
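+        # debconf is fully mocked, so DebconfCommunicator() returns the
+        # same MagicMock on every call; inspect the command() invocations
+        # made inside the "with" block.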
+ assert [
+ mock.call(drivers.X_LOADTEMPLATEFILE, debconf_file)
+ ] == m_debconf.DebconfCommunicator().__enter__().command.call_args_list
+ assert [
+ mock.call(["ubuntu-drivers-common"])
+ ] == myCloud.distro.install_packages.call_args_list
+ assert [mock.call(install_gpgpu)] == m_subp.call_args_list
@mock.patch(M_TMP_PATH)
@mock.patch(MPATH + "subp.subp")
@mock.patch(MPATH + "subp.which", return_value=False)
def test_handle_raises_error_if_no_drivers_found(
- self, m_which, m_subp, m_tmp
+ self,
+ m_which,
+ m_subp,
+ m_tmp,
+ m_debconf,
+ caplog,
+ tmpdir,
+ cfg_accepted,
+ install_gpgpu,
):
"""If ubuntu-drivers doesn't install any drivers, raise an error."""
- tdir = self.tmp_dir()
+ tdir = tmpdir
debconf_file = os.path.join(tdir, "nvidia.template")
m_tmp.return_value = tdir
myCloud = mock.MagicMock()
- def fake_subp(cmd):
- if cmd[0].startswith(tdir):
- return
- raise ProcessExecutionError(
- stdout="No drivers found for installation.\n", exit_code=1
- )
-
- m_subp.side_effect = fake_subp
-
- with self.assertRaises(Exception):
- drivers.handle(
- "ubuntu_drivers", self.cfg_accepted, myCloud, None, None
- )
- self.assertEqual(
- [mock.call(["ubuntu-drivers-common"])],
- myCloud.distro.install_packages.call_args_list,
- )
- self.assertEqual(
- [
- mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
- mock.call(self.install_gpgpu),
- ],
- m_subp.call_args_list,
+ m_subp.side_effect = ProcessExecutionError(
+ stdout="No drivers found for installation.\n", exit_code=1
)
- self.assertIn(
- "ubuntu-drivers found no drivers for installation",
- self.logs.getvalue(),
+
+ with pytest.raises(Exception):
+ drivers.handle("ubuntu_drivers", cfg_accepted, myCloud, None, None)
+ assert [
+ mock.call(drivers.X_LOADTEMPLATEFILE, debconf_file)
+ ] == m_debconf.DebconfCommunicator().__enter__().command.call_args_list
+ assert [
+ mock.call(["ubuntu-drivers-common"])
+ ] == myCloud.distro.install_packages.call_args_list
+ assert [mock.call(install_gpgpu)] == m_subp.call_args_list
+ assert (
+ "ubuntu-drivers found no drivers for installation" in caplog.text
)
+ @pytest.mark.parametrize(
+ "config",
+ [
+ pytest.param(
+ {"drivers": {"nvidia": {"license-accepted": False}}},
+ id="license_not_accepted",
+ ),
+ pytest.param(
+ {"drivers": {"nvidia": {"license-accepted": "garbage"}}},
+ id="garbage_in_license_field",
+ ),
+ pytest.param({"drivers": {"nvidia": {}}}, id="no_license_key"),
+ pytest.param(
+ {"drivers": {"acme": {"license-accepted": True}}},
+ id="no_nvidia_key",
+ ),
+ # ensure we don't do anything if string refusal given
+ pytest.param(
+ {"drivers": {"nvidia": {"license-accepted": "no"}}},
+ id="string_given_no",
+ ),
+ pytest.param(
+ {"drivers": {"nvidia": {"license-accepted": "false"}}},
+ id="string_given_false",
+ ),
+ pytest.param(
+ {"drivers": {"nvidia": {"license-accepted": "off"}}},
+ id="string_given_off",
+ ),
+ pytest.param(
+ {"drivers": {"nvidia": {"license-accepted": "0"}}},
+ id="string_given_0",
+ ),
+ # specifying_a_version_doesnt_override_license_acceptance
+ pytest.param(
+ {
+ "drivers": {
+ "nvidia": {"license-accepted": False, "version": "123"}
+ }
+ },
+ id="with_version",
+ ),
+ ],
+ )
@mock.patch(MPATH + "subp.subp", return_value=("", ""))
@mock.patch(MPATH + "subp.which", return_value=False)
- def _assert_inert_with_config(self, config, m_which, m_subp):
+ def test_handle_inert(
+ self, m_which, m_subp, m_debconf, cfg_accepted, install_gpgpu, config
+ ):
"""Helper to reduce repetition when testing negative cases"""
myCloud = mock.MagicMock()
drivers.handle("ubuntu_drivers", config, myCloud, None, None)
- self.assertEqual(0, myCloud.distro.install_packages.call_count)
- self.assertEqual(0, m_subp.call_count)
-
- def test_handle_inert_if_license_not_accepted(self):
- """Ensure we don't do anything if the license is rejected."""
- self._assert_inert_with_config(
- {"drivers": {"nvidia": {"license-accepted": False}}}
- )
-
- def test_handle_inert_if_garbage_in_license_field(self):
- """Ensure we don't do anything if unknown text is in license field."""
- self._assert_inert_with_config(
- {"drivers": {"nvidia": {"license-accepted": "garbage"}}}
- )
-
- def test_handle_inert_if_no_license_key(self):
- """Ensure we don't do anything if no license key."""
- self._assert_inert_with_config({"drivers": {"nvidia": {}}})
-
- def test_handle_inert_if_no_nvidia_key(self):
- """Ensure we don't do anything if other license accepted."""
- self._assert_inert_with_config(
- {"drivers": {"acme": {"license-accepted": True}}}
- )
-
- def test_handle_inert_if_string_given(self):
- """Ensure we don't do anything if string refusal given."""
- for false_value in ["no", "false", "off", "0"]:
- self._assert_inert_with_config(
- {"drivers": {"nvidia": {"license-accepted": false_value}}}
- )
+ assert 0 == myCloud.distro.install_packages.call_count
+ assert 0 == m_subp.call_count
@mock.patch(MPATH + "install_drivers")
- def test_handle_no_drivers_does_nothing(self, m_install_drivers):
+ def test_handle_no_drivers_does_nothing(
+ self, m_install_drivers, m_debconf, cfg_accepted, install_gpgpu
+ ):
"""If no 'drivers' key in the config, nothing should be done."""
myCloud = mock.MagicMock()
myLog = mock.MagicMock()
drivers.handle("ubuntu_drivers", {"foo": "bzr"}, myCloud, myLog, None)
- self.assertIn(
- "Skipping module named", myLog.debug.call_args_list[0][0][0]
- )
- self.assertEqual(0, m_install_drivers.call_count)
+ assert "Skipping module named" in myLog.debug.call_args_list[0][0][0]
+ assert 0 == m_install_drivers.call_count
@mock.patch(M_TMP_PATH)
@mock.patch(MPATH + "subp.subp", return_value=("", ""))
@mock.patch(MPATH + "subp.which", return_value=True)
def test_install_drivers_no_install_if_present(
- self, m_which, m_subp, m_tmp
+ self,
+ m_which,
+ m_subp,
+ m_tmp,
+ m_debconf,
+ tmpdir,
+ cfg_accepted,
+ install_gpgpu,
):
"""If 'ubuntu-drivers' is present, no package install should occur."""
- tdir = self.tmp_dir()
- debconf_file = os.path.join(tdir, "nvidia.template")
+ tdir = tmpdir
+ debconf_file = tmpdir.join("nvidia.template")
m_tmp.return_value = tdir
pkg_install = mock.MagicMock()
drivers.install_drivers(
- self.cfg_accepted["drivers"], pkg_install_func=pkg_install
+ cfg_accepted["drivers"], pkg_install_func=pkg_install
)
- self.assertEqual(0, pkg_install.call_count)
- self.assertEqual([mock.call("ubuntu-drivers")], m_which.call_args_list)
- self.assertEqual(
- [
- mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
- mock.call(self.install_gpgpu),
- ],
- m_subp.call_args_list,
- )
-
- def test_install_drivers_rejects_invalid_config(self):
+ assert 0 == pkg_install.call_count
+ assert [mock.call("ubuntu-drivers")] == m_which.call_args_list
+ assert [
+ mock.call(drivers.X_LOADTEMPLATEFILE, debconf_file)
+ ] == m_debconf.DebconfCommunicator().__enter__().command.call_args_list
+ assert [mock.call(install_gpgpu)] == m_subp.call_args_list
+
+ def test_install_drivers_rejects_invalid_config(
+ self, m_debconf, cfg_accepted, install_gpgpu
+ ):
"""install_drivers should raise TypeError if not given a config dict"""
pkg_install = mock.MagicMock()
- with self.assertRaisesRegex(TypeError, ".*expected dict.*"):
+ with pytest.raises(TypeError, match=".*expected dict.*"):
drivers.install_drivers("mystring", pkg_install_func=pkg_install)
- self.assertEqual(0, pkg_install.call_count)
+ assert 0 == pkg_install.call_count
@mock.patch(M_TMP_PATH)
@mock.patch(MPATH + "subp.subp")
@mock.patch(MPATH + "subp.which", return_value=False)
def test_install_drivers_handles_old_ubuntu_drivers_gracefully(
- self, m_which, m_subp, m_tmp
+ self,
+ m_which,
+ m_subp,
+ m_tmp,
+ m_debconf,
+ caplog,
+ tmpdir,
+ cfg_accepted,
+ install_gpgpu,
):
"""Older ubuntu-drivers versions should emit message and raise error"""
- tdir = self.tmp_dir()
- debconf_file = os.path.join(tdir, "nvidia.template")
- m_tmp.return_value = tdir
+ debconf_file = tmpdir.join("nvidia.template")
+ m_tmp.return_value = tmpdir
myCloud = mock.MagicMock()
- def fake_subp(cmd):
- if cmd[0].startswith(tdir):
- return
- raise ProcessExecutionError(
- stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2
- )
+ m_subp.side_effect = ProcessExecutionError(
+ stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2
+ )
- m_subp.side_effect = fake_subp
+ with pytest.raises(Exception):
+ drivers.handle("ubuntu_drivers", cfg_accepted, myCloud, None, None)
+ assert [
+ mock.call(drivers.X_LOADTEMPLATEFILE, debconf_file)
+ ] == m_debconf.DebconfCommunicator().__enter__().command.call_args_list
+ assert [
+ mock.call(["ubuntu-drivers-common"])
+ ] == myCloud.distro.install_packages.call_args_list
+ assert [mock.call(install_gpgpu)] == m_subp.call_args_list
+ assert (
+ MPATH[:-1],
+ log.WARNING,
+ (
+ "the available version of ubuntu-drivers is"
+ " too old to perform requested driver installation"
+ ),
+ ) == caplog.record_tuples[-1]
- with self.assertRaises(Exception):
+ @mock.patch(M_TMP_PATH)
+ @mock.patch(MPATH + "subp.subp", return_value=("", ""))
+ @mock.patch(MPATH + "subp.which", return_value=False)
+ def test_debconf_not_installed_does_nothing(
+ self,
+ m_which,
+ m_subp,
+ m_tmp,
+ m_debconf,
+ tmpdir,
+ cfg_accepted,
+ install_gpgpu,
+ ):
+ m_debconf.DebconfCommunicator.side_effect = AttributeError
+ m_tmp.return_value = tmpdir
+ myCloud = mock.MagicMock()
+ version_none_cfg = {
+ "drivers": {"nvidia": {"license-accepted": True, "version": None}}
+ }
+ with pytest.raises(AttributeError):
drivers.handle(
- "ubuntu_drivers", self.cfg_accepted, myCloud, None, None
+ "ubuntu_drivers", version_none_cfg, myCloud, None, None
)
- self.assertEqual(
- [mock.call(["ubuntu-drivers-common"])],
- myCloud.distro.install_packages.call_args_list,
- )
- self.assertEqual(
- [
- mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
- mock.call(self.install_gpgpu),
- ],
- m_subp.call_args_list,
- )
- self.assertIn(
- "WARNING: the available version of ubuntu-drivers is"
- " too old to perform requested driver installation",
- self.logs.getvalue(),
+ assert (
+ 0 == m_debconf.DebconfCommunicator.__enter__().command.call_count
)
+ assert 0 == m_subp.call_count
+
+@mock.patch(MPATH + "debconf")
+@mock.patch(MPATH + "HAS_DEBCONF", True)
+class TestUbuntuDriversWithVersion:
+ """With-version specific tests"""
-# Sub-class TestUbuntuDrivers to run the same test cases, but with a version
-class TestUbuntuDriversWithVersion(TestUbuntuDrivers):
cfg_accepted = {
"drivers": {"nvidia": {"license-accepted": True, "version": "123"}}
}
@@ -256,30 +320,76 @@ class TestUbuntuDriversWithVersion(TestUbuntuDrivers):
@mock.patch(M_TMP_PATH)
@mock.patch(MPATH + "subp.subp", return_value=("", ""))
@mock.patch(MPATH + "subp.which", return_value=False)
- def test_version_none_uses_latest(self, m_which, m_subp, m_tmp):
- tdir = self.tmp_dir()
- debconf_file = os.path.join(tdir, "nvidia.template")
- m_tmp.return_value = tdir
+ def test_version_none_uses_latest(
+ self, m_which, m_subp, m_tmp, m_debconf, tmpdir
+ ):
+ debconf_file = tmpdir.join("nvidia.template")
+ m_tmp.return_value = tmpdir
myCloud = mock.MagicMock()
version_none_cfg = {
"drivers": {"nvidia": {"license-accepted": True, "version": None}}
}
drivers.handle("ubuntu_drivers", version_none_cfg, myCloud, None, None)
- self.assertEqual(
- [
- mock.call(AnyTempScriptAndDebconfFile(tdir, debconf_file)),
- mock.call(["ubuntu-drivers", "install", "--gpgpu", "nvidia"]),
- ],
- m_subp.call_args_list,
+ assert [
+ mock.call(drivers.X_LOADTEMPLATEFILE, debconf_file)
+ ] == m_debconf.DebconfCommunicator().__enter__().command.call_args_list
+ assert [
+ mock.call(["ubuntu-drivers", "install", "--gpgpu", "nvidia"]),
+ ] == m_subp.call_args_list
+
+
+@mock.patch(MPATH + "debconf")
+class TestUbuntuDriversNotRun:
+ @mock.patch(MPATH + "HAS_DEBCONF", True)
+ @mock.patch(M_TMP_PATH)
+ @mock.patch(MPATH + "install_drivers")
+ def test_no_cfg_drivers_does_nothing(
+ self,
+ m_install_drivers,
+ m_tmp,
+ m_debconf,
+ tmpdir,
+ ):
+ m_tmp.return_value = tmpdir
+ m_log = mock.MagicMock()
+ myCloud = mock.MagicMock()
+ version_none_cfg = {}
+ drivers.handle(
+ "ubuntu_drivers", version_none_cfg, myCloud, m_log, None
+ )
+ assert 0 == m_install_drivers.call_count
+ assert (
+ mock.call(
+ "Skipping module named %s, no 'drivers' key in config",
+ "ubuntu_drivers",
+ )
+ == m_log.debug.call_args_list[-1]
)
- def test_specifying_a_version_doesnt_override_license_acceptance(self):
- self._assert_inert_with_config(
- {
- "drivers": {
- "nvidia": {"license-accepted": False, "version": "123"}
- }
- }
+ @mock.patch(MPATH + "HAS_DEBCONF", False)
+ @mock.patch(M_TMP_PATH)
+ @mock.patch(MPATH + "install_drivers")
+ def test_has_not_debconf_does_nothing(
+ self,
+ m_install_drivers,
+ m_tmp,
+ m_debconf,
+ tmpdir,
+ ):
+ m_tmp.return_value = tmpdir
+ m_log = mock.MagicMock()
+ myCloud = mock.MagicMock()
+ version_none_cfg = {"drivers": {"nvidia": {"license-accepted": True}}}
+ drivers.handle(
+ "ubuntu_drivers", version_none_cfg, myCloud, m_log, None
+ )
+ assert 0 == m_install_drivers.call_count
+ assert (
+ mock.call(
+ "Skipping module named %s, 'python3-debconf' is not installed",
+ "ubuntu_drivers",
+ )
+ == m_log.warning.call_args_list[-1]
)
diff --git a/tests/unittests/config/test_cc_users_groups.py b/tests/unittests/config/test_cc_users_groups.py
index af8bdc30..a33fd55f 100644
--- a/tests/unittests/config/test_cc_users_groups.py
+++ b/tests/unittests/config/test_cc_users_groups.py
@@ -318,6 +318,10 @@ class TestUsersGroupsSchema:
({"users": ["default", ["aaa", "bbb"]]}, None),
({"users": ["foobar"]}, None), # no default user creation
({"users": [{"name": "bbsw"}]}, None),
+ (
+ {"users": [{"name": "bbsw", "groups": ["anygrp"]}]},
+ None,
+ ), # user with a list of groups
({"groups": [{"yep": ["user1"]}]}, None),
(
{"user": ["no_list_allowed"]},
diff --git a/tests/unittests/config/test_cc_yum_add_repo.py b/tests/unittests/config/test_cc_yum_add_repo.py
index d6de2ec2..010bea18 100644
--- a/tests/unittests/config/test_cc_yum_add_repo.py
+++ b/tests/unittests/config/test_cc_yum_add_repo.py
@@ -61,11 +61,11 @@ class TestConfig(helpers.FilesystemMockingTestCase):
}
self.patchUtils(self.tmp)
cc_yum_add_repo.handle("yum_add_repo", cfg, None, LOG, [])
- contents = util.load_file("/etc/yum.repos.d/epel_testing.repo")
+ contents = util.load_file("/etc/yum.repos.d/epel-testing.repo")
parser = configparser.ConfigParser()
parser.read_string(contents)
expected = {
- "epel_testing": {
+ "epel-testing": {
"name": "Extra Packages for Enterprise Linux 5 - Testing",
"failovermethod": "priority",
"gpgkey": "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL",
@@ -101,11 +101,11 @@ class TestConfig(helpers.FilesystemMockingTestCase):
}
self.patchUtils(self.tmp)
cc_yum_add_repo.handle("yum_add_repo", cfg, None, LOG, [])
- contents = util.load_file("/etc/yum.repos.d/puppetlabs_products.repo")
+ contents = util.load_file("/etc/yum.repos.d/puppetlabs-products.repo")
parser = configparser.ConfigParser()
parser.read_string(contents)
expected = {
- "puppetlabs_products": {
+ "puppetlabs-products": {
"name": "Puppet Labs Products El 6 - $basearch",
"baseurl": "http://yum.puppetlabs.com/el/6/products/$basearch",
"gpgkey": (
diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py
index c75b7227..4a41c4c1 100644
--- a/tests/unittests/config/test_schema.py
+++ b/tests/unittests/config/test_schema.py
@@ -7,14 +7,14 @@ import itertools
import json
import logging
import os
+import re
import sys
-from copy import copy, deepcopy
+from copy import copy
from pathlib import Path
from textwrap import dedent
from types import ModuleType
from typing import List
-import jsonschema
import pytest
from cloudinit.config.schema import (
@@ -80,7 +80,7 @@ def get_modules() -> List[ModuleType]:
def get_module_variable(var_name) -> dict:
"""Inspect modules and get variable from module matching var_name"""
- schemas = {}
+ schemas: dict = {}
get_modules()
for k, v in sys.modules.items():
path = Path(k)
@@ -96,16 +96,6 @@ def get_module_variable(var_name) -> dict:
class TestVersionedSchemas:
- def _relative_ref_to_local_file_path(self, source_schema):
- """Replace known relative ref URLs with full file path."""
- # jsonschema 2.6.0 doesn't support relative URLs in $refs (bionic)
- full_path_schema = deepcopy(source_schema)
- relative_ref = full_path_schema["oneOf"][0]["allOf"][1]["$ref"]
- full_local_filepath = get_schema_dir() + relative_ref[1:]
- file_ref = f"file://{full_local_filepath}"
- full_path_schema["oneOf"][0]["allOf"][1]["$ref"] = file_ref
- return full_path_schema
-
@pytest.mark.parametrize(
"schema,error_msg",
(
@@ -119,39 +109,30 @@ class TestVersionedSchemas:
def test_versioned_cloud_config_schema_is_valid_json(
self, schema, error_msg
):
+ schema_dir = get_schema_dir()
version_schemafile = os.path.join(
- get_schema_dir(), VERSIONED_USERDATA_SCHEMA_FILE
+ schema_dir, VERSIONED_USERDATA_SCHEMA_FILE
+ )
+        # Point to local schema files to avoid the JSON resolver trying to
+        # pull the reference from our upstream raw file on GitHub.
+ version_schema = json.loads(
+ re.sub(
+ r"https:\/\/raw.githubusercontent.com\/canonical\/"
+ r"cloud-init\/main\/cloudinit\/config\/schemas\/",
+ f"file://{schema_dir}/",
+ load_file(version_schemafile),
+ )
)
- version_schema = json.loads(load_file(version_schemafile))
- # To avoid JSON resolver trying to pull the reference from our
- # upstream raw file in github.
- version_schema["$id"] = f"file://{version_schemafile}"
if error_msg:
with pytest.raises(SchemaValidationError) as context_mgr:
- try:
- validate_cloudconfig_schema(
- schema, schema=version_schema, strict=True
- )
- except jsonschema.exceptions.RefResolutionError:
- full_path_schema = self._relative_ref_to_local_file_path(
- version_schema
- )
- validate_cloudconfig_schema(
- schema, schema=full_path_schema, strict=True
- )
- assert error_msg in str(context_mgr.value)
- else:
- try:
validate_cloudconfig_schema(
schema, schema=version_schema, strict=True
)
- except jsonschema.exceptions.RefResolutionError:
- full_path_schema = self._relative_ref_to_local_file_path(
- version_schema
- )
- validate_cloudconfig_schema(
- schema, schema=full_path_schema, strict=True
- )
+ assert error_msg in str(context_mgr.value)
+ else:
+ validate_cloudconfig_schema(
+ schema, schema=version_schema, strict=True
+ )
class TestGetSchema:
@@ -224,6 +205,7 @@ class TestGetSchema:
{"$ref": "#/$defs/cc_write_files"},
{"$ref": "#/$defs/cc_yum_add_repo"},
{"$ref": "#/$defs/cc_zypper_add_repo"},
+ {"$ref": "#/$defs/reporting_config"},
]
found_subschema_defs = []
legacy_schema_keys = []
@@ -304,11 +286,32 @@ class TestValidateCloudConfigSchema:
assert "cloudinit.config.schema" == module
assert logging.WARNING == log_level
assert (
- "Invalid cloud-config provided:\np1: -1 is not of type 'string'"
+ "Invalid cloud-config provided: \np1: -1 is not of type 'string'"
== log_msg
)
@skipUnlessJsonSchema()
+ def test_validateconfig_schema_sensitive(self, caplog):
+ """When log_details=False, ensure details are omitted"""
+ schema = {
+ "properties": {"hashed_password": {"type": "string"}},
+ "additionalProperties": False,
+ }
+ validate_cloudconfig_schema(
+ {"hashed-password": "secret"},
+ schema,
+ strict=False,
+ log_details=False,
+ )
+ [(module, log_level, log_msg)] = caplog.record_tuples
+ assert "cloudinit.config.schema" == module
+ assert logging.WARNING == log_level
+ assert (
+ "Invalid cloud-config provided: Please run 'sudo cloud-init "
+ "schema --system' to see the schema errors." == log_msg
+ )
+
+ @skipUnlessJsonSchema()
def test_validateconfig_schema_emits_warning_on_missing_jsonschema(
self, caplog
):
diff --git a/tests/unittests/distros/test_networking.py b/tests/unittests/distros/test_networking.py
index f56b34ad..6f7465c9 100644
--- a/tests/unittests/distros/test_networking.py
+++ b/tests/unittests/distros/test_networking.py
@@ -2,7 +2,6 @@
# /parametrize.html#parametrizing-conditional-raising
import textwrap
-from contextlib import ExitStack as does_not_raise
from unittest import mock
import pytest
@@ -14,6 +13,7 @@ from cloudinit.distros.networking import (
LinuxNetworking,
Networking,
)
+from tests.unittests.helpers import does_not_raise
@pytest.fixture
diff --git a/tests/unittests/distros/test_sysconfig.py b/tests/unittests/distros/test_sysconfig.py
index d0979e17..9c3a2018 100644
--- a/tests/unittests/distros/test_sysconfig.py
+++ b/tests/unittests/distros/test_sysconfig.py
@@ -65,9 +65,7 @@ USEMD5=no"""
conf["IPV6TO4_ROUTING"] = "blah \tblah"
contents2 = str(conf).strip()
# Should be requoted due to whitespace
- self.assertRegMatches(
- contents2, r"IPV6TO4_ROUTING=[\']blah\s+blah[\']"
- )
+ self.assertRegex(contents2, r"IPV6TO4_ROUTING=[\']blah\s+blah[\']")
def test_parse_no_adjust_shell(self):
conf = SysConf("".splitlines())
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index 67fed8c9..9d5a7ed2 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -13,10 +13,12 @@ import time
import unittest
from contextlib import ExitStack, contextmanager
from pathlib import Path
+from typing import ClassVar, List, Union
from unittest import mock
from unittest.util import strclass
import httpretty
+import pytest
import cloudinit
from cloudinit import cloud, distros
@@ -71,6 +73,13 @@ def retarget_many_wrapper(new_base, am, old_func):
return wrapper
+def random_string(length=8):
+ """return a random lowercase string with default length of 8"""
+ return "".join(
+ random.choice(string.ascii_lowercase) for _ in range(length)
+ )
+
+
class TestCase(unittest.TestCase):
def reset_global_state(self):
"""Reset any global state to its original settings.
@@ -85,9 +94,7 @@ class TestCase(unittest.TestCase):
In the future this should really be done with some registry that
can then be cleaned in a more obvious way.
"""
- util.PROC_CMDLINE = None
util._DNS_REDIRECT_IP = None
- util._LSB_RELEASE = {}
def setUp(self):
super(TestCase, self).setUp()
@@ -114,7 +121,7 @@ class CiTestCase(TestCase):
# Subclass overrides for specific test behavior
# Whether or not a unit test needs logfile setup
with_logs = False
- allowed_subp = False
+ allowed_subp: ClassVar[Union[List, bool]] = False
SUBP_SHELL_TRUE = "shell=true"
@contextmanager
@@ -226,10 +233,7 @@ class CiTestCase(TestCase):
@classmethod
def random_string(cls, length=8):
- """return a random lowercase string with default length of 8"""
- return "".join(
- random.choice(string.ascii_lowercase) for _ in range(length)
- )
+ return random_string(length)
class ResourceUsingTestCase(CiTestCase):
@@ -530,7 +534,7 @@ if not hasattr(mock.Mock, "assert_not_called"):
)
raise AssertionError(msg)
- mock.Mock.assert_not_called = __mock_assert_not_called
+ mock.Mock.assert_not_called = __mock_assert_not_called # type: ignore
def get_top_level_dir() -> Path:
@@ -551,4 +555,32 @@ def cloud_init_project_dir(sub_path: str) -> str:
return str(get_top_level_dir() / sub_path)
+@contextmanager
+def does_not_raise():
+ """Context manager to parametrize tests raising and not raising exceptions
+
+ Note: On Python 3.7+, this can be replaced by contextlib.nullcontext.
+ More info:
+ https://docs.pytest.org/en/6.2.x/example/parametrize.html?highlight=does_not_raise#parametrizing-conditional-raising
+
+ Example:
+ --------
+ >>> @pytest.mark.parametrize(
+ >>> "example_input,expectation",
+ >>> [
+ >>> (1, does_not_raise()),
+ >>> (0, pytest.raises(ZeroDivisionError)),
+ >>> ],
+ >>> )
+ >>> def test_division(example_input, expectation):
+ >>> with expectation:
+ >>> assert (0 / example_input) is not None
+
+ """
+ try:
+ yield
+ except Exception as ex:
+ pytest.fail("DID RAISE {0}".format(ex))
+
+
# vi: ts=4 expandtab
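The docstring above already names the modern alternative: on Python 3.7+, contextlib.nullcontext can stand in for this helper. A minimal sketch of the same parametrized test using it, built only from the names in the docstring:

    import contextlib

    import pytest

    @pytest.mark.parametrize(
        "example_input,expectation",
        [
            (1, contextlib.nullcontext()),
            (0, pytest.raises(ZeroDivisionError)),
        ],
    )
    def test_division(example_input, expectation):
        # 0 / 1 evaluates to 0 (not None); 0 / 0 raises ZeroDivisionError,
        # which pytest.raises captures.
        with expectation:
            assert (0 / example_input) is not None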
diff --git a/tests/unittests/net/test_dhcp.py b/tests/unittests/net/test_dhcp.py
index 08ca001a..db9f0e97 100644
--- a/tests/unittests/net/test_dhcp.py
+++ b/tests/unittests/net/test_dhcp.py
@@ -7,7 +7,6 @@ from textwrap import dedent
import httpretty
import pytest
-import cloudinit.net as net
from cloudinit.net.dhcp import (
InvalidDHCPLeaseFileError,
NoDHCPLeaseError,
@@ -19,6 +18,7 @@ from cloudinit.net.dhcp import (
parse_dhcp_lease_file,
parse_static_routes,
)
+from cloudinit.net.ephemeral import EphemeralDHCPv4
from cloudinit.util import ensure_file, write_file
from tests.unittests.helpers import (
CiTestCase,
@@ -157,8 +157,8 @@ class TestDHCPRFC3442(CiTestCase):
write_file(lease_file, content)
self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
- @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
- @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network")
+ @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery")
def test_obtain_lease_parses_static_routes(self, m_maybe, m_ipv4):
"""EphemeralDHPCv4 parses rfc3442 routes for EphemeralIPv4Network"""
lease = [
@@ -173,7 +173,7 @@ class TestDHCPRFC3442(CiTestCase):
}
]
m_maybe.return_value = lease
- eph = net.dhcp.EphemeralDHCPv4()
+ eph = EphemeralDHCPv4()
eph.obtain_lease()
expected_kwargs = {
"interface": "wlp3s0",
@@ -185,8 +185,8 @@ class TestDHCPRFC3442(CiTestCase):
}
m_ipv4.assert_called_with(**expected_kwargs)
- @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
- @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network")
+ @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery")
def test_obtain_centos_lease_parses_static_routes(self, m_maybe, m_ipv4):
"""
EphemeralDHCPv4 parses rfc3442 routes for EphemeralIPv4Network
@@ -204,7 +204,7 @@ class TestDHCPRFC3442(CiTestCase):
}
]
m_maybe.return_value = lease
- eph = net.dhcp.EphemeralDHCPv4()
+ eph = EphemeralDHCPv4()
eph.obtain_lease()
expected_kwargs = {
"interface": "wlp3s0",
@@ -776,7 +776,7 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
url = "http://example.org/index.html"
httpretty.register_uri(httpretty.GET, url)
- with net.dhcp.EphemeralDHCPv4(
+ with EphemeralDHCPv4(
connectivity_url_data={"url": url},
) as lease:
self.assertIsNone(lease)
@@ -784,7 +784,7 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
m_dhcp.assert_not_called()
@mock.patch("cloudinit.net.dhcp.subp.subp")
- @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery")
def test_ephemeral_dhcp_setup_network_if_url_connectivity(
self, m_dhcp, m_subp
):
@@ -799,7 +799,7 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
m_subp.return_value = ("", "")
httpretty.register_uri(httpretty.GET, url, body={}, status=404)
- with net.dhcp.EphemeralDHCPv4(
+ with EphemeralDHCPv4(
connectivity_url_data={"url": url},
) as lease:
self.assertEqual(fake_lease, lease)
@@ -816,38 +816,38 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase):
],
)
class TestEphemeralDhcpLeaseErrors:
- @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery")
def test_obtain_lease_raises_error(self, m_dhcp, error_class):
m_dhcp.side_effect = [error_class()]
with pytest.raises(error_class):
- net.dhcp.EphemeralDHCPv4().obtain_lease()
+ EphemeralDHCPv4().obtain_lease()
assert len(m_dhcp.mock_calls) == 1
- @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery")
def test_obtain_lease_umbrella_error(self, m_dhcp, error_class):
m_dhcp.side_effect = [error_class()]
with pytest.raises(NoDHCPLeaseError):
- net.dhcp.EphemeralDHCPv4().obtain_lease()
+ EphemeralDHCPv4().obtain_lease()
assert len(m_dhcp.mock_calls) == 1
- @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery")
def test_ctx_mgr_raises_error(self, m_dhcp, error_class):
m_dhcp.side_effect = [error_class()]
with pytest.raises(error_class):
- with net.dhcp.EphemeralDHCPv4():
+ with EphemeralDHCPv4():
pass
assert len(m_dhcp.mock_calls) == 1
- @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery")
def test_ctx_mgr_umbrella_error(self, m_dhcp, error_class):
m_dhcp.side_effect = [error_class()]
with pytest.raises(NoDHCPLeaseError):
- with net.dhcp.EphemeralDHCPv4():
+ with EphemeralDHCPv4():
pass
assert len(m_dhcp.mock_calls) == 1
diff --git a/tests/unittests/net/test_init.py b/tests/unittests/net/test_init.py
index 768cc112..53bbb15a 100644
--- a/tests/unittests/net/test_init.py
+++ b/tests/unittests/net/test_init.py
@@ -13,6 +13,7 @@ import pytest
import requests
import cloudinit.net as net
+from cloudinit.net.ephemeral import EphemeralIPv4Network, EphemeralIPv6Network
from cloudinit.subp import ProcessExecutionError
from cloudinit.util import ensure_file, write_file
from tests.unittests.helpers import CiTestCase, HttprettyTestCase
@@ -767,7 +768,7 @@ class TestEphemeralIPV4Network(CiTestCase):
params = copy.deepcopy(required_params)
params[key] = None
with self.assertRaises(ValueError) as context_manager:
- net.EphemeralIPv4Network(**params)
+ EphemeralIPv4Network(**params)
error = context_manager.exception
self.assertIn("Cannot init network on", str(error))
self.assertEqual(0, m_subp.call_count)
@@ -783,7 +784,7 @@ class TestEphemeralIPV4Network(CiTestCase):
for error_val in invalid_masks:
params["prefix_or_mask"] = error_val
with self.assertRaises(ValueError) as context_manager:
- with net.EphemeralIPv4Network(**params):
+ with EphemeralIPv4Network(**params):
pass
error = context_manager.exception
self.assertIn(
@@ -849,7 +850,7 @@ class TestEphemeralIPV4Network(CiTestCase):
"prefix_or_mask": "255.255.255.0",
"broadcast": "192.168.2.255",
}
- with net.EphemeralIPv4Network(**params):
+ with EphemeralIPv4Network(**params):
self.assertEqual(expected_setup_calls, m_subp.call_args_list)
m_subp.assert_has_calls(expected_teardown_calls)
@@ -867,7 +868,7 @@ class TestEphemeralIPV4Network(CiTestCase):
"connectivity_url_data": {"url": "http://example.org/index.html"},
}
- with net.EphemeralIPv4Network(**params):
+ with EphemeralIPv4Network(**params):
self.assertEqual(
[mock.call(url="http://example.org/index.html", timeout=5)],
m_readurl.call_args_list,
@@ -907,7 +908,7 @@ class TestEphemeralIPV4Network(CiTestCase):
update_env={"LANG": "C"},
)
]
- with net.EphemeralIPv4Network(**params):
+ with EphemeralIPv4Network(**params):
pass
self.assertEqual(expected_calls, m_subp.call_args_list)
self.assertIn(
@@ -925,7 +926,7 @@ class TestEphemeralIPV4Network(CiTestCase):
}
for prefix_val in ["24", 16]: # prefix can be int or string
params["prefix_or_mask"] = prefix_val
- with net.EphemeralIPv4Network(**params):
+ with EphemeralIPv4Network(**params):
pass
m_subp.assert_has_calls(
[
@@ -1050,7 +1051,7 @@ class TestEphemeralIPV4Network(CiTestCase):
),
]
- with net.EphemeralIPv4Network(**params):
+ with EphemeralIPv4Network(**params):
self.assertEqual(expected_setup_calls, m_subp.call_args_list)
m_subp.assert_has_calls(expected_teardown_calls)
@@ -1189,11 +1190,26 @@ class TestEphemeralIPV4Network(CiTestCase):
capture=True,
),
]
- with net.EphemeralIPv4Network(**params):
+ with EphemeralIPv4Network(**params):
self.assertEqual(expected_setup_calls, m_subp.call_args_list)
m_subp.assert_has_calls(expected_setup_calls + expected_teardown_calls)
+class TestEphemeralIPV6Network:
+ @mock.patch("cloudinit.net.read_sys_net")
+ @mock.patch("cloudinit.net.subp.subp")
+ def test_ephemeral_ipv6_network_performs_setup(self, m_subp, _):
+ """EphemeralIPv4Network performs teardown on the device if setup."""
+ expected_setup_calls = [
+ mock.call(
+ ["ip", "link", "set", "dev", "eth0", "up"],
+ capture=False,
+ ),
+ ]
+ with EphemeralIPv6Network(interface="eth0"):
+ assert expected_setup_calls == m_subp.call_args_list
+
+
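Based solely on the expected subp call asserted here, the class under test behaves like a context manager that brings the link up on entry. A hypothetical minimal stand-in (the real EphemeralIPv6Network in cloudinit.net.ephemeral may do more, such as link-local address handling):

    from cloudinit import subp

    class EphemeralIPv6NetworkSketch:
        """Hypothetical stand-in mirroring the behavior asserted above."""

        def __init__(self, interface):
            self.interface = interface

        def __enter__(self):
            # Matches expected_setup_calls in the test above.
            subp.subp(
                ["ip", "link", "set", "dev", self.interface, "up"],
                capture=False,
            )
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            # Teardown is not exercised by this test.
            pass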
class TestHasURLConnectivity(HttprettyTestCase):
def setUp(self):
super(TestHasURLConnectivity, self).setUp()
diff --git a/tests/unittests/test_reporting.py b/tests/unittests/reporting/test_reporting.py
index f6dd96e0..a6cf6a95 100644
--- a/tests/unittests/test_reporting.py
+++ b/tests/unittests/reporting/test_reporting.py
@@ -4,9 +4,16 @@
from unittest import mock
+import pytest
+
from cloudinit import reporting
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
from cloudinit.reporting import events, handlers
-from tests.unittests.helpers import TestCase
+from tests.unittests.helpers import TestCase, skipUnlessJsonSchema
def _fake_registry():
@@ -453,4 +460,110 @@ class TestStatusAccess(TestCase):
self.assertRaises(AttributeError, getattr, events.status, "BOGUS")
-# vi: ts=4 expandtab
+@skipUnlessJsonSchema()
+class TestReportingSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ [
+ # GOOD: Minimum valid parameters
+ ({"reporting": {"a": {"type": "print"}}}, None),
+ ({"reporting": {"a": {"type": "log"}}}, None),
+ (
+ {
+ "reporting": {
+ "a": {"type": "webhook", "endpoint": "http://a"}
+ }
+ },
+ None,
+ ),
+ ({"reporting": {"a": {"type": "hyperv"}}}, None),
+ # GOOD: All valid parameters
+ ({"reporting": {"a": {"type": "log", "level": "WARN"}}}, None),
+ (
+ {
+ "reporting": {
+ "a": {
+ "type": "webhook",
+ "endpoint": "http://a",
+ "timeout": 1,
+ "retries": 1,
+ "consumer_key": "somekey",
+ "token_key": "somekey",
+ "token_secret": "somesecret",
+ "consumer_secret": "somesecret",
+ }
+ }
+ },
+ None,
+ ),
+ (
+ {
+ "reporting": {
+ "a": {
+ "type": "hyperv",
+ "kvp_file_path": "/some/path",
+ "event_types": ["a", "b"],
+ }
+ }
+ },
+ None,
+ ),
+ # GOOD: All combined together
+ (
+ {
+ "reporting": {
+ "a": {"type": "print"},
+ "b": {"type": "log", "level": "WARN"},
+ "c": {
+ "type": "webhook",
+ "endpoint": "http://a",
+ "timeout": 1,
+ "retries": 1,
+ "consumer_key": "somekey",
+ "token_key": "somekey",
+ "token_secret": "somesecret",
+ "consumer_secret": "somesecret",
+ },
+ "d": {
+ "type": "hyperv",
+ "kvp_file_path": "/some/path",
+ "event_types": ["a", "b"],
+ },
+ }
+ },
+ None,
+ ),
+ # BAD: no top level objects
+ ({"reporting": "a"}, "'a' is not of type 'object'"),
+ ({"reporting": {"a": "b"}}, "'b' is not of type 'object'"),
+ # BAD: invalid type
+ ({"reporting": {"a": {"type": "b"}}}, "not valid"),
+ # BAD: invalid additional properties
+ ({"reporting": {"a": {"type": "print", "a": "b"}}}, "not valid"),
+ ({"reporting": {"a": {"type": "log", "a": "b"}}}, "not valid"),
+ (
+ {
+ "reporting": {
+ "a": {
+ "type": "webhook",
+ "endpoint": "http://a",
+ "a": "b",
+ }
+ }
+ },
+ "not valid",
+ ),
+ ({"reporting": {"a": {"type": "hyperv", "a": "b"}}}, "not valid"),
+ # BAD: missing required properties
+ ({"reporting": {"a": {"level": "FATAL"}}}, "not valid"),
+ ({"reporting": {"a": {"endpoint": "http://a"}}}, "not valid"),
+ ({"reporting": {"a": {"kvp_file_path": "/a/b"}}}, "not valid"),
+ ({"reporting": {"a": {"type": "webhook"}}}, "not valid"),
+ ],
+ )
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
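Outside of parametrization, the same helpers support a one-off strict check; a sketch using only names already imported at the top of this module:

    from cloudinit.config.schema import (
        SchemaValidationError,
        get_schema,
        validate_cloudconfig_schema,
    )

    config = {"reporting": {"a": {"type": "webhook", "endpoint": "http://a"}}}
    try:
        # strict=True raises SchemaValidationError instead of logging.
        validate_cloudconfig_schema(config, get_schema(), strict=True)
    except SchemaValidationError as e:
        print("invalid reporting config:", e)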
diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/reporting/test_reporting_hyperv.py
index 35ab0c58..35ab0c58 100644
--- a/tests/unittests/test_reporting_hyperv.py
+++ b/tests/unittests/reporting/test_reporting_hyperv.py
diff --git a/tests/unittests/reporting/test_webhook_handler.py b/tests/unittests/reporting/test_webhook_handler.py
new file mode 100644
index 00000000..bef457c7
--- /dev/null
+++ b/tests/unittests/reporting/test_webhook_handler.py
@@ -0,0 +1,120 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import time
+from contextlib import suppress
+from unittest.mock import PropertyMock
+
+import pytest
+import responses
+
+from cloudinit.reporting import flush_events
+from cloudinit.reporting.events import report_start_event
+from cloudinit.reporting.handlers import WebHookHandler
+
+
+class TestWebHookHandler:
+ @pytest.fixture(autouse=True)
+ def setup(self, mocker):
+ handler = WebHookHandler(endpoint="http://localhost")
+ m_registered_items = mocker.patch(
+ "cloudinit.registry.DictRegistry.registered_items",
+ new_callable=PropertyMock,
+ )
+ m_registered_items.return_value = {"webhook": handler}
+
+ @responses.activate
+ def test_webhook_handler(self, caplog):
+ """Test the happy path."""
+ responses.add(responses.POST, "http://localhost", status=200)
+ report_start_event("name", "description")
+ flush_events()
+ assert 1 == caplog.text.count(
+ "Read from http://localhost (200, 0b) after 1 attempts"
+ )
+
+ @responses.activate
+ def test_404(self, caplog):
+ """Test failure"""
+ responses.add(responses.POST, "http://localhost", status=404)
+ report_start_event("name", "description")
+ flush_events()
+ assert 1 == caplog.text.count("Failed posting event")
+
+ @responses.activate
+ def test_background_processing(self, caplog):
+ """Test that processing happens in background.
+
+ In the non-flush case, ensure that the event is still posted.
+ Since the event is posted in the background, poll in a loop for it.
+ """
+ responses.add(responses.POST, "http://localhost", status=200)
+ report_start_event("name", "description")
+ start_time = time.time()
+ while time.time() - start_time < 3:
+ with suppress(AssertionError):
+ assert (
+ "Read from http://localhost (200, 0b) after 1 attempts"
+ in caplog.text
+ )
+ break
+ else:
+ pytest.fail("Never got expected log message")
+
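The poll-with-suppress pattern above (and in test_multiple_failures_no_flush below) could also be factored into a small helper; a hypothetical sketch, where wait_for and its parameters are names invented here rather than part of the test helpers:

    import time

    def wait_for(predicate, timeout=3.0, interval=0.01):
        """Poll predicate until it returns True or timeout elapses."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            if predicate():
                return True
            time.sleep(interval)
        return False

    # Usage in the test body would then reduce to:
    #     assert wait_for(
    #         lambda: "Read from http://localhost (200, 0b) after 1 attempts"
    #         in caplog.text
    #     )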
+ @responses.activate
+ @pytest.mark.parametrize(
+ "num_failures,expected_log_count,expected_cancel",
+ [(2, 2, False), (3, 3, True), (50, 3, True)],
+ )
+ def test_failures_cancel_flush(
+ self, caplog, num_failures, expected_log_count, expected_cancel
+ ):
+ """Test that too many failures will cancel further processing on flush.
+
+ 2 messages should not cancel on flush.
+ 3 or more should cancel on flush.
+ The number of received messages will be based on how many have
+ been processed before the flush was initiated.
+ """
+ responses.add(responses.POST, "http://localhost", status=404)
+ for _ in range(num_failures):
+ report_start_event("name", "description")
+ flush_events()
+ # Force a context switch. Without this, it's possible that the
+ # expected log message hasn't made it to the log file yet
+ time.sleep(0.01)
+
+ # If we've pushed a bunch of messages, any number could have been
+ # processed before we get to the flush.
+ assert (
+ expected_log_count
+ <= caplog.text.count("Failed posting event")
+ <= num_failures
+ )
+ cancelled_message = (
+ "Multiple consecutive failures in WebHookHandler. "
+ "Cancelling all queued events"
+ )
+ if expected_cancel:
+ assert cancelled_message in caplog.text
+ else:
+ assert cancelled_message not in caplog.text
+
+ @responses.activate
+ def test_multiple_failures_no_flush(self, caplog):
+ """Test we don't cancel posting if flush hasn't been requested.
+
+ Since processing happens in the background, wait in a loop
+ for all messages to be posted
+ """
+ responses.add(responses.POST, "http://localhost", status=404)
+ for _ in range(20):
+ report_start_event("name", "description")
+ start_time = time.time()
+ while time.time() - start_time < 3:
+ with suppress(AssertionError):
+ assert 20 == caplog.text.count("Failed posting event")
+ break
+ else:
+ pytest.fail(
+ "Expected 20 failures, only got "
+ f"{caplog.text.count('Failed posting event')}"
+ )
diff --git a/tests/unittests/sources/test_aliyun.py b/tests/unittests/sources/test_aliyun.py
index 8a61d5ee..e628dc02 100644
--- a/tests/unittests/sources/test_aliyun.py
+++ b/tests/unittests/sources/test_aliyun.py
@@ -149,7 +149,7 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase):
def _test_host_name(self):
self.assertEqual(
- self.default_metadata["hostname"], self.ds.get_hostname()
+ self.default_metadata["hostname"], self.ds.get_hostname().hostname
)
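This one-line change reflects an API shift visible throughout this import: get_hostname() appears to now return a result object carrying the string on a .hostname attribute, rather than the bare string, so the comparison dereferences it:

    # Before: self.ds.get_hostname()          -> "host"  (bare string)
    # After:  self.ds.get_hostname().hostname -> "host"  (attribute access)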
@mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun")
diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py
index b7dae873..b1edf1f3 100644
--- a/tests/unittests/sources/test_azure.py
+++ b/tests/unittests/sources/test_azure.py
@@ -12,7 +12,6 @@ from pathlib import Path
import httpretty
import pytest
import requests
-import yaml
from cloudinit import distros, helpers, subp, url_helper
from cloudinit.net import dhcp
@@ -23,7 +22,6 @@ from cloudinit.sources.helpers import netlink
from cloudinit.util import (
MountFailedError,
b64e,
- decode_binary,
json_dumps,
load_file,
load_json,
@@ -87,6 +85,25 @@ def mock_azure_report_failure_to_fabric():
@pytest.fixture
+def mock_device_driver():
+ with mock.patch(
+ MOCKPATH + "device_driver",
+ autospec=True,
+ return_value=None,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_generate_fallback_config():
+ with mock.patch(
+ MOCKPATH + "net.generate_fallback_config",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
def mock_time():
with mock.patch(
MOCKPATH + "time",
@@ -122,7 +139,7 @@ def mock_ephemeral_dhcp_v4():
@pytest.fixture
def mock_net_dhcp_maybe_perform_dhcp_discovery():
with mock.patch(
- "cloudinit.net.dhcp.maybe_perform_dhcp_discovery",
+ "cloudinit.net.ephemeral.maybe_perform_dhcp_discovery",
return_value=[
{
"unknown-245": "0a:0b:0c:0d",
@@ -140,7 +157,7 @@ def mock_net_dhcp_maybe_perform_dhcp_discovery():
@pytest.fixture
def mock_net_dhcp_EphemeralIPv4Network():
with mock.patch(
- "cloudinit.net.dhcp.EphemeralIPv4Network",
+ "cloudinit.net.ephemeral.EphemeralIPv4Network",
autospec=True,
) as m:
yield m
@@ -279,83 +296,101 @@ def patched_markers_dir_path(tmpdir):
@pytest.fixture
-def patched_reported_ready_marker_path(patched_markers_dir_path):
+def patched_reported_ready_marker_path(azure_ds, patched_markers_dir_path):
reported_ready_marker = patched_markers_dir_path / "reported_ready"
- with mock.patch(
- MOCKPATH + "REPORTED_READY_MARKER_FILE", str(reported_ready_marker)
+ with mock.patch.object(
+ azure_ds, "_reported_ready_marker_file", str(reported_ready_marker)
):
yield reported_ready_marker
-def construct_valid_ovf_env(
- data=None, pubkeys=None, userdata=None, platform_settings=None
+def construct_ovf_env(
+ *,
+ custom_data=None,
+ hostname="test-host",
+ username="test-user",
+ password=None,
+ public_keys=None,
+ disable_ssh_password_auth=None,
+ preprovisioned_vm=None,
+ preprovisioned_vm_type=None,
):
- if data is None:
- data = {"HostName": "FOOHOST"}
- if pubkeys is None:
- pubkeys = {}
-
- content = """<?xml version="1.0" encoding="utf-8"?>
-<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
- xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
- xmlns:wa="http://schemas.microsoft.com/windowsazure"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-
- <wa:ProvisioningSection><wa:Version>1.0</wa:Version>
- <LinuxProvisioningConfigurationSet
- xmlns="http://schemas.microsoft.com/windowsazure"
- xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
- <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
- """
- for key, dval in data.items():
- if isinstance(dval, dict):
- val = dict(dval).get("text")
- attrs = " " + " ".join(
- [
- "%s='%s'" % (k, v)
- for k, v in dict(dval).items()
- if k != "text"
- ]
- )
- else:
- val = dval
- attrs = ""
- content += "<%s%s>%s</%s>\n" % (key, attrs, val, key)
-
- if userdata:
- content += "<UserData>%s</UserData>\n" % (b64e(userdata))
-
- if pubkeys:
- content += "<SSH><PublicKeys>\n"
- for fp, path, value in pubkeys:
- content += " <PublicKey>"
- if fp and path:
- content += "<Fingerprint>%s</Fingerprint><Path>%s</Path>" % (
- fp,
- path,
- )
- if value:
- content += "<Value>%s</Value>" % value
- content += "</PublicKey>\n"
- content += "</PublicKeys></SSH>"
- content += """
- </LinuxProvisioningConfigurationSet>
- </wa:ProvisioningSection>
- <wa:PlatformSettingsSection><wa:Version>1.0</wa:Version>
- <PlatformSettings xmlns="http://schemas.microsoft.com/windowsazure"
- xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
- <KmsServerHostname>kms.core.windows.net</KmsServerHostname>
- <ProvisionGuestAgent>false</ProvisionGuestAgent>
- <GuestAgentPackageName i:nil="true" />"""
- if platform_settings:
- for k, v in platform_settings.items():
- content += "<%s>%s</%s>\n" % (k, v, k)
- if "PreprovisionedVMType" not in platform_settings:
- content += """<PreprovisionedVMType i:nil="true" />"""
- content += """</PlatformSettings></wa:PlatformSettingsSection>
-</Environment>"""
-
- return content
+ content = [
+ '<?xml version="1.0" encoding="utf-8"?>',
+ '<ns0:Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"',
+ 'xmlns:ns0="http://schemas.dmtf.org/ovf/environment/1"',
+ 'xmlns:ns1="http://schemas.microsoft.com/windowsazure"',
+ 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">',
+ "<ns1:ProvisioningSection>",
+ "<ns1:Version>1.0</ns1:Version>",
+ "<ns1:LinuxProvisioningConfigurationSet>",
+ "<ns1:ConfigurationSetType>"
+ "LinuxProvisioningConfiguration"
+ "</ns1:ConfigurationSetType>",
+ ]
+ if hostname is not None:
+ content.append("<ns1:HostName>%s</ns1:HostName>" % hostname)
+ if username is not None:
+ content.append("<ns1:UserName>%s</ns1:UserName>" % username)
+ if password is not None:
+ content.append("<ns1:UserPassword>%s</ns1:UserPassword>" % password)
+ if custom_data is not None:
+ content.append(
+ "<ns1:CustomData>%s</ns1:CustomData>" % (b64e(custom_data))
+ )
+ if disable_ssh_password_auth is not None:
+ content.append(
+ "<ns1:DisableSshPasswordAuthentication>%s"
+ % str(disable_ssh_password_auth).lower()
+ + "</ns1:DisableSshPasswordAuthentication>"
+ )
+ if public_keys is not None:
+ content += ["<ns1:SSH>", "<ns1:PublicKeys>"]
+ for public_key in public_keys:
+ content.append("<ns1:PublicKey>")
+ fp = public_key.get("fingerprint")
+ if fp is not None:
+ content.append("<ns1:Fingerprint>%s</ns1:Fingerprint>" % fp)
+ path = public_key.get("path")
+ if path is not None:
+ content.append("<ns1:Path>%s</ns1:Path>" % path)
+ value = public_key.get("value")
+ if value is not None:
+ content.append("<ns1:Value>%s</ns1:Value>" % value)
+ content.append("</ns1:PublicKey>")
+ content += ["</ns1:PublicKeys>", "</ns1:SSH>"]
+ content += [
+ "</ns1:LinuxProvisioningConfigurationSet>",
+ "</ns1:ProvisioningSection>",
+ "<ns1:PlatformSettingsSection>",
+ "<ns1:Version>1.0</ns1:Version>",
+ "<ns1:PlatformSettings>",
+ "<ns1:KmsServerHostname>"
+ "kms.core.windows.net"
+ "</ns1:KmsServerHostname>",
+ "<ns1:ProvisionGuestAgent>false</ns1:ProvisionGuestAgent>",
+ '<ns1:GuestAgentPackageName xsi:nil="true" />',
+ ]
+ if preprovisioned_vm is not None:
+ content.append(
+ "<ns1:PreprovisionedVm>%s</ns1:PreprovisionedVm>"
+ % str(preprovisioned_vm).lower()
+ )
+
+ if preprovisioned_vm_type is None:
+ content.append('<ns1:PreprovisionedVMType xsi:nil="true" />')
+ else:
+ content.append(
+ "<ns1:PreprovisionedVMType>%s</ns1:PreprovisionedVMType>"
+ % preprovisioned_vm_type
+ )
+ content += [
+ "</ns1:PlatformSettings>",
+ "</ns1:PlatformSettingsSection>",
+ "</ns0:Environment>",
+ ]
+
+ return "\n".join(content)
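For illustration, a call built from the keyword arguments defined above:

    ovf = construct_ovf_env(
        hostname="myhost",
        username="myuser",
        password="mypass",
        preprovisioned_vm=True,
    )
    # The resulting ovf-env.xml string contains, among other elements:
    #   <ns1:HostName>myhost</ns1:HostName>
    #   <ns1:UserName>myuser</ns1:UserName>
    #   <ns1:UserPassword>mypass</ns1:UserPassword>
    #   <ns1:PreprovisionedVm>true</ns1:PreprovisionedVm>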
NETWORK_METADATA = {
@@ -441,7 +476,7 @@ IMDS_NETWORK_METADATA = {
EXAMPLE_UUID = "d0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8"
-class TestParseNetworkConfig(CiTestCase):
+class TestNetworkConfig:
maxDiff = None
fallback_config = {
@@ -457,11 +492,8 @@ class TestParseNetworkConfig(CiTestCase):
],
}
- @mock.patch(
- "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
- )
- def test_single_ipv4_nic_configuration(self, m_driver):
- """parse_network_config emits dhcp on single nic with ipv4"""
+ def test_single_ipv4_nic_configuration(self, azure_ds, mock_device_driver):
+ """Network config emits dhcp on single nic with ipv4"""
expected = {
"ethernets": {
"eth0": {
@@ -474,13 +506,14 @@ class TestParseNetworkConfig(CiTestCase):
},
"version": 2,
}
- self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA))
+ azure_ds._metadata_imds = NETWORK_METADATA
- @mock.patch(
- "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
- )
- def test_increases_route_metric_for_non_primary_nics(self, m_driver):
- """parse_network_config increases route-metric for each nic"""
+ assert azure_ds.network_config == expected
+
+ def test_increases_route_metric_for_non_primary_nics(
+ self, azure_ds, mock_device_driver
+ ):
+ """Network config increases route-metric for each nic"""
expected = {
"ethernets": {
"eth0": {
@@ -514,70 +547,14 @@ class TestParseNetworkConfig(CiTestCase):
third_intf["ipv4"]["subnet"][0]["address"] = "10.0.2.0"
third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6"
imds_data["network"]["interface"].append(third_intf)
- self.assertEqual(expected, dsaz.parse_network_config(imds_data))
-
- @mock.patch(
- "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
- )
- def test_ipv4_and_ipv6_route_metrics_match_for_nics(self, m_driver):
- """parse_network_config emits matching ipv4 and ipv6 route-metrics."""
- expected = {
- "ethernets": {
- "eth0": {
- "addresses": ["10.0.0.5/24", "2001:dead:beef::2/128"],
- "dhcp4": True,
- "dhcp4-overrides": {"route-metric": 100},
- "dhcp6": True,
- "dhcp6-overrides": {"route-metric": 100},
- "match": {"macaddress": "00:0d:3a:04:75:98"},
- "set-name": "eth0",
- },
- "eth1": {
- "set-name": "eth1",
- "match": {"macaddress": "22:0d:3a:04:75:98"},
- "dhcp4": True,
- "dhcp6": False,
- "dhcp4-overrides": {"route-metric": 200},
- },
- "eth2": {
- "set-name": "eth2",
- "match": {"macaddress": "33:0d:3a:04:75:98"},
- "dhcp4": True,
- "dhcp4-overrides": {"route-metric": 300},
- "dhcp6": True,
- "dhcp6-overrides": {"route-metric": 300},
- },
- },
- "version": 2,
- }
- imds_data = copy.deepcopy(NETWORK_METADATA)
- nic1 = imds_data["network"]["interface"][0]
- nic1["ipv4"]["ipAddress"].append({"privateIpAddress": "10.0.0.5"})
+ azure_ds._metadata_imds = imds_data
- nic1["ipv6"] = {
- "subnet": [{"address": "2001:dead:beef::16"}],
- "ipAddress": [
- {"privateIpAddress": "2001:dead:beef::1"},
- {"privateIpAddress": "2001:dead:beef::2"},
- ],
- }
- imds_data["network"]["interface"].append(SECONDARY_INTERFACE)
- third_intf = copy.deepcopy(SECONDARY_INTERFACE)
- third_intf["macAddress"] = third_intf["macAddress"].replace("22", "33")
- third_intf["ipv4"]["subnet"][0]["address"] = "10.0.2.0"
- third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6"
- third_intf["ipv6"] = {
- "subnet": [{"prefix": "64", "address": "2001:dead:beef::2"}],
- "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}],
- }
- imds_data["network"]["interface"].append(third_intf)
- self.assertEqual(expected, dsaz.parse_network_config(imds_data))
+ assert azure_ds.network_config == expected
- @mock.patch(
- "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
- )
- def test_ipv4_secondary_ips_will_be_static_addrs(self, m_driver):
- """parse_network_config emits primary ipv4 as dhcp others are static"""
+ def test_ipv4_secondary_ips_will_be_static_addrs(
+ self, azure_ds, mock_device_driver
+ ):
+ """Network config emits primary ipv4 as dhcp others are static"""
expected = {
"ethernets": {
"eth0": {
@@ -600,13 +577,14 @@ class TestParseNetworkConfig(CiTestCase):
"subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}],
"ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}],
}
- self.assertEqual(expected, dsaz.parse_network_config(imds_data))
+ azure_ds._metadata_imds = imds_data
- @mock.patch(
- "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
- )
- def test_ipv6_secondary_ips_will_be_static_cidrs(self, m_driver):
- """parse_network_config emits primary ipv6 as dhcp others are static"""
+ assert azure_ds.network_config == expected
+
+ def test_ipv6_secondary_ips_will_be_static_cidrs(
+ self, azure_ds, mock_device_driver
+ ):
+ """Network config emits primary ipv6 as dhcp others are static"""
expected = {
"ethernets": {
"eth0": {
@@ -633,14 +611,13 @@ class TestParseNetworkConfig(CiTestCase):
{"privateIpAddress": "2001:dead:beef::2"},
],
}
- self.assertEqual(expected, dsaz.parse_network_config(imds_data))
+ azure_ds._metadata_imds = imds_data
- @mock.patch(
- "cloudinit.sources.DataSourceAzure.device_driver",
- return_value="hv_netvsc",
- )
- def test_match_driver_for_netvsc(self, m_driver):
- """parse_network_config emits driver when using netvsc."""
+ assert azure_ds.network_config == expected
+
+ def test_match_driver_for_netvsc(self, azure_ds, mock_device_driver):
+ """Network config emits driver when using netvsc."""
+ mock_device_driver.return_value = "hv_netvsc"
expected = {
"ethernets": {
"eth0": {
@@ -656,16 +633,31 @@ class TestParseNetworkConfig(CiTestCase):
},
"version": 2,
}
- self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA))
+ azure_ds._metadata_imds = NETWORK_METADATA
- @mock.patch(
- "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
- )
- @mock.patch("cloudinit.net.generate_fallback_config")
- def test_parse_network_config_uses_fallback_cfg_when_no_network_metadata(
- self, m_fallback_config, m_driver
+ assert azure_ds.network_config == expected
+
+ def test_uses_fallback_cfg_when_apply_network_config_is_false(
+ self, azure_ds, mock_device_driver, mock_generate_fallback_config
+ ):
+ azure_ds.ds_cfg["apply_network_config"] = False
+ azure_ds._metadata_imds = NETWORK_METADATA
+ mock_generate_fallback_config.return_value = self.fallback_config
+
+ assert azure_ds.network_config == self.fallback_config
+
+ def test_uses_fallback_cfg_when_imds_metadata_unset(
+ self, azure_ds, mock_device_driver, mock_generate_fallback_config
+ ):
+ azure_ds._metadata_imds = UNSET
+ mock_generate_fallback_config.return_value = self.fallback_config
+
+ assert azure_ds.network_config == self.fallback_config
+
+ def test_uses_fallback_cfg_when_no_network_metadata(
+ self, azure_ds, mock_device_driver, mock_generate_fallback_config
):
- """parse_network_config generates fallback network config when the
+ """Network config generates fallback network config when the
IMDS instance metadata is corrupted/invalid, such as when
network metadata is not present.
"""
@@ -673,20 +665,15 @@ class TestParseNetworkConfig(CiTestCase):
NETWORK_METADATA
)
del imds_metadata_missing_network_metadata["network"]
- m_fallback_config.return_value = self.fallback_config
- self.assertEqual(
- self.fallback_config,
- dsaz.parse_network_config(imds_metadata_missing_network_metadata),
- )
+ mock_generate_fallback_config.return_value = self.fallback_config
+ azure_ds._metadata_imds = imds_metadata_missing_network_metadata
- @mock.patch(
- "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
- )
- @mock.patch("cloudinit.net.generate_fallback_config")
- def test_parse_network_config_uses_fallback_cfg_when_no_interface_metadata(
- self, m_fallback_config, m_driver
+ assert azure_ds.network_config == self.fallback_config
+
+ def test_uses_fallback_cfg_when_no_interface_metadata(
+ self, azure_ds, mock_device_driver, mock_generate_fallback_config
):
- """parse_network_config generates fallback network config when the
+ """Network config generates fallback network config when the
IMDS instance metadata is corrupted/invalid, such as when
network interface metadata is not present.
"""
@@ -694,13 +681,10 @@ class TestParseNetworkConfig(CiTestCase):
NETWORK_METADATA
)
del imds_metadata_missing_interface_metadata["network"]["interface"]
- m_fallback_config.return_value = self.fallback_config
- self.assertEqual(
- self.fallback_config,
- dsaz.parse_network_config(
- imds_metadata_missing_interface_metadata
- ),
- )
+ mock_generate_fallback_config.return_value = self.fallback_config
+ azure_ds._metadata_imds = imds_metadata_missing_interface_metadata
+
+ assert azure_ds.network_config == self.fallback_config
class TestGetMetadataFromIMDS(HttprettyTestCase):
@@ -1201,16 +1185,15 @@ scbus-1 on xpt0 bus 0
)
def test_basic_seed_dir(self):
- odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(hostname="myhost"),
"sys_cfg": {},
}
dsrc = self._get_ds(data)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(dsrc.userdata_raw, "")
- self.assertEqual(dsrc.metadata["local-hostname"], odata["HostName"])
+ self.assertEqual(dsrc.metadata["local-hostname"], "myhost")
self.assertTrue(
os.path.isfile(os.path.join(self.waagent_d, "ovf-env.xml"))
)
@@ -1221,9 +1204,8 @@ scbus-1 on xpt0 bus 0
)
def test_data_dir_without_imds_data(self):
- odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(hostname="myhost"),
"sys_cfg": {},
}
dsrc = self._get_ds(
@@ -1240,7 +1222,7 @@ scbus-1 on xpt0 bus 0
self.assertTrue(ret)
self.assertEqual(dsrc.userdata_raw, "")
- self.assertEqual(dsrc.metadata["local-hostname"], odata["HostName"])
+ self.assertEqual(dsrc.metadata["local-hostname"], "myhost")
self.assertTrue(
os.path.isfile(os.path.join(self.waagent_d, "ovf-env.xml"))
)
@@ -1269,9 +1251,10 @@ scbus-1 on xpt0 bus 0
def test_get_data_non_ubuntu_will_not_remove_network_scripts(self):
"""get_data on non-Ubuntu will not remove ubuntu net scripts."""
- odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(
+ hostname="myhost", username="myuser"
+ ),
"sys_cfg": {},
}
@@ -1282,9 +1265,8 @@ scbus-1 on xpt0 bus 0
def test_get_data_on_ubuntu_will_remove_network_scripts(self):
"""get_data will remove ubuntu net scripts on Ubuntu distro."""
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
- odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": sys_cfg,
}
@@ -1295,9 +1277,8 @@ scbus-1 on xpt0 bus 0
def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self):
"""When apply_network_config false, do not remove scripts on Ubuntu."""
sys_cfg = {"datasource": {"Azure": {"apply_network_config": False}}}
- odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": sys_cfg,
}
@@ -1307,28 +1288,19 @@ scbus-1 on xpt0 bus 0
def test_crawl_metadata_returns_structured_data_and_caches_nothing(self):
"""Return all structured metadata and cache no class attributes."""
- yaml_cfg = ""
- odata = {
- "HostName": "myhost",
- "UserName": "myuser",
- "UserData": {"text": "FOOBAR", "encoding": "plain"},
- "dscfg": {"text": yaml_cfg, "encoding": "plain"},
- }
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(
+ hostname="myhost", username="myuser", custom_data="FOOBAR"
+ ),
"sys_cfg": {},
}
dsrc = self._get_ds(data)
expected_cfg = {
"PreprovisionedVMType": None,
"PreprovisionedVm": False,
- "datasource": {"Azure": {}},
"system_info": {"default_user": {"name": "myuser"}},
}
expected_metadata = {
- "azure_data": {
- "configurationsettype": "LinuxProvisioningConfiguration"
- },
"imds": NETWORK_METADATA,
"instance-id": EXAMPLE_UUID,
"local-hostname": "myhost",
@@ -1346,11 +1318,11 @@ scbus-1 on xpt0 bus 0
list(crawled_metadata["files"].keys()), ["ovf-env.xml"]
)
self.assertIn(
- b"<HostName>myhost</HostName>",
+ b"<ns1:HostName>myhost</ns1:HostName>",
crawled_metadata["files"]["ovf-env.xml"],
)
self.assertEqual(crawled_metadata["metadata"], expected_metadata)
- self.assertEqual(crawled_metadata["userdata_raw"], "FOOBAR")
+ self.assertEqual(crawled_metadata["userdata_raw"], b"FOOBAR")
self.assertEqual(dsrc.userdata_raw, None)
self.assertEqual(dsrc.metadata, {})
self.assertEqual(dsrc._metadata_imds, UNSET)
@@ -1372,9 +1344,7 @@ scbus-1 on xpt0 bus 0
def test_crawl_metadata_call_imds_once_no_reprovision(self):
"""If reprovisioning, report ready at the end"""
- ovfenv = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "False"}
- )
+ ovfenv = construct_ovf_env(preprovisioned_vm=False)
data = {"ovfcontent": ovfenv, "sys_cfg": {}}
dsrc = self._get_ds(data)
@@ -1390,9 +1360,7 @@ scbus-1 on xpt0 bus 0
self, poll_imds_func, m_report_ready, m_write
):
"""If reprovisioning, imds metadata will be fetched twice"""
- ovfenv = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "True"}
- )
+ ovfenv = construct_ovf_env(preprovisioned_vm=True)
data = {"ovfcontent": ovfenv, "sys_cfg": {}}
dsrc = self._get_ds(data)
@@ -1409,9 +1377,7 @@ scbus-1 on xpt0 bus 0
self, poll_imds_func, m_report_ready, m_write
):
"""If reprovisioning, report ready at the end"""
- ovfenv = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "True"}
- )
+ ovfenv = construct_ovf_env(preprovisioned_vm=True)
data = {"ovfcontent": ovfenv, "sys_cfg": {}}
dsrc = self._get_ds(data)
@@ -1432,11 +1398,8 @@ scbus-1 on xpt0 bus 0
self, detect_nics, poll_imds_func, report_ready_func, m_write
):
"""If reprovisioning, report ready at the end"""
- ovfenv = construct_valid_ovf_env(
- platform_settings={
- "PreprovisionedVMType": "Savable",
- "PreprovisionedVm": "True",
- }
+ ovfenv = construct_ovf_env(
+ preprovisioned_vm=True, preprovisioned_vm_type="Savable"
)
data = {"ovfcontent": ovfenv, "sys_cfg": {}}
@@ -1459,9 +1422,7 @@ scbus-1 on xpt0 bus 0
self, m_readurl, m_report_ready, m_media_switch, m_write
):
"""If reprovisioning, report ready using the obtained lease"""
- ovfenv = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "True"}
- )
+ ovfenv = construct_ovf_env(preprovisioned_vm=True)
data = {"ovfcontent": ovfenv, "sys_cfg": {}}
dsrc = self._get_ds(data)
@@ -1476,7 +1437,7 @@ scbus-1 on xpt0 bus 0
self.m_dhcp.return_value.obtain_lease.return_value = lease
m_media_switch.return_value = None
- reprovision_ovfenv = construct_valid_ovf_env()
+ reprovision_ovfenv = construct_ovf_env()
m_readurl.return_value = url_helper.StringResponse(
reprovision_ovfenv.encode("utf-8")
)
@@ -1490,7 +1451,7 @@ scbus-1 on xpt0 bus 0
def test_waagent_d_has_0700_perms(self):
# we expect /var/lib/waagent to be created with mode 0700
- dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ dsrc = self._get_ds({"ovfcontent": construct_ovf_env()})
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertTrue(os.path.isdir(self.waagent_d))
@@ -1502,9 +1463,8 @@ scbus-1 on xpt0 bus 0
def test_network_config_set_from_imds(self, m_driver):
"""Datasource.network_config returns IMDS network data."""
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
- odata = {}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": sys_cfg,
}
expected_network_config = {
@@ -1531,9 +1491,8 @@ scbus-1 on xpt0 bus 0
):
"""Datasource.network_config adds route-metric to secondary nics."""
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
- odata = {}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": sys_cfg,
}
expected_network_config = {
@@ -1583,9 +1542,8 @@ scbus-1 on xpt0 bus 0
):
"""If an IP address is empty then there should no config for it."""
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
- odata = {}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": sys_cfg,
}
expected_network_config = {
@@ -1610,9 +1568,8 @@ scbus-1 on xpt0 bus 0
def test_availability_zone_set_from_imds(self):
"""Datasource.availability returns IMDS platformFaultDomain."""
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
- odata = {}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": sys_cfg,
}
dsrc = self._get_ds(data)
@@ -1622,9 +1579,8 @@ scbus-1 on xpt0 bus 0
def test_region_set_from_imds(self):
"""Datasource.region returns IMDS region location."""
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
- odata = {}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": sys_cfg,
}
dsrc = self._get_ds(data)
@@ -1638,7 +1594,7 @@ scbus-1 on xpt0 bus 0
}
}
data = {
- "ovfcontent": construct_valid_ovf_env(data={}),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": sys_cfg,
}
@@ -1651,8 +1607,7 @@ scbus-1 on xpt0 bus 0
)
def test_username_used(self):
- odata = {"HostName": "myhost", "UserName": "myuser"}
- data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+ data = {"ovfcontent": construct_ovf_env(username="myuser")}
dsrc = self._get_ds(data)
ret = dsrc.get_data()
@@ -1661,13 +1616,14 @@ scbus-1 on xpt0 bus 0
dsrc.cfg["system_info"]["default_user"]["name"], "myuser"
)
+ assert "ssh_pwauth" not in dsrc.cfg
+
def test_password_given(self):
- odata = {
- "HostName": "myhost",
- "UserName": "myuser",
- "UserPassword": "mypass",
+ data = {
+ "ovfcontent": construct_ovf_env(
+ username="myuser", password="mypass"
+ )
}
- data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
dsrc = self._get_ds(data)
ret = dsrc.get_data()
@@ -1676,7 +1632,7 @@ scbus-1 on xpt0 bus 0
defuser = dsrc.cfg["system_info"]["default_user"]
# default user should be updated to the username and should not be locked.
- self.assertEqual(defuser["name"], odata["UserName"])
+ self.assertEqual(defuser["name"], "myuser")
self.assertFalse(defuser["lock_passwd"])
# passwd is a crypt-formatted string: $id$salt$encrypted
# encrypting the plaintext with the salt (everything up to the final '$')
@@ -1684,19 +1640,102 @@ scbus-1 on xpt0 bus 0
pos = defuser["passwd"].rfind("$") + 1
self.assertEqual(
defuser["passwd"],
- crypt.crypt(odata["UserPassword"], defuser["passwd"][0:pos]),
+ crypt.crypt("mypass", defuser["passwd"][0:pos]),
)
# the same hashed value should also be present in cfg['password']
self.assertEqual(defuser["passwd"], dsrc.cfg["password"])
+ assert dsrc.cfg["ssh_pwauth"] is True
+
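The salt-recovery check used in test_password_given can be seen in isolation; a self-contained sketch (note that the crypt module is deprecated since Python 3.11):

    import crypt

    hashed = crypt.crypt("mypass")            # e.g. "$6$<salt>$<encrypted>"
    salt = hashed[: hashed.rfind("$") + 1]    # everything up to the final '$'
    # Re-encrypting the plaintext with the recovered salt reproduces the hash.
    assert crypt.crypt("mypass", salt) == hashed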
+ def test_password_with_disable_ssh_pw_auth_true(self):
+ data = {
+ "ovfcontent": construct_ovf_env(
+ username="myuser",
+ password="mypass",
+ disable_ssh_password_auth=True,
+ )
+ }
+
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+
+ assert dsrc.cfg["ssh_pwauth"] is False
+
+ def test_password_with_disable_ssh_pw_auth_false(self):
+ data = {
+ "ovfcontent": construct_ovf_env(
+ username="myuser",
+ password="mypass",
+ disable_ssh_password_auth=False,
+ )
+ }
+
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+
+ assert dsrc.cfg["ssh_pwauth"] is True
+
+ def test_password_with_disable_ssh_pw_auth_unspecified(self):
+ data = {
+ "ovfcontent": construct_ovf_env(
+ username="myuser",
+ password="mypass",
+ disable_ssh_password_auth=None,
+ )
+ }
+
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+
+ assert dsrc.cfg["ssh_pwauth"] is True
+
+ def test_no_password_with_disable_ssh_pw_auth_true(self):
+ data = {
+ "ovfcontent": construct_ovf_env(
+ username="myuser",
+ disable_ssh_password_auth=True,
+ )
+ }
+
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+
+ assert dsrc.cfg["ssh_pwauth"] is False
+
+ def test_no_password_with_disable_ssh_pw_auth_false(self):
+ data = {
+ "ovfcontent": construct_ovf_env(
+ username="myuser",
+ disable_ssh_password_auth=False,
+ )
+ }
+
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+
+ assert dsrc.cfg["ssh_pwauth"] is True
+
+ def test_no_password_with_disable_ssh_pw_auth_unspecified(self):
+ data = {
+ "ovfcontent": construct_ovf_env(
+ username="myuser",
+ disable_ssh_password_auth=None,
+ )
+ }
+
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+
+ assert "ssh_pwauth" not in dsrc.cfg
+
def test_user_not_locked_if_password_redacted(self):
- odata = {
- "HostName": "myhost",
- "UserName": "myuser",
- "UserPassword": dsaz.DEF_PASSWD_REDACTION,
+ data = {
+ "ovfcontent": construct_ovf_env(
+ username="myuser",
+ password=dsaz.DEF_PASSWD_REDACTION,
+ )
}
- data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
dsrc = self._get_ds(data)
ret = dsrc.get_data()
@@ -1705,24 +1744,13 @@ scbus-1 on xpt0 bus 0
defuser = dsrc.cfg["system_info"]["default_user"]
# default user should be updated to the username and should not be locked.
- self.assertEqual(defuser["name"], odata["UserName"])
+ self.assertEqual(defuser["name"], "myuser")
self.assertIn("lock_passwd", defuser)
self.assertFalse(defuser["lock_passwd"])
- def test_userdata_plain(self):
- mydata = "FOOBAR"
- odata = {"UserData": {"text": mydata, "encoding": "plain"}}
- data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- self.assertEqual(decode_binary(dsrc.userdata_raw), mydata)
-
def test_userdata_found(self):
mydata = "FOOBAR"
- odata = {"UserData": {"text": b64e(mydata), "encoding": "base64"}}
- data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
+ data = {"ovfcontent": construct_ovf_env(custom_data=mydata)}
dsrc = self._get_ds(data)
ret = dsrc.get_data()
@@ -1731,9 +1759,8 @@ scbus-1 on xpt0 bus 0
def test_default_ephemeral_configs_ephemeral_exists(self):
# make sure the ephemeral configs are correct if disk present
- odata = {}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": {},
}
@@ -1761,9 +1788,8 @@ scbus-1 on xpt0 bus 0
def test_default_ephemeral_configs_ephemeral_does_not_exist(self):
# make sure the ephemeral configs are correct if disk not present
- odata = {}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": {},
}
@@ -1783,34 +1809,9 @@ scbus-1 on xpt0 bus 0
assert "disk_setup" not in cfg
assert "fs_setup" not in cfg
- def test_provide_disk_aliases(self):
- # Make sure that user can affect disk aliases
- dscfg = {"disk_aliases": {"ephemeral0": "/dev/sdc"}}
- odata = {
- "HostName": "myhost",
- "UserName": "myuser",
- "dscfg": {"text": b64e(yaml.dump(dscfg)), "encoding": "base64"},
- }
- usercfg = {
- "disk_setup": {
- "/dev/sdc": {"something": "..."},
- "ephemeral0": False,
- }
- }
- userdata = "#cloud-config" + yaml.dump(usercfg) + "\n"
-
- ovfcontent = construct_valid_ovf_env(data=odata, userdata=userdata)
- data = {"ovfcontent": ovfcontent, "sys_cfg": {}}
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- cfg = dsrc.get_config_obj()
- self.assertTrue(cfg)
-
def test_userdata_arrives(self):
userdata = "This is my user-data"
- xml = construct_valid_ovf_env(data={}, userdata=userdata)
+ xml = construct_ovf_env(custom_data=userdata)
data = {"ovfcontent": xml}
dsrc = self._get_ds(data)
dsrc.get_data()
@@ -1818,12 +1819,11 @@ scbus-1 on xpt0 bus 0
self.assertEqual(userdata.encode("us-ascii"), dsrc.userdata_raw)
def test_password_redacted_in_ovf(self):
- odata = {
- "HostName": "myhost",
- "UserName": "myuser",
- "UserPassword": "mypass",
+ data = {
+ "ovfcontent": construct_ovf_env(
+ username="myuser", password="mypass"
+ )
}
- data = {"ovfcontent": construct_valid_ovf_env(data=odata)}
dsrc = self._get_ds(data)
ret = dsrc.get_data()
@@ -1846,7 +1846,7 @@ scbus-1 on xpt0 bus 0
self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text)
def test_ovf_env_arrives_in_waagent_dir(self):
- xml = construct_valid_ovf_env(data={}, userdata="FOODATA")
+ xml = construct_ovf_env(custom_data="FOODATA")
dsrc = self._get_ds({"ovfcontent": xml})
dsrc.get_data()
@@ -1857,18 +1857,18 @@ scbus-1 on xpt0 bus 0
self.xml_equals(xml, load_file(ovf_env_path))
def test_ovf_can_include_unicode(self):
- xml = construct_valid_ovf_env(data={})
+ xml = construct_ovf_env()
xml = "\ufeff{0}".format(xml)
dsrc = self._get_ds({"ovfcontent": xml})
dsrc.get_data()
def test_dsaz_report_ready_returns_true_when_report_succeeds(self):
- dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ dsrc = self._get_ds({"ovfcontent": construct_ovf_env()})
assert dsrc._report_ready() == []
@mock.patch(MOCKPATH + "report_diagnostic_event")
def test_dsaz_report_ready_failure_reports_telemetry(self, m_report_diag):
- dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ dsrc = self._get_ds({"ovfcontent": construct_ovf_env()})
self.m_get_metadata_from_fabric.side_effect = Exception("foo")
with pytest.raises(Exception):
@@ -1883,7 +1883,7 @@ scbus-1 on xpt0 bus 0
]
def test_dsaz_report_failure_returns_true_when_report_succeeds(self):
- dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ dsrc = self._get_ds({"ovfcontent": construct_ovf_env()})
with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
# mock crawl metadata failure to cause report failure
@@ -1895,7 +1895,7 @@ scbus-1 on xpt0 bus 0
def test_dsaz_report_failure_returns_false_and_does_not_propagate_exc(
self,
):
- dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ dsrc = self._get_ds({"ovfcontent": construct_ovf_env()})
with mock.patch.object(
dsrc, "crawl_metadata"
@@ -1923,7 +1923,7 @@ scbus-1 on xpt0 bus 0
self.assertEqual(2, self.m_report_failure_to_fabric.call_count)
def test_dsaz_report_failure_description_msg(self):
- dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ dsrc = self._get_ds({"ovfcontent": construct_ovf_env()})
with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
# mock crawl metadata failure to cause report failure
@@ -1936,7 +1936,7 @@ scbus-1 on xpt0 bus 0
)
def test_dsaz_report_failure_no_description_msg(self):
- dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ dsrc = self._get_ds({"ovfcontent": construct_ovf_env()})
with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
m_crawl_metadata.side_effect = Exception
@@ -1947,7 +1947,7 @@ scbus-1 on xpt0 bus 0
)
def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self):
- dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ dsrc = self._get_ds({"ovfcontent": construct_ovf_env()})
with mock.patch.object(
dsrc, "crawl_metadata"
@@ -1965,7 +1965,7 @@ scbus-1 on xpt0 bus 0
)
def test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self):
- dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ dsrc = self._get_ds({"ovfcontent": construct_ovf_env()})
with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
# mock crawl metadata failure to cause report failure
@@ -1988,13 +1988,13 @@ scbus-1 on xpt0 bus 0
def test_exception_fetching_fabric_data_doesnt_propagate(self):
"""Errors communicating with fabric should warn, but return True."""
- dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ dsrc = self._get_ds({"ovfcontent": construct_ovf_env()})
self.m_get_metadata_from_fabric.side_effect = Exception
ret = self._get_and_setup(dsrc)
self.assertTrue(ret)
def test_fabric_data_included_in_metadata(self):
- dsrc = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ dsrc = self._get_ds({"ovfcontent": construct_ovf_env()})
self.m_get_metadata_from_fabric.return_value = ["ssh-key-value"]
ret = self._get_and_setup(dsrc)
self.assertTrue(ret)
@@ -2006,7 +2006,7 @@ scbus-1 on xpt0 bus 0
upper_iid = EXAMPLE_UUID.upper()
# lowercase current UUID
ds = self._get_ds(
- {"ovfcontent": construct_valid_ovf_env()}, instance_id=lower_iid
+ {"ovfcontent": construct_ovf_env()}, instance_id=lower_iid
)
# UPPERCASE previous
write_file(
@@ -2018,7 +2018,7 @@ scbus-1 on xpt0 bus 0
# UPPERCASE current UUID
ds = self._get_ds(
- {"ovfcontent": construct_valid_ovf_env()}, instance_id=upper_iid
+ {"ovfcontent": construct_ovf_env()}, instance_id=upper_iid
)
# lowercase previous
write_file(
@@ -2030,7 +2030,7 @@ scbus-1 on xpt0 bus 0
def test_instance_id_endianness(self):
"""Return the previous iid when dmi uuid is the byteswapped iid."""
- ds = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ ds = self._get_ds({"ovfcontent": construct_ovf_env()})
# byte-swapped previous
write_file(
os.path.join(self.paths.cloud_dir, "data", "instance-id"),
@@ -2049,12 +2049,12 @@ scbus-1 on xpt0 bus 0
self.assertEqual(self.instance_id, ds.metadata["instance-id"])
def test_instance_id_from_dmidecode_used(self):
- ds = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ ds = self._get_ds({"ovfcontent": construct_ovf_env()})
ds.get_data()
self.assertEqual(self.instance_id, ds.metadata["instance-id"])
def test_instance_id_from_dmidecode_used_for_builtin(self):
- ds = self._get_ds({"ovfcontent": construct_valid_ovf_env()})
+ ds = self._get_ds({"ovfcontent": construct_ovf_env()})
ds.get_data()
self.assertEqual(self.instance_id, ds.metadata["instance-id"])
@@ -2080,126 +2080,12 @@ scbus-1 on xpt0 bus 0
[mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list
)
- @mock.patch(
- "cloudinit.sources.DataSourceAzure.device_driver", return_value=None
- )
- @mock.patch("cloudinit.net.generate_fallback_config")
- def test_imds_network_config(self, mock_fallback, m_driver):
- """Network config is generated from IMDS network data when present."""
- sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
- odata = {"HostName": "myhost", "UserName": "myuser"}
- data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
- "sys_cfg": sys_cfg,
- }
-
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
-
- expected_cfg = {
- "ethernets": {
- "eth0": {
- "dhcp4": True,
- "dhcp4-overrides": {"route-metric": 100},
- "dhcp6": False,
- "match": {"macaddress": "00:0d:3a:04:75:98"},
- "set-name": "eth0",
- }
- },
- "version": 2,
- }
-
- self.assertEqual(expected_cfg, dsrc.network_config)
- mock_fallback.assert_not_called()
-
- @mock.patch("cloudinit.net.get_interface_mac")
- @mock.patch("cloudinit.net.get_devicelist")
- @mock.patch("cloudinit.net.device_driver")
- @mock.patch("cloudinit.net.generate_fallback_config")
- def test_imds_network_ignored_when_apply_network_config_false(
- self, mock_fallback, mock_dd, mock_devlist, mock_get_mac
- ):
- """When apply_network_config is False, use fallback instead of IMDS."""
- sys_cfg = {"datasource": {"Azure": {"apply_network_config": False}}}
- odata = {"HostName": "myhost", "UserName": "myuser"}
- data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
- "sys_cfg": sys_cfg,
- }
- fallback_config = {
- "version": 1,
- "config": [
- {
- "type": "physical",
- "name": "eth0",
- "mac_address": "00:11:22:33:44:55",
- "params": {"driver": "hv_netsvc"},
- "subnets": [{"type": "dhcp"}],
- }
- ],
- }
- mock_fallback.return_value = fallback_config
-
- mock_devlist.return_value = ["eth0"]
- mock_dd.return_value = ["hv_netsvc"]
- mock_get_mac.return_value = "00:11:22:33:44:55"
-
- dsrc = self._get_ds(data)
- self.assertTrue(dsrc.get_data())
- self.assertEqual(dsrc.network_config, fallback_config)
-
- @mock.patch("cloudinit.net.get_interface_mac")
- @mock.patch("cloudinit.net.get_devicelist")
- @mock.patch("cloudinit.net.device_driver")
- @mock.patch("cloudinit.net.generate_fallback_config", autospec=True)
- def test_fallback_network_config(
- self, mock_fallback, mock_dd, mock_devlist, mock_get_mac
- ):
- """On absent IMDS network data, generate network fallback config."""
- odata = {"HostName": "myhost", "UserName": "myuser"}
- data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
- "sys_cfg": {},
- }
-
- fallback_config = {
- "version": 1,
- "config": [
- {
- "type": "physical",
- "name": "eth0",
- "mac_address": "00:11:22:33:44:55",
- "params": {"driver": "hv_netsvc"},
- "subnets": [{"type": "dhcp"}],
- }
- ],
- }
- mock_fallback.return_value = fallback_config
-
- mock_devlist.return_value = ["eth0"]
- mock_dd.return_value = ["hv_netsvc"]
- mock_get_mac.return_value = "00:11:22:33:44:55"
-
- dsrc = self._get_ds(data)
- # Represent empty response from network imds
- self.m_get_metadata_from_imds.return_value = {}
- ret = dsrc.get_data()
- self.assertTrue(ret)
-
- netconfig = dsrc.network_config
- self.assertEqual(netconfig, fallback_config)
- mock_fallback.assert_called_with(
- blacklist_drivers=["mlx4_core", "mlx5_core"], config_driver=True
- )
-
@mock.patch(MOCKPATH + "net.get_interfaces", autospec=True)
def test_blacklist_through_distro(self, m_net_get_interfaces):
"""Verify Azure DS updates blacklist drivers in the distro's
networking object."""
- odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": {},
}
@@ -2221,9 +2107,8 @@ scbus-1 on xpt0 bus 0
)
def test_get_public_ssh_keys_with_imds(self, m_parse_certificates):
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
- odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": sys_cfg,
}
dsrc = self._get_ds(data)
@@ -2256,9 +2141,8 @@ scbus-1 on xpt0 bus 0
imds_data["compute"]["publicKeys"][0]["keyData"] = "no-openssh-format"
m_get_metadata_from_imds.return_value = imds_data
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
- odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": sys_cfg,
}
dsrc = self._get_ds(data)
@@ -2272,9 +2156,8 @@ scbus-1 on xpt0 bus 0
def test_get_public_ssh_keys_without_imds(self, m_get_metadata_from_imds):
m_get_metadata_from_imds.return_value = dict()
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
- odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": sys_cfg,
}
dsrc = self._get_ds(data)
@@ -2295,9 +2178,8 @@ scbus-1 on xpt0 bus 0
m_get_metadata_from_imds.side_effect = get_metadata_from_imds_side_eff
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
- odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": sys_cfg,
}
dsrc = self._get_ds(data)
@@ -2326,9 +2208,8 @@ scbus-1 on xpt0 bus 0
)
def test_imds_api_version_wanted_exists(self, m_get_metadata_from_imds):
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
- odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": sys_cfg,
}
dsrc = self._get_ds(data)
@@ -2348,9 +2229,8 @@ scbus-1 on xpt0 bus 0
@mock.patch(MOCKPATH + "get_metadata_from_imds")
def test_hostname_from_imds(self, m_get_metadata_from_imds):
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
- odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": sys_cfg,
}
imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
@@ -2367,9 +2247,8 @@ scbus-1 on xpt0 bus 0
@mock.patch(MOCKPATH + "get_metadata_from_imds")
def test_username_from_imds(self, m_get_metadata_from_imds):
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
- odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": sys_cfg,
}
imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
@@ -2388,9 +2267,8 @@ scbus-1 on xpt0 bus 0
@mock.patch(MOCKPATH + "get_metadata_from_imds")
def test_disable_password_from_imds(self, m_get_metadata_from_imds):
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
- odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": sys_cfg,
}
imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
@@ -2407,9 +2285,8 @@ scbus-1 on xpt0 bus 0
@mock.patch(MOCKPATH + "get_metadata_from_imds")
def test_userdata_from_imds(self, m_get_metadata_from_imds):
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
- odata = {"HostName": "myhost", "UserName": "myuser"}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(),
"sys_cfg": sys_cfg,
}
userdata = "userdataImds"
@@ -2431,14 +2308,9 @@ scbus-1 on xpt0 bus 0
self, m_get_metadata_from_imds
):
userdataOVF = "userdataOVF"
- odata = {
- "HostName": "myhost",
- "UserName": "myuser",
- "UserData": {"text": b64e(userdataOVF), "encoding": "base64"},
- }
sys_cfg = {"datasource": {"Azure": {"apply_network_config": True}}}
data = {
- "ovfcontent": construct_valid_ovf_env(data=odata),
+ "ovfcontent": construct_ovf_env(custom_data=userdataOVF),
"sys_cfg": sys_cfg,
}
@@ -2487,18 +2359,17 @@ class TestLoadAzureDsDir(CiTestCase):
class TestReadAzureOvf(CiTestCase):
def test_invalid_xml_raises_non_azure_ds(self):
- invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
+ invalid_xml = "<foo>" + construct_ovf_env()
self.assertRaises(
dsaz.BrokenAzureDataSource, dsaz.read_azure_ovf, invalid_xml
)
def test_load_with_pubkeys(self):
- mypklist = [{"fingerprint": "fp1", "path": "path1", "value": ""}]
- pubkeys = [(x["fingerprint"], x["path"], x["value"]) for x in mypklist]
- content = construct_valid_ovf_env(pubkeys=pubkeys)
+ public_keys = [{"fingerprint": "fp1", "path": "path1", "value": ""}]
+ content = construct_ovf_env(public_keys=public_keys)
(_md, _ud, cfg) = dsaz.read_azure_ovf(content)
- for mypk in mypklist:
- self.assertIn(mypk, cfg["_pubkeys"])
+ for pk in public_keys:
+ self.assertIn(pk, cfg["_pubkeys"])
class TestCanDevBeReformatted(CiTestCase):
@@ -2866,9 +2737,7 @@ class TestPreprovisioningReadAzureOvfFlag(CiTestCase):
def test_read_azure_ovf_with_true_flag(self):
"""The read_azure_ovf method should set the PreprovisionedVM
cfg flag if the proper setting is present."""
- content = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "True"}
- )
+ content = construct_ovf_env(preprovisioned_vm=True)
ret = dsaz.read_azure_ovf(content)
cfg = ret[2]
self.assertTrue(cfg["PreprovisionedVm"])
@@ -2876,9 +2745,7 @@ class TestPreprovisioningReadAzureOvfFlag(CiTestCase):
def test_read_azure_ovf_with_false_flag(self):
"""The read_azure_ovf method should set the PreprovisionedVM
cfg flag to false if the proper setting is false."""
- content = construct_valid_ovf_env(
- platform_settings={"PreprovisionedVm": "False"}
- )
+ content = construct_ovf_env(preprovisioned_vm=False)
ret = dsaz.read_azure_ovf(content)
cfg = ret[2]
self.assertFalse(cfg["PreprovisionedVm"])
@@ -2886,7 +2753,7 @@ class TestPreprovisioningReadAzureOvfFlag(CiTestCase):
def test_read_azure_ovf_without_flag(self):
"""The read_azure_ovf method should not set the
PreprovisionedVM cfg flag."""
- content = construct_valid_ovf_env()
+ content = construct_ovf_env()
ret = dsaz.read_azure_ovf(content)
cfg = ret[2]
self.assertFalse(cfg["PreprovisionedVm"])
@@ -2895,11 +2762,8 @@ class TestPreprovisioningReadAzureOvfFlag(CiTestCase):
def test_read_azure_ovf_with_running_type(self):
"""The read_azure_ovf method should set PreprovisionedVMType
cfg flag to Running."""
- content = construct_valid_ovf_env(
- platform_settings={
- "PreprovisionedVMType": "Running",
- "PreprovisionedVm": "True",
- }
+ content = construct_ovf_env(
+ preprovisioned_vm=True, preprovisioned_vm_type="Running"
)
ret = dsaz.read_azure_ovf(content)
cfg = ret[2]
@@ -2909,11 +2773,8 @@ class TestPreprovisioningReadAzureOvfFlag(CiTestCase):
def test_read_azure_ovf_with_savable_type(self):
"""The read_azure_ovf method should set PreprovisionedVMType
cfg flag to Savable."""
- content = construct_valid_ovf_env(
- platform_settings={
- "PreprovisionedVMType": "Savable",
- "PreprovisionedVm": "True",
- }
+ content = construct_ovf_env(
+ preprovisioned_vm=True, preprovisioned_vm_type="Savable"
)
ret = dsaz.read_azure_ovf(content)
cfg = ret[2]
@@ -2997,7 +2858,7 @@ class TestDeterminePPSTypeScenarios:
== dsaz.PPSType.UNKNOWN
)
assert is_file.mock_calls == [
- mock.call(dsaz.REPORTED_READY_MARKER_FILE)
+ mock.call(azure_ds._reported_ready_marker_file)
]
@@ -3014,10 +2875,7 @@ class TestReprovision(CiTestCase):
def test_reprovision_calls__poll_imds(self, _poll_imds, isfile):
"""_reprovision will poll IMDS."""
isfile.return_value = False
- hostname = "myhost"
- username = "myuser"
- odata = {"HostName": hostname, "UserName": username}
- _poll_imds.return_value = construct_valid_ovf_env(data=odata)
+ _poll_imds.return_value = construct_ovf_env()
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
dsa._reprovision()
_poll_imds.assert_called_with()
@@ -3053,7 +2911,7 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
self.assertEqual(1, m_detach.call_count)
self.assertEqual(1, m_writefile.call_count)
m_writefile.assert_called_with(
- dsaz.REPORTED_READY_MARKER_FILE, mock.ANY
+ dsa._reported_ready_marker_file, mock.ANY
)
@mock.patch(MOCKPATH + "util.write_file", autospec=True)
@@ -3231,8 +3089,8 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
@mock.patch("cloudinit.net.find_fallback_nic", return_value="eth9")
-@mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
-@mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+@mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network")
+@mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery")
@mock.patch(
"cloudinit.sources.helpers.netlink.wait_for_media_disconnect_connect"
)
@@ -3288,7 +3146,9 @@ class TestPreprovisioningPollIMDS(CiTestCase):
m_request.side_effect = fake_timeout_once
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ with mock.patch.object(
+ dsa, "_reported_ready_marker_file", report_file
+ ):
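+ # _reported_ready_marker_file is per-instance state, so patch it on the datasource object.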
dsa._poll_imds()
assert m_report_ready.mock_calls == [mock.call()]
@@ -3316,7 +3176,9 @@ class TestPreprovisioningPollIMDS(CiTestCase):
m_isfile.return_value = True
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
dsa._ephemeral_dhcp_ctx = mock.Mock(lease={})
- with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ with mock.patch.object(
+ dsa, "_reported_ready_marker_file", report_file
+ ):
dsa._poll_imds()
self.assertEqual(0, m_dhcp.call_count)
self.assertEqual(0, m_media_switch.call_count)
@@ -3353,8 +3215,8 @@ class TestPreprovisioningPollIMDS(CiTestCase):
report_file = self.tmp_path("report_marker", self.tmp)
m_isfile.return_value = True
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
- with mock.patch(
- MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file
+ with mock.patch.object(
+ dsa, "_reported_ready_marker_file", report_file
), mock.patch.object(dsa, "_ephemeral_dhcp_ctx") as m_dhcp_ctx:
m_dhcp_ctx.obtain_lease.return_value = "Dummy lease"
dsa._ephemeral_dhcp_ctx = m_dhcp_ctx
@@ -3388,7 +3250,9 @@ class TestPreprovisioningPollIMDS(CiTestCase):
]
m_media_switch.return_value = None
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
- with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ with mock.patch.object(
+ dsa, "_reported_ready_marker_file", report_file
+ ):
dsa._poll_imds()
self.assertEqual(m_report_ready.call_count, 0)
@@ -3416,7 +3280,9 @@ class TestPreprovisioningPollIMDS(CiTestCase):
m_media_switch.return_value = None
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
self.assertFalse(os.path.exists(report_file))
- with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ with mock.patch.object(
+ dsa, "_reported_ready_marker_file", report_file
+ ):
dsa._poll_imds()
self.assertEqual(m_report_ready.call_count, 1)
self.assertTrue(os.path.exists(report_file))
@@ -3446,7 +3312,9 @@ class TestPreprovisioningPollIMDS(CiTestCase):
m_report_ready.side_effect = [Exception("fail")]
dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
self.assertFalse(os.path.exists(report_file))
- with mock.patch(MOCKPATH + "REPORTED_READY_MARKER_FILE", report_file):
+ with mock.patch.object(
+ dsa, "_reported_ready_marker_file", report_file
+ ):
self.assertRaises(InvalidMetaDataException, dsa._poll_imds)
self.assertEqual(m_report_ready.call_count, 1)
self.assertFalse(os.path.exists(report_file))
@@ -3458,8 +3326,8 @@ class TestPreprovisioningPollIMDS(CiTestCase):
@mock.patch(
"cloudinit.sources.helpers.netlink.wait_for_media_disconnect_connect"
)
-@mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network", autospec=True)
-@mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+@mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network", autospec=True)
+@mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery")
@mock.patch("requests.Session.request")
class TestAzureDataSourcePreprovisioning(CiTestCase):
def setUp(self):
@@ -3535,8 +3403,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
full_url = url.format(host)
hostname = "myhost"
username = "myuser"
- odata = {"HostName": hostname, "UserName": username}
- content = construct_valid_ovf_env(data=odata)
+ content = construct_ovf_env(username=username, hostname=hostname)
m_request.return_value = mock.MagicMock(
status_code=200, text=content, content=content
)
@@ -4203,15 +4070,12 @@ class TestProvisioning:
def test_running_pps(self):
self.imds_md["extended"]["compute"]["ppsType"] = "Running"
- ovf_data = {"HostName": "myhost", "UserName": "myuser"}
nl_sock = mock.MagicMock()
self.mock_netlink.create_bound_netlink_socket.return_value = nl_sock
self.mock_readurl.side_effect = [
mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
- mock.MagicMock(
- contents=construct_valid_ovf_env(data=ovf_data).encode()
- ),
+ mock.MagicMock(contents=construct_ovf_env().encode()),
mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
]
self.mock_azure_get_metadata_from_fabric.return_value = []
@@ -4293,7 +4157,6 @@ class TestProvisioning:
def test_savable_pps(self):
self.imds_md["extended"]["compute"]["ppsType"] = "Savable"
- ovf_data = {"HostName": "myhost", "UserName": "myuser"}
nl_sock = mock.MagicMock()
self.mock_netlink.create_bound_netlink_socket.return_value = nl_sock
@@ -4306,9 +4169,7 @@ class TestProvisioning:
mock.MagicMock(
contents=json.dumps(self.imds_md["network"]).encode()
),
- mock.MagicMock(
- contents=construct_valid_ovf_env(data=ovf_data).encode()
- ),
+ mock.MagicMock(contents=construct_ovf_env().encode()),
mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
]
self.mock_azure_get_metadata_from_fabric.return_value = []
@@ -4402,13 +4263,10 @@ class TestProvisioning:
def test_recovery_pps(self, pps_type):
self.patched_reported_ready_marker_path.write_text("")
self.imds_md["extended"]["compute"]["ppsType"] = pps_type
- ovf_data = {"HostName": "myhost", "UserName": "myuser"}
self.mock_readurl.side_effect = [
mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
- mock.MagicMock(
- contents=construct_valid_ovf_env(data=ovf_data).encode()
- ),
+ mock.MagicMock(contents=construct_ovf_env().encode()),
mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
]
self.mock_azure_get_metadata_from_fabric.return_value = []
diff --git a/tests/unittests/sources/test_bigstep.py b/tests/unittests/sources/test_bigstep.py
new file mode 100644
index 00000000..148cfa0b
--- /dev/null
+++ b/tests/unittests/sources/test_bigstep.py
@@ -0,0 +1,46 @@
+import json
+import os
+
+import httpretty
+import pytest
+
+from cloudinit import helpers
+from cloudinit.sources import DataSourceBigstep as bigstep
+from tests.unittests.helpers import mock
+
+M_PATH = "cloudinit.sources.DataSourceBigstep."
+
+IMDS_URL = "http://bigstep.com"
+METADATA_BODY = json.dumps(
+ {
+ "metadata": "metadata",
+ "vendordata_raw": "vendordata_raw",
+ "userdata_raw": "userdata_raw",
+ }
+)
+
+
+class TestBigstep:
+ @httpretty.activate
+ @pytest.mark.parametrize("custom_paths", [False, True])
+ @mock.patch(M_PATH + "util.load_file", return_value=IMDS_URL)
+ def test_get_data_honor_cloud_dir(self, m_load_file, custom_paths, tmpdir):
+ httpretty.register_uri(httpretty.GET, IMDS_URL, body=METADATA_BODY)
+
+ paths = {}
+ url_file = "/var/lib/cloud/data/seed/bigstep/url"
+ if custom_paths:
+ paths = {
+ "cloud_dir": tmpdir.join("cloud"),
+ "run_dir": tmpdir,
+ "templates_dir": tmpdir,
+ }
+ url_file = os.path.join(
+ paths["cloud_dir"], "data", "seed", "bigstep", "url"
+ )
+
+ ds = bigstep.DataSourceBigstep(
+ sys_cfg={}, distro=mock.Mock(), paths=helpers.Paths(paths)
+ )
+ assert ds._get_data()
+ assert [mock.call(url_file)] == m_load_file.call_args_list
diff --git a/tests/unittests/sources/test_cloudsigma.py b/tests/unittests/sources/test_cloudsigma.py
index 8cd58c96..b92c3723 100644
--- a/tests/unittests/sources/test_cloudsigma.py
+++ b/tests/unittests/sources/test_cloudsigma.py
@@ -58,12 +58,14 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase):
def test_get_hostname(self):
self.datasource.get_data()
- self.assertEqual("test_server", self.datasource.get_hostname())
+ self.assertEqual(
+ "test_server", self.datasource.get_hostname().hostname
+ )
self.datasource.metadata["name"] = ""
- self.assertEqual("65b2fb23", self.datasource.get_hostname())
+ self.assertEqual("65b2fb23", self.datasource.get_hostname().hostname)
utf8_hostname = b"\xd1\x82\xd0\xb5\xd1\x81\xd1\x82".decode("utf-8")
self.datasource.metadata["name"] = utf8_hostname
- self.assertEqual("65b2fb23", self.datasource.get_hostname())
+ self.assertEqual("65b2fb23", self.datasource.get_hostname().hostname)
def test_get_public_ssh_keys(self):
self.datasource.get_data()
diff --git a/tests/unittests/sources/test_cloudstack.py b/tests/unittests/sources/test_cloudstack.py
index f7c69f91..b37400d3 100644
--- a/tests/unittests/sources/test_cloudstack.py
+++ b/tests/unittests/sources/test_cloudstack.py
@@ -40,6 +40,11 @@ class TestCloudStackPasswordFetching(CiTestCase):
get_networkd_server_address,
)
)
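+ # Stub get_data_server to return None so these password tests never try to locate a real CloudStack data server.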
+ get_data_server = mock.MagicMock(return_value=None)
+ self.patches.enter_context(
+ mock.patch(mod_name + ".get_data_server", get_data_server)
+ )
+
self.tmp = self.tmp_dir()
def _set_password_server_response(self, response_string):
diff --git a/tests/unittests/sources/test_digitalocean.py b/tests/unittests/sources/test_digitalocean.py
index f3e6224e..47e46c66 100644
--- a/tests/unittests/sources/test_digitalocean.py
+++ b/tests/unittests/sources/test_digitalocean.py
@@ -178,7 +178,7 @@ class TestDataSourceDigitalOcean(CiTestCase):
self.assertEqual(DO_META.get("vendor_data"), ds.get_vendordata_raw())
self.assertEqual(DO_META.get("region"), ds.availability_zone)
self.assertEqual(DO_META.get("droplet_id"), ds.get_instance_id())
- self.assertEqual(DO_META.get("hostname"), ds.get_hostname())
+ self.assertEqual(DO_META.get("hostname"), ds.get_hostname().hostname)
# Single key
self.assertEqual(
diff --git a/tests/unittests/sources/test_ec2.py b/tests/unittests/sources/test_ec2.py
index e5648007..b7476391 100644
--- a/tests/unittests/sources/test_ec2.py
+++ b/tests/unittests/sources/test_ec2.py
@@ -211,7 +211,7 @@ SECONDARY_IP_METADATA_2018_09_24 = {
M_PATH_NET = "cloudinit.sources.DataSourceEc2.net."
-TAGS_METADATA_2021_03_23 = {
+TAGS_METADATA_2021_03_23: dict = {
**DEFAULT_METADATA,
"tags": {
"instance": {
@@ -837,13 +837,14 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.logs.getvalue(),
)
- @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+ @mock.patch("cloudinit.net.ephemeral.EphemeralIPv6Network")
+ @mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network")
@mock.patch("cloudinit.net.find_fallback_nic")
- @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery")
@mock.patch("cloudinit.sources.DataSourceEc2.util.is_FreeBSD")
@responses.activate
def test_ec2_local_performs_dhcp_on_non_bsd(
- self, m_is_bsd, m_dhcp, m_fallback_nic, m_net
+ self, m_is_bsd, m_dhcp, m_fallback_nic, m_net4, m_net6
):
"""Ec2Local returns True for valid platform data on non-BSD with dhcp.
@@ -873,7 +874,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
ret = ds.get_data()
self.assertTrue(ret)
m_dhcp.assert_called_once_with("eth9", None)
- m_net.assert_called_once_with(
+ m_net4.assert_called_once_with(
broadcast="192.168.2.255",
interface="eth9",
ip="192.168.2.9",
@@ -881,7 +882,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
router="192.168.2.1",
static_routes=None,
)
- self.assertIn("Crawl of metadata service took", self.logs.getvalue())
+ self.assertIn("Crawl of metadata service ", self.logs.getvalue())
@responses.activate
def test_get_instance_tags(self):
diff --git a/tests/unittests/sources/test_gce.py b/tests/unittests/sources/test_gce.py
index e030931b..1ce0c6ec 100644
--- a/tests/unittests/sources/test_gce.py
+++ b/tests/unittests/sources/test_gce.py
@@ -126,7 +126,7 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
self.ds.get_data()
shostname = GCE_META.get("instance/hostname").split(".")[0]
- self.assertEqual(shostname, self.ds.get_hostname())
+ self.assertEqual(shostname, self.ds.get_hostname().hostname)
self.assertEqual(
GCE_META.get("instance/id"), self.ds.get_instance_id()
@@ -147,7 +147,7 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
)
shostname = GCE_META_PARTIAL.get("instance/hostname").split(".")[0]
- self.assertEqual(shostname, self.ds.get_hostname())
+ self.assertEqual(shostname, self.ds.get_hostname().hostname)
def test_userdata_no_encoding(self):
"""check that user-data is read."""
diff --git a/tests/unittests/sources/test_hetzner.py b/tests/unittests/sources/test_hetzner.py
index f80ed45f..193b7e42 100644
--- a/tests/unittests/sources/test_hetzner.py
+++ b/tests/unittests/sources/test_hetzner.py
@@ -116,7 +116,7 @@ class TestDataSourceHetzner(CiTestCase):
self.assertTrue(m_readmd.called)
- self.assertEqual(METADATA.get("hostname"), ds.get_hostname())
+ self.assertEqual(METADATA.get("hostname"), ds.get_hostname().hostname)
self.assertEqual(METADATA.get("public-keys"), ds.get_public_ssh_keys())
diff --git a/tests/unittests/sources/test_init.py b/tests/unittests/sources/test_init.py
index ce8fc970..a42c6a72 100644
--- a/tests/unittests/sources/test_init.py
+++ b/tests/unittests/sources/test_init.py
@@ -272,9 +272,11 @@ class TestDataSource(CiTestCase):
self.assertEqual(
"test-subclass-hostname", datasource.metadata["local-hostname"]
)
- self.assertEqual("test-subclass-hostname", datasource.get_hostname())
+ self.assertEqual(
+ "test-subclass-hostname", datasource.get_hostname().hostname
+ )
datasource.metadata["local-hostname"] = "hostname.my.domain.com"
- self.assertEqual("hostname", datasource.get_hostname())
+ self.assertEqual("hostname", datasource.get_hostname().hostname)
def test_get_hostname_with_fqdn_returns_local_hostname_with_domain(self):
"""Datasource.get_hostname with fqdn set gets qualified hostname."""
@@ -285,7 +287,8 @@ class TestDataSource(CiTestCase):
self.assertTrue(datasource.get_data())
datasource.metadata["local-hostname"] = "hostname.my.domain.com"
self.assertEqual(
- "hostname.my.domain.com", datasource.get_hostname(fqdn=True)
+ "hostname.my.domain.com",
+ datasource.get_hostname(fqdn=True).hostname,
)
def test_get_hostname_without_metadata_uses_system_hostname(self):
@@ -300,10 +303,12 @@ class TestDataSource(CiTestCase):
with mock.patch(mock_fqdn) as m_fqdn:
m_gethost.return_value = "systemhostname.domain.com"
m_fqdn.return_value = None # No matching fqdn in /etc/hosts
- self.assertEqual("systemhostname", datasource.get_hostname())
+ self.assertEqual(
+ "systemhostname", datasource.get_hostname().hostname
+ )
self.assertEqual(
"systemhostname.domain.com",
- datasource.get_hostname(fqdn=True),
+ datasource.get_hostname(fqdn=True).hostname,
)
def test_get_hostname_without_metadata_returns_none(self):
@@ -316,9 +321,13 @@ class TestDataSource(CiTestCase):
mock_fqdn = "cloudinit.sources.util.get_fqdn_from_hosts"
with mock.patch("cloudinit.sources.util.get_hostname") as m_gethost:
with mock.patch(mock_fqdn) as m_fqdn:
- self.assertIsNone(datasource.get_hostname(metadata_only=True))
self.assertIsNone(
- datasource.get_hostname(fqdn=True, metadata_only=True)
+ datasource.get_hostname(metadata_only=True).hostname
+ )
+ self.assertIsNone(
+ datasource.get_hostname(
+ fqdn=True, metadata_only=True
+ ).hostname
)
self.assertEqual([], m_gethost.call_args_list)
self.assertEqual([], m_fqdn.call_args_list)
@@ -335,10 +344,12 @@ class TestDataSource(CiTestCase):
with mock.patch(mock_fqdn) as m_fqdn:
m_gethost.return_value = "systemhostname.domain.com"
m_fqdn.return_value = "fqdnhostname.domain.com"
- self.assertEqual("fqdnhostname", datasource.get_hostname())
+ self.assertEqual(
+ "fqdnhostname", datasource.get_hostname().hostname
+ )
self.assertEqual(
"fqdnhostname.domain.com",
- datasource.get_hostname(fqdn=True),
+ datasource.get_hostname(fqdn=True).hostname,
)
def test_get_data_does_not_write_instance_data_on_failure(self):
@@ -750,7 +761,9 @@ class TestDataSource(CiTestCase):
"""Validate get_hostname signature on all subclasses of DataSource."""
base_args = inspect.getfullargspec(DataSource.get_hostname)
# Import all DataSource subclasses so we can inspect them.
- modules = util.find_modules(os.path.dirname(os.path.dirname(__file__)))
+ modules = util.get_modules_from_dir(
+ os.path.dirname(os.path.dirname(__file__))
+ )
for _loc, name in modules.items():
mod_locs, _ = importer.find_module(name, ["cloudinit.sources"], [])
if mod_locs:
diff --git a/tests/unittests/sources/test_lxd.py b/tests/unittests/sources/test_lxd.py
index e11c3746..e60bb71f 100644
--- a/tests/unittests/sources/test_lxd.py
+++ b/tests/unittests/sources/test_lxd.py
@@ -17,7 +17,7 @@ from cloudinit.sources import InvalidMetaDataException
DS_PATH = "cloudinit.sources.DataSourceLXD."
-LStatResponse = namedtuple("lstatresponse", "st_mode")
+LStatResponse = namedtuple("LStatResponse", "st_mode")
NETWORK_V1 = {
@@ -34,7 +34,7 @@ NETWORK_V1 = {
def _add_network_v1_device(devname) -> dict:
"""Helper to inject device name into default network v1 config."""
- network_cfg = deepcopy(NETWORK_V1)
+ network_cfg: dict = deepcopy(NETWORK_V1)
network_cfg["config"][0]["name"] = devname
return network_cfg
@@ -51,14 +51,27 @@ LXD_V1_METADATA = {
},
}
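+# Like LXD_V1_METADATA, but without a network-config key so the fallback path is exercised.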
+LXD_V1_METADATA_NO_NETWORK_CONFIG = {
+ "meta-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n",
+ "user-data": "#cloud-config\npackages: [sl]\n",
+ "vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n",
+ "config": {
+ "user.user-data": "instance-id: my-lxc\nlocal-hostname: my-lxc\n\n",
+ "user.vendor-data": "#cloud-config\nruncmd: ['echo vendor-data']\n",
+ },
+}
+
-@pytest.fixture
def lxd_metadata():
return LXD_V1_METADATA
+def lxd_metadata_no_network_config():
+ return LXD_V1_METADATA_NO_NETWORK_CONFIG
+
+
@pytest.fixture
-def lxd_ds(request, paths, lxd_metadata):
+def lxd_ds(request, paths):
"""
Return an instantiated DataSourceLXD.
@@ -69,7 +82,30 @@ def lxd_ds(request, paths, lxd_metadata):
(This uses the paths fixture for the required helpers.Paths object)
"""
with mock.patch(DS_PATH + "is_platform_viable", return_value=True):
- with mock.patch(DS_PATH + "read_metadata", return_value=lxd_metadata):
+ with mock.patch(
+ DS_PATH + "read_metadata", return_value=lxd_metadata()
+ ):
+ yield lxd.DataSourceLXD(
+ sys_cfg={}, distro=mock.Mock(), paths=paths
+ )
+
+
+@pytest.fixture
+def lxd_ds_no_network_config(request, paths):
+ """
+ Return an instantiated DataSourceLXD.
+
+ This also performs the mocking required for the default test case:
+ * ``is_platform_viable`` returns True,
+ * ``read_metadata`` returns ``LXD_V1_METADATA_NO_NETWORK_CONFIG``
+
+ (This uses the paths fixture for the required helpers.Paths object)
+ """
+ with mock.patch(DS_PATH + "is_platform_viable", return_value=True):
+ with mock.patch(
+ DS_PATH + "read_metadata",
+ return_value=lxd_metadata_no_network_config(),
+ ):
yield lxd.DataSourceLXD(
sys_cfg={}, distro=mock.Mock(), paths=paths
)
@@ -142,6 +178,37 @@ class TestDataSourceLXD:
assert LXD_V1_METADATA["user-data"] == lxd_ds.userdata_raw
assert LXD_V1_METADATA["vendor-data"] == lxd_ds.vendordata_raw
+ def test_network_config_when_unset(self, lxd_ds):
+ """network_config is correctly computed when _network_config and
+ _crawled_metadata are unset.
+ """
+ assert UNSET == lxd_ds._crawled_metadata
+ assert UNSET == lxd_ds._network_config
+ assert None is lxd_ds.userdata_raw
+ # network-config is dumped from YAML
+ assert NETWORK_V1 == lxd_ds.network_config
+ assert LXD_V1_METADATA == lxd_ds._crawled_metadata
+
+ def test_network_config_crawled_metadata_no_network_config(
+ self, lxd_ds_no_network_config
+ ):
+ """network_config is correctly computed when _network_config is unset
+ and _crawled_metadata does not contain network_config.
+ """
+ lxd.generate_fallback_network_config = mock.Mock(
+ return_value=NETWORK_V1
+ )
+ assert UNSET == lxd_ds_no_network_config._crawled_metadata
+ assert UNSET == lxd_ds_no_network_config._network_config
+ assert None is lxd_ds_no_network_config.userdata_raw
+ # network-config is dumped from YAML
+ assert NETWORK_V1 == lxd_ds_no_network_config.network_config
+ assert (
+ LXD_V1_METADATA_NO_NETWORK_CONFIG
+ == lxd_ds_no_network_config._crawled_metadata
+ )
+ assert 1 == lxd.generate_fallback_network_config.call_count
+
class TestIsPlatformViable:
@pytest.mark.parametrize(
diff --git a/tests/unittests/sources/test_opennebula.py b/tests/unittests/sources/test_opennebula.py
index e05c4749..af1c45b8 100644
--- a/tests/unittests/sources/test_opennebula.py
+++ b/tests/unittests/sources/test_opennebula.py
@@ -73,7 +73,7 @@ class TestOpenNebulaDataSource(CiTestCase):
orig_find_devs_with = util.find_devs_with
try:
# don't try to look up CDs
- util.find_devs_with = lambda n: []
+ util.find_devs_with = lambda n: [] # type: ignore
dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
self.assertFalse(ret)
@@ -84,7 +84,7 @@ class TestOpenNebulaDataSource(CiTestCase):
orig_find_devs_with = util.find_devs_with
try:
# don't try to look up CDs
- util.find_devs_with = lambda n: []
+ util.find_devs_with = lambda n: [] # type: ignore
populate_dir(self.seed_dir, {"context.sh": INVALID_CONTEXT})
dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
@@ -107,7 +107,7 @@ class TestOpenNebulaDataSource(CiTestCase):
] = invalid_user
# don't try to look up CDs
- util.find_devs_with = lambda n: []
+ util.find_devs_with = lambda n: [] # type: ignore
populate_context_dir(self.seed_dir, {"KEY1": "val1"})
dsrc = self.ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data)
@@ -118,7 +118,7 @@ class TestOpenNebulaDataSource(CiTestCase):
orig_find_devs_with = util.find_devs_with
try:
# don't try to look up CDs
- util.find_devs_with = lambda n: []
+ util.find_devs_with = lambda n: [] # type: ignore
populate_context_dir(self.seed_dir, {"KEY1": "val1"})
dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths)
ret = dsrc.get_data()
diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py
index c111bbcd..f65aab8b 100644
--- a/tests/unittests/sources/test_openstack.py
+++ b/tests/unittests/sources/test_openstack.py
@@ -39,7 +39,7 @@ USER_DATA = b"#!/bin/sh\necho This is user data\n"
VENDOR_DATA = {
"magic": "",
}
-VENDOR_DATA2 = {"static": {}}
+VENDOR_DATA2: dict = {"static": {}}
OSTACK_META = {
"availability_zone": "nova",
"files": [
@@ -284,8 +284,10 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
m_dhcp.assert_not_called()
@hp.activate
- @test_helpers.mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
- @test_helpers.mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
+ @test_helpers.mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network")
+ @test_helpers.mock.patch(
+ "cloudinit.net.ephemeral.maybe_perform_dhcp_discovery"
+ )
def test_local_datasource(self, m_dhcp, m_net):
"""OpenStackLocal calls EphemeralDHCPNetwork and gets instance data."""
_register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
diff --git a/tests/unittests/sources/test_oracle.py b/tests/unittests/sources/test_oracle.py
index b7b16952..7cd55be0 100644
--- a/tests/unittests/sources/test_oracle.py
+++ b/tests/unittests/sources/test_oracle.py
@@ -3,7 +3,7 @@
import base64
import copy
import json
-from contextlib import ExitStack
+import logging
from unittest import mock
import pytest
@@ -13,6 +13,7 @@ from cloudinit.sources import NetworkConfigSource
from cloudinit.sources.DataSourceOracle import OpcMetadata
from cloudinit.url_helper import UrlError
from tests.unittests import helpers as test_helpers
+from tests.unittests.helpers import does_not_raise
DS_PATH = "cloudinit.sources.DataSourceOracle"
@@ -87,6 +88,25 @@ OPC_V2_METADATA = """\
# Just a small, meaningless change to differentiate the two metadata versions
OPC_V1_METADATA = OPC_V2_METADATA.replace("ocid1.instance", "ocid2.instance")
+MAC_ADDR = "00:00:17:02:2b:b1"
+
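+# Network config as the initramfs (klibc) source would report it for the primary NIC.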
+DHCP = {
+ "name": "eth0",
+ "type": "physical",
+ "subnets": [
+ {
+ "broadcast": "192.168.122.255",
+ "control": "manual",
+ "gateway": "192.168.122.1",
+ "dns_search": ["foo.com"],
+ "type": "dhcp",
+ "netmask": "255.255.255.0",
+ "dns_nameservers": ["192.168.122.1"],
+ }
+ ],
+}
+KLIBC_NET_CFG = {"version": 1, "config": [DHCP]}
+
@pytest.fixture
def metadata_version():
@@ -94,15 +114,20 @@ def metadata_version():
@pytest.fixture
-def oracle_ds(request, fixture_utils, paths, metadata_version):
+def oracle_ds(request, fixture_utils, paths, metadata_version, mocker):
"""
Return an instantiated DataSourceOracle.
- This also performs the mocking required for the default test case:
+ This also performs the mocking required:
* ``_read_system_uuid`` returns something,
* ``_is_platform_viable`` returns True,
- * ``_is_iscsi_root`` returns True (the simpler code path),
- * ``read_opc_metadata`` returns ``OPC_V1_METADATA``
+ * ``DataSourceOracle._is_iscsi_root`` returns True by default or what
+ pytest.mark.is_iscsi gives as first param,
+ * ``DataSourceOracle._get_iscsi_config`` returns a network cfg if
+ is_iscsi else an empty network config,
+ * ``read_opc_metadata`` returns ``OPC_V1_METADATA``,
+ * ``ephemeral.EphemeralDHCPv4`` and ``net.find_fallback_nic`` mocked to
+ avoid subp calls
(This uses the paths fixture for the required helpers.Paths object, and the
fixture_utils fixture for fetching markers.)
@@ -110,19 +135,29 @@ def oracle_ds(request, fixture_utils, paths, metadata_version):
sys_cfg = fixture_utils.closest_marker_first_arg_or(
request, "ds_sys_cfg", mock.MagicMock()
)
+ is_iscsi = fixture_utils.closest_marker_first_arg_or(
+ request, "is_iscsi", True
+ )
metadata = OpcMetadata(metadata_version, json.loads(OPC_V2_METADATA), None)
- with mock.patch(DS_PATH + "._read_system_uuid", return_value="someuuid"):
- with mock.patch(DS_PATH + "._is_platform_viable", return_value=True):
- with mock.patch(DS_PATH + "._is_iscsi_root", return_value=True):
- with mock.patch(
- DS_PATH + ".read_opc_metadata",
- return_value=metadata,
- ):
- yield oracle.DataSourceOracle(
- sys_cfg=sys_cfg,
- distro=mock.Mock(),
- paths=paths,
- )
+
+ mocker.patch(DS_PATH + ".net.find_fallback_nic")
+ mocker.patch(DS_PATH + ".ephemeral.EphemeralDHCPv4")
+ mocker.patch(DS_PATH + "._read_system_uuid", return_value="someuuid")
+ mocker.patch(DS_PATH + "._is_platform_viable", return_value=True)
+ mocker.patch(DS_PATH + ".read_opc_metadata", return_value=metadata)
+ mocker.patch(DS_PATH + ".KlibcOracleNetworkConfigSource")
+ ds = oracle.DataSourceOracle(
+ sys_cfg=sys_cfg,
+ distro=mock.Mock(),
+ paths=paths,
+ )
+ mocker.patch.object(ds, "_is_iscsi_root", return_value=is_iscsi)
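+ # An iSCSI root gets the klibc network config; otherwise an empty v1 config.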
+ if is_iscsi:
+ iscsi_config = copy.deepcopy(KLIBC_NET_CFG)
+ else:
+ iscsi_config = {"version": 1, "config": []}
+ mocker.patch.object(ds, "_get_iscsi_config", return_value=iscsi_config)
+ yield ds
class TestDataSourceOracle:
@@ -158,28 +193,27 @@ class TestDataSourceOracle:
assert oracle_ds.ds_cfg["configure_secondary_nics"]
-class TestIsPlatformViable(test_helpers.CiTestCase):
- @mock.patch(
- DS_PATH + ".dmi.read_dmi_data", return_value=oracle.CHASSIS_ASSET_TAG
+class TestIsPlatformViable:
+ @pytest.mark.parametrize(
+ "dmi_data, platform_viable",
+ [
+ # System with known chassis tag is viable.
+ (oracle.CHASSIS_ASSET_TAG, True),
+ # System without known chassis tag is not viable.
+ (None, False),
+ # System with unknown chassis tag is not viable.
+ ("LetsGoCubs", False),
+ ],
)
- def test_expected_viable(self, m_read_dmi_data):
- """System with known chassis tag is viable."""
- self.assertTrue(oracle._is_platform_viable())
- m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")])
-
- @mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value=None)
- def test_expected_not_viable_dmi_data_none(self, m_read_dmi_data):
- """System without known chassis tag is not viable."""
- self.assertFalse(oracle._is_platform_viable())
- m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")])
-
- @mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value="LetsGoCubs")
- def test_expected_not_viable_other(self, m_read_dmi_data):
- """System with unnown chassis tag is not viable."""
- self.assertFalse(oracle._is_platform_viable())
+ def test_is_platform_viable(self, dmi_data, platform_viable):
+ with mock.patch(
+ DS_PATH + ".dmi.read_dmi_data", return_value=dmi_data
+ ) as m_read_dmi_data:
+ assert platform_viable == oracle._is_platform_viable()
m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")])
+@pytest.mark.is_iscsi(False)
@mock.patch(
"cloudinit.net.is_openvswitch_internal_interface",
mock.Mock(return_value=False),
@@ -190,7 +224,7 @@ class TestNetworkConfigFromOpcImds:
# We test this by using in a non-dict to ensure that no dict
# operations are used; failure would be seen as exceptions
oracle_ds._network_config = object()
- oracle_ds._add_network_config_from_opc_imds()
+ oracle_ds._add_network_config_from_opc_imds(set_primary=False)
def test_bare_metal_machine_skipped(self, oracle_ds, caplog):
# nicIndex in the first entry indicates a bare metal machine
@@ -198,40 +232,47 @@ class TestNetworkConfigFromOpcImds:
# We test this by using a non-dict to ensure that no dict
# operations are used
oracle_ds._network_config = object()
- oracle_ds._add_network_config_from_opc_imds()
+ oracle_ds._add_network_config_from_opc_imds(set_primary=False)
assert "bare metal machine" in caplog.text
- def test_missing_mac_skipped(self, oracle_ds, caplog):
- oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
-
- oracle_ds._network_config = {
- "version": 1,
- "config": [{"primary": "nic"}],
- }
- with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}):
- oracle_ds._add_network_config_from_opc_imds()
-
- assert 1 == len(oracle_ds.network_config["config"])
- assert (
- "Interface with MAC 00:00:17:02:2b:b1 not found; skipping"
- in caplog.text
- )
-
- def test_missing_mac_skipped_v2(self, oracle_ds, caplog):
+ @pytest.mark.parametrize(
+ "network_config, network_config_key",
+ [
+ pytest.param(
+ {
+ "version": 1,
+ "config": [{"primary": "nic"}],
+ },
+ "config",
+ id="v1",
+ ),
+ pytest.param(
+ {
+ "version": 2,
+ "ethernets": {"primary": {"nic": {}}},
+ },
+ "ethernets",
+ id="v2",
+ ),
+ ],
+ )
+ def test_missing_mac_skipped(
+ self,
+ oracle_ds,
+ network_config,
+ network_config_key,
+ caplog,
+ ):
oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
-
- oracle_ds._network_config = {
- "version": 2,
- "ethernets": {"primary": {"nic": {}}},
- }
+ oracle_ds._network_config = network_config
with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}):
- oracle_ds._add_network_config_from_opc_imds()
+ oracle_ds._add_network_config_from_opc_imds(set_primary=False)
- assert 1 == len(oracle_ds.network_config["ethernets"])
+ assert 1 == len(oracle_ds._network_config[network_config_key])
assert (
- "Interface with MAC 00:00:17:02:2b:b1 not found; skipping"
- in caplog.text
+ f"Interface with MAC {MAC_ADDR} not found; skipping" in caplog.text
)
+ assert 1 == caplog.text.count(" not found; skipping")
def test_secondary_nic(self, oracle_ds):
oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
@@ -239,12 +280,12 @@ class TestNetworkConfigFromOpcImds:
"version": 1,
"config": [{"primary": "nic"}],
}
- mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
+ mac_addr, nic_name = MAC_ADDR, "ens3"
with mock.patch(
DS_PATH + ".get_interfaces_by_mac",
return_value={mac_addr: nic_name},
):
- oracle_ds._add_network_config_from_opc_imds()
+ oracle_ds._add_network_config_from_opc_imds(set_primary=False)
# The input is mutated
assert 2 == len(oracle_ds.network_config["config"])
@@ -266,12 +307,12 @@ class TestNetworkConfigFromOpcImds:
"version": 2,
"ethernets": {"primary": {"nic": {}}},
}
- mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
+ mac_addr, nic_name = MAC_ADDR, "ens3"
with mock.patch(
DS_PATH + ".get_interfaces_by_mac",
return_value={mac_addr: nic_name},
):
- oracle_ds._add_network_config_from_opc_imds()
+ oracle_ds._add_network_config_from_opc_imds(set_primary=False)
# The input is mutated
assert 2 == len(oracle_ds.network_config["ethernets"])
@@ -286,77 +327,180 @@ class TestNetworkConfigFromOpcImds:
# These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
assert "10.0.0.231" == secondary_nic_cfg["addresses"][0]
+ @pytest.mark.parametrize("error_add_network", [None, Exception])
+ @pytest.mark.parametrize(
+ "configure_secondary_nics",
+ [False, True],
+ )
+ @mock.patch(DS_PATH + "._ensure_netfailover_safe")
+ def test_network_config_log_errors(
+ self,
+ m_ensure_netfailover_safe,
+ configure_secondary_nics,
+ error_add_network,
+ oracle_ds,
+ caplog,
+ capsys,
+ ):
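+ # Failures while computing network config should be logged, never raised to the caller.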
+ assert not oracle_ds._has_network_config()
+ oracle_ds.ds_cfg["configure_secondary_nics"] = configure_secondary_nics
+ with mock.patch.object(
+ oracle.DataSourceOracle,
+ "_add_network_config_from_opc_imds",
+ ) as m_add_network_config_from_opc_imds:
+ if error_add_network:
+ m_add_network_config_from_opc_imds.side_effect = (
+ error_add_network
+ )
+ oracle_ds.network_config # pylint: disable=pointless-statement # noqa: E501
+ assert [
+ mock.call(True, False)
+ == m_add_network_config_from_opc_imds.call_args_list
+ ]
+ assert 1 == oracle_ds._is_iscsi_root.call_count
+ assert 1 == m_ensure_netfailover_safe.call_count
+
+ assert ("", "") == capsys.readouterr()
+ if not error_add_network:
+ log_initramfs_index = -1
+ else:
+ log_initramfs_index = -3
+ # Primary
+ assert (
+ logging.WARNING,
+ "Failed to parse IMDS network configuration!",
+ ) == caplog.record_tuples[-2][1:]
+ # Secondary
+ assert (
+ logging.DEBUG,
+ "Failed to parse IMDS network configuration!",
+ ) == caplog.record_tuples[-1][1:]
-class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
- def setUp(self):
- super(TestNetworkConfigFiltersNetFailover, self).setUp()
- self.add_patch(
- DS_PATH + ".get_interfaces_by_mac", "m_get_interfaces_by_mac"
- )
- self.add_patch(DS_PATH + ".is_netfail_master", "m_netfail_master")
+ assert (
+ logging.WARNING,
+ "Could not obtain network configuration from initramfs."
+ " Falling back to IMDS.",
+ ) == caplog.record_tuples[log_initramfs_index][1:]
- def test_ignore_bogus_network_config(self):
- netcfg = {"something": "here"}
- passed_netcfg = copy.copy(netcfg)
- oracle._ensure_netfailover_safe(passed_netcfg)
- self.assertEqual(netcfg, passed_netcfg)
- def test_ignore_network_config_unknown_versions(self):
- netcfg = {"something": "here", "version": 3}
+@mock.patch(DS_PATH + ".get_interfaces_by_mac")
+@mock.patch(DS_PATH + ".is_netfail_master")
+class TestNetworkConfigFiltersNetFailover:
+ @pytest.mark.parametrize(
+ "netcfg",
+ [
+ pytest.param({"something": "here"}, id="bogus"),
+ pytest.param(
+ {"something": "here", "version": 3}, id="unknown_version"
+ ),
+ ],
+ )
+ def test_ignore_network_config(
+ self, m_netfail_master, m_get_interfaces_by_mac, netcfg
+ ):
passed_netcfg = copy.copy(netcfg)
oracle._ensure_netfailover_safe(passed_netcfg)
- self.assertEqual(netcfg, passed_netcfg)
+ assert netcfg == passed_netcfg
- def test_checks_v1_type_physical_interfaces(self):
- mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
- self.m_get_interfaces_by_mac.return_value = {
- mac_addr: nic_name,
- }
- netcfg = {
- "version": 1,
- "config": [
+ @pytest.mark.parametrize(
+ "nic_name, netcfg, netfail_master_return, call_args_list",
+ [
+ pytest.param(
+ "ens3",
{
- "type": "physical",
- "name": nic_name,
- "mac_address": mac_addr,
- "subnets": [{"type": "dhcp4"}],
- }
- ],
- }
- passed_netcfg = copy.copy(netcfg)
- self.m_netfail_master.return_value = False
- oracle._ensure_netfailover_safe(passed_netcfg)
- self.assertEqual(netcfg, passed_netcfg)
- self.assertEqual(
- [mock.call(nic_name)], self.m_netfail_master.call_args_list
- )
-
- def test_checks_v1_skips_non_phys_interfaces(self):
- mac_addr, nic_name = "00:00:17:02:2b:b1", "bond0"
- self.m_get_interfaces_by_mac.return_value = {
- mac_addr: nic_name,
- }
- netcfg = {
- "version": 1,
- "config": [
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "ens3",
+ "mac_address": MAC_ADDR,
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ },
+ False,
+ [mock.call("ens3")],
+ id="checks_v1_type_physical_interfaces",
+ ),
+ pytest.param(
+ "bond0",
{
- "type": "bond",
- "name": nic_name,
- "mac_address": mac_addr,
- "subnets": [{"type": "dhcp4"}],
- }
- ],
+ "version": 1,
+ "config": [
+ {
+ "type": "bond",
+ "name": "bond0",
+ "mac_address": MAC_ADDR,
+ "subnets": [{"type": "dhcp4"}],
+ }
+ ],
+ },
+ None,
+ [],
+ id="skips_v1_non_phys_interfaces",
+ ),
+ pytest.param(
+ "ens3",
+ {
+ "version": 2,
+ "ethernets": {
+ "ens3": {
+ "dhcp4": True,
+ "critical": True,
+ "set-name": "ens3",
+ "match": {"macaddress": MAC_ADDR},
+ }
+ },
+ },
+ False,
+ [mock.call("ens3")],
+ id="checks_v2_type_ethernet_interfaces",
+ ),
+ pytest.param(
+ "wlps0",
+ {
+ "version": 2,
+ "ethernets": {
+ "wlps0": {
+ "dhcp4": True,
+ "critical": True,
+ "set-name": "wlps0",
+ "match": {"macaddress": MAC_ADDR},
+ }
+ },
+ },
+ None,
+ [mock.call("wlps0")],
+ id="skips_v2_non_ethernet_interfaces",
+ ),
+ ],
+ )
+ def test__ensure_netfailover_safe(
+ self,
+ m_netfail_master,
+ m_get_interfaces_by_mac,
+ nic_name,
+ netcfg,
+ netfail_master_return,
+ call_args_list,
+ ):
+ m_get_interfaces_by_mac.return_value = {
+ MAC_ADDR: nic_name,
}
passed_netcfg = copy.copy(netcfg)
+ if netfail_master_return is not None:
+ m_netfail_master.return_value = netfail_master_return
oracle._ensure_netfailover_safe(passed_netcfg)
- self.assertEqual(netcfg, passed_netcfg)
- self.assertEqual(0, self.m_netfail_master.call_count)
-
- def test_removes_master_mac_property_v1(self):
- nic_master, mac_master = "ens3", self.random_string()
- nic_other, mac_other = "ens7", self.random_string()
- nic_extra, mac_extra = "enp0s1f2", self.random_string()
- self.m_get_interfaces_by_mac.return_value = {
+ assert netcfg == passed_netcfg
+ assert call_args_list == m_netfail_master.call_args_list
+
+ def test_removes_master_mac_property_v1(
+ self, m_netfail_master, m_get_interfaces_by_mac
+ ):
+ nic_master, mac_master = "ens3", test_helpers.random_string()
+ nic_other, mac_other = "ens7", test_helpers.random_string()
+ nic_extra, mac_extra = "enp0s1f2", test_helpers.random_string()
+ m_get_interfaces_by_mac.return_value = {
mac_master: nic_master,
mac_other: nic_other,
mac_extra: nic_extra,
@@ -387,7 +531,7 @@ class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
return True
return False
- self.m_netfail_master.side_effect = _is_netfail_master
+ m_netfail_master.side_effect = _is_netfail_master
expected_cfg = {
"version": 1,
"config": [
@@ -405,58 +549,15 @@ class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
],
}
oracle._ensure_netfailover_safe(netcfg)
- self.assertEqual(expected_cfg, netcfg)
-
- def test_checks_v2_type_ethernet_interfaces(self):
- mac_addr, nic_name = "00:00:17:02:2b:b1", "ens3"
- self.m_get_interfaces_by_mac.return_value = {
- mac_addr: nic_name,
- }
- netcfg = {
- "version": 2,
- "ethernets": {
- nic_name: {
- "dhcp4": True,
- "critical": True,
- "set-name": nic_name,
- "match": {"macaddress": mac_addr},
- }
- },
- }
- passed_netcfg = copy.copy(netcfg)
- self.m_netfail_master.return_value = False
- oracle._ensure_netfailover_safe(passed_netcfg)
- self.assertEqual(netcfg, passed_netcfg)
- self.assertEqual(
- [mock.call(nic_name)], self.m_netfail_master.call_args_list
- )
+ assert expected_cfg == netcfg
- def test_skips_v2_non_ethernet_interfaces(self):
- mac_addr, nic_name = "00:00:17:02:2b:b1", "wlps0"
- self.m_get_interfaces_by_mac.return_value = {
- mac_addr: nic_name,
- }
- netcfg = {
- "version": 2,
- "wifis": {
- nic_name: {
- "dhcp4": True,
- "critical": True,
- "set-name": nic_name,
- "match": {"macaddress": mac_addr},
- }
- },
- }
- passed_netcfg = copy.copy(netcfg)
- oracle._ensure_netfailover_safe(passed_netcfg)
- self.assertEqual(netcfg, passed_netcfg)
- self.assertEqual(0, self.m_netfail_master.call_count)
-
- def test_removes_master_mac_property_v2(self):
- nic_master, mac_master = "ens3", self.random_string()
- nic_other, mac_other = "ens7", self.random_string()
- nic_extra, mac_extra = "enp0s1f2", self.random_string()
- self.m_get_interfaces_by_mac.return_value = {
+ def test_removes_master_mac_property_v2(
+ self, m_netfail_master, m_get_interfaces_by_mac
+ ):
+ nic_master, mac_master = "ens3", test_helpers.random_string()
+ nic_other, mac_other = "ens7", test_helpers.random_string()
+ nic_extra, mac_extra = "enp0s1f2", test_helpers.random_string()
+ m_get_interfaces_by_mac.return_value = {
mac_master: nic_master,
mac_other: nic_other,
mac_extra: nic_extra,
@@ -487,7 +588,7 @@ class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
return True
return False
- self.m_netfail_master.side_effect = _is_netfail_master
+ m_netfail_master.side_effect = _is_netfail_master
expected_cfg = {
"version": 2,
@@ -511,7 +612,7 @@ class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
pprint.pprint(netcfg)
print("---- ^^ modified ^^ ---- vv original vv ----")
pprint.pprint(expected_cfg)
- self.assertEqual(expected_cfg, netcfg)
+ assert expected_cfg == netcfg
def _mock_v2_urls(httpretty):
@@ -557,7 +658,6 @@ def _mock_no_v2_urls(httpretty):
class TestReadOpcMetadata:
# See https://docs.pytest.org/en/stable/example
# /parametrize.html#parametrizing-conditional-raising
- does_not_raise = ExitStack
@mock.patch("cloudinit.url_helper.time.sleep", lambda _: None)
@pytest.mark.parametrize(
@@ -636,7 +736,29 @@ class TestReadOpcMetadata:
with expectation:
assert expected_body == oracle.read_opc_metadata().instance_data
+ # No need to actually wait between retries in the tests
+ @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None)
+ def test_fetch_vnics_error(self, caplog):
+ def mocked_fetch(*args, path="instance", **kwargs):
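+ # Instance metadata fetches succeed; only the "vnics" path raises.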
+ if path == "vnics":
+ raise UrlError("cause")
+
+ with mock.patch(DS_PATH + "._fetch", side_effect=mocked_fetch):
+ opc_metadata = oracle.read_opc_metadata(fetch_vnics_data=True)
+ assert None is opc_metadata.vnics_data
+ assert (
+ logging.WARNING,
+ "Failed to fetch IMDS network configuration!",
+ ) == caplog.record_tuples[-2][1:]
+
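+# Parametrize with empty argnames: each param exists only to apply an
+# is_iscsi marker, so the class runs once for iSCSI and once for non-iSCSI root.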
+@pytest.mark.parametrize(
+ "",
+ [
+ pytest.param(marks=pytest.mark.is_iscsi(True), id="iscsi"),
+ pytest.param(marks=pytest.mark.is_iscsi(False), id="non-iscsi"),
+ ],
+)
class TestCommon_GetDataBehaviour:
"""This test class tests behaviour common to iSCSI and non-iSCSI root.
@@ -649,33 +771,14 @@ class TestCommon_GetDataBehaviour:
separate class for that case.)
"""
- @pytest.fixture(params=[True, False])
- def parameterized_oracle_ds(self, request, oracle_ds):
- """oracle_ds parameterized for iSCSI and non-iSCSI root respectively"""
- is_iscsi_root = request.param
- with ExitStack() as stack:
- stack.enter_context(
- mock.patch(
- DS_PATH + "._is_iscsi_root", return_value=is_iscsi_root
- )
- )
- if not is_iscsi_root:
- stack.enter_context(
- mock.patch(DS_PATH + ".net.find_fallback_nic")
- )
- stack.enter_context(
- mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4")
- )
- yield oracle_ds
-
@mock.patch(
DS_PATH + "._is_platform_viable", mock.Mock(return_value=False)
)
def test_false_if_platform_not_viable(
self,
- parameterized_oracle_ds,
+ oracle_ds,
):
- assert not parameterized_oracle_ds._get_data()
+ assert not oracle_ds._get_data()
@pytest.mark.parametrize(
"keyname,expected_value",
@@ -699,10 +802,10 @@ class TestCommon_GetDataBehaviour:
self,
keyname,
expected_value,
- parameterized_oracle_ds,
+ oracle_ds,
):
- assert parameterized_oracle_ds._get_data()
- assert expected_value == parameterized_oracle_ds.metadata[keyname]
+ assert oracle_ds._get_data()
+ assert expected_value == oracle_ds.metadata[keyname]
@pytest.mark.parametrize(
"attribute_name,expected_value",
@@ -722,12 +825,10 @@ class TestCommon_GetDataBehaviour:
self,
attribute_name,
expected_value,
- parameterized_oracle_ds,
+ oracle_ds,
):
- assert parameterized_oracle_ds._get_data()
- assert expected_value == getattr(
- parameterized_oracle_ds, attribute_name
- )
+ assert oracle_ds._get_data()
+ assert expected_value == getattr(oracle_ds, attribute_name)
@pytest.mark.parametrize(
"ssh_keys,expected_value",
@@ -746,7 +847,7 @@ class TestCommon_GetDataBehaviour:
],
)
def test_public_keys_handled_correctly(
- self, ssh_keys, expected_value, parameterized_oracle_ds
+ self, ssh_keys, expected_value, oracle_ds
):
instance_data = json.loads(OPC_V1_METADATA)
if ssh_keys is None:
@@ -758,14 +859,10 @@ class TestCommon_GetDataBehaviour:
DS_PATH + ".read_opc_metadata",
mock.Mock(return_value=metadata),
):
- assert parameterized_oracle_ds._get_data()
- assert (
- expected_value == parameterized_oracle_ds.get_public_ssh_keys()
- )
+ assert oracle_ds._get_data()
+ assert expected_value == oracle_ds.get_public_ssh_keys()
- def test_missing_user_data_handled_gracefully(
- self, parameterized_oracle_ds
- ):
+ def test_missing_user_data_handled_gracefully(self, oracle_ds):
instance_data = json.loads(OPC_V1_METADATA)
del instance_data["metadata"]["user_data"]
metadata = OpcMetadata(None, instance_data, None)
@@ -773,13 +870,11 @@ class TestCommon_GetDataBehaviour:
DS_PATH + ".read_opc_metadata",
mock.Mock(return_value=metadata),
):
- assert parameterized_oracle_ds._get_data()
+ assert oracle_ds._get_data()
- assert parameterized_oracle_ds.userdata_raw is None
+ assert oracle_ds.userdata_raw is None
- def test_missing_metadata_handled_gracefully(
- self, parameterized_oracle_ds
- ):
+ def test_missing_metadata_handled_gracefully(self, oracle_ds):
instance_data = json.loads(OPC_V1_METADATA)
del instance_data["metadata"]
metadata = OpcMetadata(None, instance_data, None)
@@ -787,17 +882,17 @@ class TestCommon_GetDataBehaviour:
DS_PATH + ".read_opc_metadata",
mock.Mock(return_value=metadata),
):
- assert parameterized_oracle_ds._get_data()
+ assert oracle_ds._get_data()
- assert parameterized_oracle_ds.userdata_raw is None
- assert [] == parameterized_oracle_ds.get_public_ssh_keys()
+ assert oracle_ds.userdata_raw is None
+ assert [] == oracle_ds.get_public_ssh_keys()
-@mock.patch(DS_PATH + "._is_iscsi_root", lambda: False)
+@pytest.mark.is_iscsi(False)
class TestNonIscsiRoot_GetDataBehaviour:
- @mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4")
+ @mock.patch(DS_PATH + ".ephemeral.EphemeralDHCPv4")
@mock.patch(DS_PATH + ".net.find_fallback_nic")
- def test_read_opc_metadata_called_with_ephemeral_dhcp(
+ def test_run_net_files(
self, m_find_fallback_nic, m_EphemeralDHCPv4, oracle_ds
):
in_context_manager = False
@@ -837,74 +932,122 @@ class TestNonIscsiRoot_GetDataBehaviour:
)
] == m_EphemeralDHCPv4.call_args_list
+ @mock.patch(DS_PATH + ".ephemeral.EphemeralDHCPv4")
+ @mock.patch(DS_PATH + ".net.find_fallback_nic")
+ def test_read_opc_metadata_called_with_ephemeral_dhcp(
+ self, m_find_fallback_nic, m_EphemeralDHCPv4, oracle_ds
+ ):
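+ # Tracks whether read_opc_metadata executes inside the ephemeral DHCP context manager.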
+ in_context_manager = False
-@mock.patch(DS_PATH + ".get_interfaces_by_mac", lambda: {})
-@mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
-class TestNetworkConfig:
- def test_network_config_cached(self, m_read_initramfs_config, oracle_ds):
- """.network_config should be cached"""
- assert 0 == m_read_initramfs_config.call_count
- oracle_ds.network_config # pylint: disable=pointless-statement
- assert 1 == m_read_initramfs_config.call_count
- oracle_ds.network_config # pylint: disable=pointless-statement
- assert 1 == m_read_initramfs_config.call_count
+ def enter_context_manager():
+ nonlocal in_context_manager
+ in_context_manager = True
- def test_network_cmdline(self, m_read_initramfs_config, oracle_ds):
- """network_config should prefer initramfs config over fallback"""
- ncfg = {"version": 1, "config": [{"a": "b"}]}
- m_read_initramfs_config.return_value = copy.deepcopy(ncfg)
+ def exit_context_manager(*args):
+ nonlocal in_context_manager
+ in_context_manager = False
- assert ncfg == oracle_ds.network_config
- assert 0 == oracle_ds.distro.generate_fallback_config.call_count
+ m_EphemeralDHCPv4.return_value.__enter__.side_effect = (
+ enter_context_manager
+ )
+ m_EphemeralDHCPv4.return_value.__exit__.side_effect = (
+ exit_context_manager
+ )
- def test_network_fallback(self, m_read_initramfs_config, oracle_ds):
- """network_config should prefer initramfs config over fallback"""
- ncfg = {"version": 1, "config": [{"a": "b"}]}
+ def assert_in_context_manager(**kwargs):
+ assert in_context_manager
+ return mock.MagicMock()
- m_read_initramfs_config.return_value = None
- oracle_ds.distro.generate_fallback_config.return_value = copy.deepcopy(
- ncfg
- )
+ with mock.patch(
+ DS_PATH + ".read_opc_metadata",
+ mock.Mock(side_effect=assert_in_context_manager),
+ ):
+ assert oracle_ds._get_data()
+
+ assert [
+ mock.call(
+ iface=m_find_fallback_nic.return_value,
+ connectivity_url_data={
+ "headers": {"Authorization": "Bearer Oracle"},
+ "url": "http://169.254.169.254/opc/v2/instance/",
+ },
+ )
+ ] == m_EphemeralDHCPv4.call_args_list
- assert ncfg == oracle_ds.network_config
+
+@mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={})
+class TestNetworkConfig:
+ def test_network_config_cached(self, m_get_interfaces_by_mac, oracle_ds):
+ """.network_config should be cached"""
+ assert 0 == oracle_ds._get_iscsi_config.call_count
+ oracle_ds.network_config # pylint: disable=pointless-statement
+ assert 1 == oracle_ds._get_iscsi_config.call_count
+ oracle_ds.network_config # pylint: disable=pointless-statement
+ assert 1 == oracle_ds._get_iscsi_config.call_count
@pytest.mark.parametrize(
- "configure_secondary_nics,expect_secondary_nics",
- [(True, True), (False, False), (None, False)],
+ "configure_secondary_nics,is_iscsi,expected_set_primary",
+ [
+ pytest.param(
+ True,
+ True,
+ [mock.call(False)],
+ marks=pytest.mark.is_iscsi(True),
+ ),
+ pytest.param(
+ True,
+ False,
+ [mock.call(True)],
+ marks=pytest.mark.is_iscsi(False),
+ ),
+ pytest.param(False, True, [], marks=pytest.mark.is_iscsi(True)),
+ pytest.param(
+ False,
+ False,
+ [mock.call(True)],
+ marks=pytest.mark.is_iscsi(False),
+ ),
+ pytest.param(None, True, [], marks=pytest.mark.is_iscsi(True)),
+ pytest.param(
+ None,
+ False,
+ [mock.call(True)],
+ marks=pytest.mark.is_iscsi(False),
+ ),
+ ],
)
def test_secondary_nic_addition(
self,
- m_read_initramfs_config,
+ m_get_interfaces_by_mac,
configure_secondary_nics,
- expect_secondary_nics,
+ is_iscsi,
+ expected_set_primary,
oracle_ds,
):
"""Test that _add_network_config_from_opc_imds is called as expected
(configure_secondary_nics=None is used to test the default behaviour.)
"""
- m_read_initramfs_config.return_value = {"version": 1, "config": []}
if configure_secondary_nics is not None:
oracle_ds.ds_cfg[
"configure_secondary_nics"
] = configure_secondary_nics
- def side_effect(self):
- self._network_config["secondary_added"] = mock.sentinel.needle
-
oracle_ds._vnics_data = "DummyData"
with mock.patch.object(
- oracle.DataSourceOracle,
+ oracle_ds,
"_add_network_config_from_opc_imds",
- new=side_effect,
- ):
- was_secondary_added = "secondary_added" in oracle_ds.network_config
- assert expect_secondary_nics == was_secondary_added
+ ) as m_add_network_config_from_opc_imds:
+ oracle_ds.network_config # pylint: disable=pointless-statement
+ assert (
+ expected_set_primary
+ == m_add_network_config_from_opc_imds.call_args_list
+ )
def test_secondary_nic_failure_isnt_blocking(
self,
- m_read_initramfs_config,
+ m_get_interfaces_by_mac,
caplog,
oracle_ds,
):
@@ -917,15 +1060,88 @@ class TestNetworkConfig:
side_effect=Exception(),
):
network_config = oracle_ds.network_config
- assert network_config == m_read_initramfs_config.return_value
- assert "Failed to parse secondary network configuration" in caplog.text
+ assert network_config == oracle_ds._get_iscsi_config.return_value
+ assert 2 == caplog.text.count(
+ "Failed to parse IMDS network configuration"
+ )
- def test_ds_network_cfg_preferred_over_initramfs(self, _m):
+ def test_ds_network_cfg_preferred_over_initramfs(
+ self, m_get_interfaces_by_mac
+ ):
"""Ensure that DS net config is preferred over initramfs config"""
config_sources = oracle.DataSourceOracle.network_config_sources
ds_idx = config_sources.index(NetworkConfigSource.DS)
initramfs_idx = config_sources.index(NetworkConfigSource.INITRAMFS)
assert ds_idx < initramfs_idx
+ @pytest.mark.parametrize("set_primary", [True, False])
+ def test__add_network_config_from_opc_imds_no_vnics_data(
+ self,
+ m_get_interfaces_by_mac,
+ set_primary,
+ oracle_ds,
+ caplog,
+ ):
+ assert not oracle_ds._has_network_config()
+ with mock.patch.object(oracle_ds, "_vnics_data", None):
+ oracle_ds._add_network_config_from_opc_imds(set_primary)
+ assert not oracle_ds._has_network_config()
+ assert (
+ logging.WARNING,
+ "NIC data is UNSET but should not be",
+ ) == caplog.record_tuples[-1][1:]
+
+ def test_missing_mac_skipped(
+ self,
+ m_get_interfaces_by_mac,
+ oracle_ds,
+ caplog,
+ ):
+ """If no intefaces by mac found, then _network_config not setted and
+ correct logs.
+ """
+ vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
+ assert not oracle_ds._has_network_config()
+ with mock.patch.object(oracle_ds, "_vnics_data", vnics_data):
+ oracle_ds._add_network_config_from_opc_imds(set_primary=True)
+ assert not oracle_ds._has_network_config()
+ assert (
+ logging.WARNING,
+ "Interface with MAC 02:00:17:05:d1:db not found; skipping",
+ ) == caplog.record_tuples[-2][1:]
+ assert (
+ logging.WARNING,
+ f"Interface with MAC {MAC_ADDR} not found; skipping",
+ ) == caplog.record_tuples[-1][1:]
+
+ @pytest.mark.parametrize("set_primary", [True, False])
+ def test_nics(
+ self,
+ m_get_interfaces_by_mac,
+ set_primary,
+ oracle_ds,
+ caplog,
+ mocker,
+ ):
+ """Correct number of configs added"""
+ vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
+ if set_primary:
+ assert not oracle_ds._has_network_config()
+ else:
+ # Simulate primary config was taken from iscsi
+ oracle_ds._network_config = copy.deepcopy(KLIBC_NET_CFG)
+
+ mocker.patch(
+ DS_PATH + ".get_interfaces_by_mac",
+ return_value={"02:00:17:05:d1:db": "eth_0", MAC_ADDR: "name_1"},
+ )
+ mocker.patch.object(oracle_ds, "_vnics_data", vnics_data)
+
+ oracle_ds._add_network_config_from_opc_imds(set_primary)
+ assert 2 == len(
+ oracle_ds._network_config["config"]
+ ), "Config not added"
+ assert "" == caplog.text
+
# vi: ts=4 expandtab
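The rewritten TestNonIscsiRoot_GetDataBehaviour tests above verify that a call
happens while a mocked context manager is active by toggling a nonlocal flag
from the mock's __enter__/__exit__ side effects. A minimal standalone sketch of
the same technique (all names hypothetical):

    from unittest import mock

    def test_call_happens_inside_context_manager():
        in_cm = False

        def fake_enter():
            nonlocal in_cm
            in_cm = True

        def fake_exit(*args):
            nonlocal in_cm
            in_cm = False

        m_cm = mock.MagicMock()
        m_cm.return_value.__enter__.side_effect = fake_enter
        m_cm.return_value.__exit__.side_effect = fake_exit

        def guarded_call():
            # Fails if invoked outside the `with m_cm():` block.
            assert in_cm
            return mock.MagicMock()

        with m_cm():
            guarded_call()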
diff --git a/tests/unittests/sources/test_ovf.py b/tests/unittests/sources/test_ovf.py
index c2c87f12..1fbd564f 100644
--- a/tests/unittests/sources/test_ovf.py
+++ b/tests/unittests/sources/test_ovf.py
@@ -13,6 +13,7 @@ from cloudinit import subp, util
from cloudinit.helpers import Paths
from cloudinit.safeyaml import YAMLError
from cloudinit.sources import DataSourceOVF as dsovf
+from cloudinit.sources.DataSourceOVF import GuestCustScriptDisabled
from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
CustomScriptNotFound,
)
@@ -447,7 +448,7 @@ class TestDatasourceOVF(CiTestCase):
with mock.patch(
MPATH + "set_customization_status", return_value=("msg", b"")
):
- with self.assertRaises(RuntimeError) as context:
+ with self.assertRaises(GuestCustScriptDisabled) as context:
wrap_and_call(
"cloudinit.sources.DataSourceOVF",
{
diff --git a/tests/unittests/sources/test_scaleway.py b/tests/unittests/sources/test_scaleway.py
index 52bcbc17..64c785d6 100644
--- a/tests/unittests/sources/test_scaleway.py
+++ b/tests/unittests/sources/test_scaleway.py
@@ -236,7 +236,7 @@ class TestDataSourceScaleway(HttprettyTestCase):
].sort(),
)
self.assertEqual(
- self.datasource.get_hostname(),
+ self.datasource.get_hostname().hostname,
MetadataResponses.FAKE_METADATA["hostname"],
)
self.assertEqual(
diff --git a/tests/unittests/sources/test_smartos.py b/tests/unittests/sources/test_smartos.py
index 55239c4e..702a67f7 100644
--- a/tests/unittests/sources/test_smartos.py
+++ b/tests/unittests/sources/test_smartos.py
@@ -23,8 +23,9 @@ import unittest
import uuid
from binascii import crc32
+import serial
+
from cloudinit import helpers as c_helpers
-from cloudinit import serial
from cloudinit.event import EventScope, EventType
from cloudinit.sources import DataSourceSmartOS
from cloudinit.sources.DataSourceSmartOS import SERIAL_DEVICE, SMARTOS_ENV_KVM
@@ -44,14 +45,6 @@ from tests.unittests.helpers import (
skipIf,
)
-try:
- import serial as _pyserial
-
- assert _pyserial # avoid pyflakes error F401: import unused
- HAS_PYSERIAL = True
-except ImportError:
- HAS_PYSERIAL = False
-
DSMOS = "cloudinit.sources.DataSourceSmartOS"
SDC_NICS = json.loads(
"""
@@ -1357,7 +1350,6 @@ class TestNetworkConversion(CiTestCase):
os.access(SERIAL_DEVICE, os.W_OK),
"Requires write access to " + SERIAL_DEVICE,
)
-@unittest.skipUnless(HAS_PYSERIAL is True, "pyserial not available")
class TestSerialConcurrency(CiTestCase):
"""
This class tests locking on an actual serial port, and as such can only
diff --git a/tests/unittests/sources/test_upcloud.py b/tests/unittests/sources/test_upcloud.py
index e1125b65..317cb638 100644
--- a/tests/unittests/sources/test_upcloud.py
+++ b/tests/unittests/sources/test_upcloud.py
@@ -216,8 +216,8 @@ class TestUpCloudNetworkSetup(CiTestCase):
@mock.patch("cloudinit.sources.helpers.upcloud.read_metadata")
@mock.patch("cloudinit.net.find_fallback_nic")
- @mock.patch("cloudinit.net.dhcp.maybe_perform_dhcp_discovery")
- @mock.patch("cloudinit.net.dhcp.EphemeralIPv4Network")
+ @mock.patch("cloudinit.net.ephemeral.maybe_perform_dhcp_discovery")
+ @mock.patch("cloudinit.net.ephemeral.EphemeralIPv4Network")
def test_network_configured_metadata(
self, m_net, m_dhcp, m_fallback_nic, mock_readmd
):
diff --git a/tests/unittests/sources/test_vmware.py b/tests/unittests/sources/test_vmware.py
index 3579041a..37a1f259 100644
--- a/tests/unittests/sources/test_vmware.py
+++ b/tests/unittests/sources/test_vmware.py
@@ -418,7 +418,9 @@ class TestDataSourceVMwareGuestInfo_InvalidPlatform(FilesystemMockingTestCase):
def assert_metadata(test_obj, ds, metadata):
test_obj.assertEqual(metadata.get("instance-id"), ds.get_instance_id())
- test_obj.assertEqual(metadata.get("local-hostname"), ds.get_hostname())
+ test_obj.assertEqual(
+ metadata.get("local-hostname"), ds.get_hostname().hostname
+ )
expected_public_keys = metadata.get("public_keys")
if not isinstance(expected_public_keys, list):
diff --git a/tests/unittests/sources/test_vultr.py b/tests/unittests/sources/test_vultr.py
index c8398579..5f2ccd4a 100644
--- a/tests/unittests/sources/test_vultr.py
+++ b/tests/unittests/sources/test_vultr.py
@@ -344,9 +344,15 @@ class TestDataSourceVultr(CiTestCase):
return
# Test interface seeking to ensure we are able to find the correct one
- @mock.patch("cloudinit.net.dhcp.EphemeralDHCPv4.__init__", ephemeral_init)
- @mock.patch("cloudinit.net.dhcp.EphemeralDHCPv4.__enter__", override_enter)
- @mock.patch("cloudinit.net.dhcp.EphemeralDHCPv4.__exit__", override_exit)
+ @mock.patch(
+ "cloudinit.net.ephemeral.EphemeralDHCPv4.__init__", ephemeral_init
+ )
+ @mock.patch(
+ "cloudinit.net.ephemeral.EphemeralDHCPv4.__enter__", override_enter
+ )
+ @mock.patch(
+ "cloudinit.net.ephemeral.EphemeralDHCPv4.__exit__", override_exit
+ )
@mock.patch("cloudinit.sources.helpers.vultr.check_route")
@mock.patch("cloudinit.sources.helpers.vultr.is_vultr")
@mock.patch("cloudinit.sources.helpers.vultr.read_metadata")
@@ -377,10 +383,15 @@ class TestDataSourceVultr(CiTestCase):
# Test route checking successful DHCPs
@mock.patch("cloudinit.sources.helpers.vultr.check_route", check_route)
@mock.patch(
- "cloudinit.net.dhcp.EphemeralDHCPv4.__init__", ephemeral_init_always
+ "cloudinit.net.ephemeral.EphemeralDHCPv4.__init__",
+ ephemeral_init_always,
+ )
+ @mock.patch(
+ "cloudinit.net.ephemeral.EphemeralDHCPv4.__enter__", override_enter
+ )
+ @mock.patch(
+ "cloudinit.net.ephemeral.EphemeralDHCPv4.__exit__", override_exit
)
- @mock.patch("cloudinit.net.dhcp.EphemeralDHCPv4.__enter__", override_enter)
- @mock.patch("cloudinit.net.dhcp.EphemeralDHCPv4.__exit__", override_exit)
@mock.patch("cloudinit.sources.helpers.vultr.get_interface_list")
@mock.patch("cloudinit.sources.helpers.vultr.is_vultr")
@mock.patch("cloudinit.sources.helpers.vultr.read_metadata")
diff --git a/tests/unittests/test_apport.py b/tests/unittests/test_apport.py
new file mode 100644
index 00000000..a2c866b9
--- /dev/null
+++ b/tests/unittests/test_apport.py
@@ -0,0 +1,23 @@
+from tests.unittests.helpers import mock
+
+M_PATH = "cloudinit.apport."
+
+
+class TestApport:
+ def test_attach_user_data(self, mocker, tmpdir):
+ m_hookutils = mock.Mock()
+ mocker.patch.dict("sys.modules", {"apport.hookutils": m_hookutils})
+ user_data_file = tmpdir.join("instance", "user-data.txt")
+ mocker.patch(
+ M_PATH + "_get_user_data_file", return_value=user_data_file
+ )
+
+ from cloudinit import apport
+
+ ui = mock.Mock()
+ ui.yesno.return_value = True
+ report = object()
+ apport.attach_user_data(report, ui)
+ assert [
+ mock.call(report, user_data_file, "user_data.txt")
+ ] == m_hookutils.attach_file.call_args_list
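The new test above exercises cloudinit.apport without a real apport
installation by patching sys.modules before the import happens. A sketch of the
technique in isolation (names hypothetical; the import machinery consults
sys.modules before trying to load the real package):

    from unittest import mock

    def test_fake_optional_dependency(mocker):
        m_hookutils = mock.Mock()
        # Register the fake so `from apport.hookutils import ...` resolves
        # to the mock instead of requiring apport to be installed.
        mocker.patch.dict("sys.modules", {"apport.hookutils": m_hookutils})

        from apport.hookutils import attach_file

        attach_file("report", "/tmp/user-data.txt", "user_data.txt")
        assert [
            mock.call("report", "/tmp/user-data.txt", "user_data.txt")
        ] == m_hookutils.attach_file.call_args_list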
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index 7846d0d3..04f5f457 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -5,22 +5,27 @@ import io
import os
from collections import namedtuple
+import pytest
+
+from cloudinit import helpers
from cloudinit.cmd import main as cli
from cloudinit.util import load_file, load_json
from tests.unittests import helpers as test_helpers
mock = test_helpers.mock
+M_PATH = "cloudinit.cmd.main."
-class TestCLI(test_helpers.FilesystemMockingTestCase):
- with_logs = True
+@pytest.fixture(autouse=False)
+def mock_get_user_data_file(mocker, tmpdir):
+ yield mocker.patch(
+ "cloudinit.cmd.devel.logs._get_user_data_file",
+ return_value=tmpdir.join("cloud"),
+ )
- def setUp(self):
- super(TestCLI, self).setUp()
- self.stderr = io.StringIO()
- self.patchStdoutAndStderr(stderr=self.stderr)
+class TestCLI:
def _call_main(self, sysv_args=None):
if not sysv_args:
sysv_args = ["cloud-init"]
@@ -29,57 +34,48 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
except SystemExit as e:
return e.code
- def test_status_wrapper_errors_on_invalid_name(self):
- """status_wrapper will error when the name parameter is not valid.
-
- Valid name values are only init and modules.
- """
- tmpd = self.tmp_dir()
- data_d = self.tmp_path("data", tmpd)
- link_d = self.tmp_path("link", tmpd)
+ @pytest.mark.parametrize(
+ "action,name,match",
+ [
+ pytest.param(
+ "doesnotmatter",
+ "init1",
+ "^unknown name: init1$",
+ id="invalid_name",
+ ),
+ pytest.param(
+ "modules_name",
+ "modules",
+ "^Invalid cloud init mode specified 'modules-bogusmode'$",
+ id="invalid_modes",
+ ),
+ ],
+ )
+ def test_status_wrapper_errors(self, action, name, match, caplog, tmpdir):
+ data_d = tmpdir.join("data")
+ link_d = tmpdir.join("link")
FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"])
def myaction():
raise Exception("Should not call myaction")
- myargs = FakeArgs(("doesnotmatter", myaction), False, "bogusmode")
- with self.assertRaises(ValueError) as cm:
- cli.status_wrapper("init1", myargs, data_d, link_d)
- self.assertEqual("unknown name: init1", str(cm.exception))
- self.assertNotIn("Should not call myaction", self.logs.getvalue())
-
- def test_status_wrapper_errors_on_invalid_modes(self):
- """status_wrapper will error if a parameter combination is invalid."""
- tmpd = self.tmp_dir()
- data_d = self.tmp_path("data", tmpd)
- link_d = self.tmp_path("link", tmpd)
- FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"])
-
- def myaction():
- raise Exception("Should not call myaction")
+ myargs = FakeArgs((action, myaction), False, "bogusmode")
+ with pytest.raises(ValueError, match=match):
+ cli.status_wrapper(name, myargs, data_d, link_d)
+ assert "Should not call myaction" not in caplog.text
- myargs = FakeArgs(("modules_name", myaction), False, "bogusmode")
- with self.assertRaises(ValueError) as cm:
- cli.status_wrapper("modules", myargs, data_d, link_d)
- self.assertEqual(
- "Invalid cloud init mode specified 'modules-bogusmode'",
- str(cm.exception),
- )
- self.assertNotIn("Should not call myaction", self.logs.getvalue())
-
- def test_status_wrapper_init_local_writes_fresh_status_info(self):
+ def test_status_wrapper_init_local_writes_fresh_status_info(self, tmpdir):
"""When running in init-local mode, status_wrapper writes status.json.
Old status and results artifacts are also removed.
"""
- tmpd = self.tmp_dir()
- data_d = self.tmp_path("data", tmpd)
- link_d = self.tmp_path("link", tmpd)
- status_link = self.tmp_path("status.json", link_d)
+ data_d = tmpdir.join("data")
+ link_d = tmpdir.join("link")
+ status_link = link_d.join("status.json")
# Write old artifacts which will be removed or updated.
for _dir in data_d, link_d:
test_helpers.populate_dir(
- _dir, {"status.json": "old", "result.json": "old"}
+ str(_dir), {"status.json": "old", "result.json": "old"}
)
FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"])
@@ -92,39 +88,63 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
cli.status_wrapper("init", myargs, data_d, link_d)
# No errors reported in status
status_v1 = load_json(load_file(status_link))["v1"]
- self.assertEqual(["an error"], status_v1["init-local"]["errors"])
- self.assertEqual("SomeDatasource", status_v1["datasource"])
- self.assertFalse(
- os.path.exists(self.tmp_path("result.json", data_d)),
- "unexpected result.json found",
- )
- self.assertFalse(
- os.path.exists(self.tmp_path("result.json", link_d)),
- "unexpected result.json link found",
- )
+ assert ["an error"] == status_v1["init-local"]["errors"]
+ assert "SomeDatasource" == status_v1["datasource"]
+ assert False is os.path.exists(
+ data_d.join("result.json")
+ ), "unexpected result.json found"
+ assert False is os.path.exists(
+ link_d.join("result.json")
+ ), "unexpected result.json link found"
+
+ def test_status_wrapper_init_local_honor_cloud_dir(self, mocker, tmpdir):
+ """When running in init-local mode, status_wrapper honors cloud_dir."""
+ cloud_dir = tmpdir.join("cloud")
+ paths = helpers.Paths({"cloud_dir": str(cloud_dir)})
+ mocker.patch(M_PATH + "read_cfg_paths", return_value=paths)
+ data_d = cloud_dir.join("data")
+ link_d = tmpdir.join("link")
+
+ FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"])
+
+ def myaction(name, args):
+ # Return an error to verify that status capture records it
+ return "SomeDatasource", ["an_error"]
- def test_no_arguments_shows_usage(self):
+ myargs = FakeArgs(("ignored_name", myaction), True, "bogusmode")
+ cli.status_wrapper("init", myargs, link_d=link_d) # No explicit data_d
+ # Access cloud_dir directly
+ status_v1 = load_json(load_file(data_d.join("status.json")))["v1"]
+ assert ["an_error"] == status_v1["init-local"]["errors"]
+ assert "SomeDatasource" == status_v1["datasource"]
+ assert False is os.path.exists(
+ data_d.join("result.json")
+ ), "unexpected result.json found"
+ assert False is os.path.exists(
+ link_d.join("result.json")
+ ), "unexpected result.json link found"
+
+ def test_no_arguments_shows_usage(self, capsys):
exit_code = self._call_main()
- self.assertIn("usage: cloud-init", self.stderr.getvalue())
- self.assertEqual(2, exit_code)
+ _out, err = capsys.readouterr()
+ assert "usage: cloud-init" in err
+ assert 2 == exit_code
- def test_no_arguments_shows_error_message(self):
+ def test_no_arguments_shows_error_message(self, capsys):
exit_code = self._call_main()
- missing_subcommand_message = [
- "too few arguments", # python2.7 msg
- "the following arguments are required: subcommand", # python3 msg
- ]
- error = self.stderr.getvalue()
- matches = [msg in error for msg in missing_subcommand_message]
- self.assertTrue(
- any(matches), "Did not find error message for missing subcommand"
+ missing_subcommand_message = (
+ "the following arguments are required: subcommand"
)
- self.assertEqual(2, exit_code)
+ _out, err = capsys.readouterr()
+ assert (
+ missing_subcommand_message in err
+ ), "Did not find error message for missing subcommand"
+ assert 2 == exit_code
- def test_all_subcommands_represented_in_help(self):
+ def test_all_subcommands_represented_in_help(self, capsys):
"""All known subparsers are represented in the cloud-int help doc."""
self._call_main()
- error = self.stderr.getvalue()
+ _out, err = capsys.readouterr()
expected_subcommands = [
"analyze",
"clean",
@@ -137,241 +157,188 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
"schema",
]
for subcommand in expected_subcommands:
- self.assertIn(subcommand, error)
-
- @mock.patch("cloudinit.cmd.main.status_wrapper")
- def test_init_subcommand_parser(self, m_status_wrapper):
- """The subcommand 'init' calls status_wrapper passing init."""
- self._call_main(["cloud-init", "init"])
- (name, parseargs) = m_status_wrapper.call_args_list[0][0]
- self.assertEqual("init", name)
- self.assertEqual("init", parseargs.subcommand)
- self.assertEqual("init", parseargs.action[0])
- self.assertEqual("main_init", parseargs.action[1].__name__)
+ assert subcommand in err
+ @pytest.mark.parametrize("subcommand", ["init", "modules"])
@mock.patch("cloudinit.cmd.main.status_wrapper")
- def test_modules_subcommand_parser(self, m_status_wrapper):
- """The subcommand 'modules' calls status_wrapper passing modules."""
- self._call_main(["cloud-init", "modules"])
+ def test_modules_subcommand_parser(self, m_status_wrapper, subcommand):
+ """The subcommand 'subcommand' calls status_wrapper passing modules."""
+ self._call_main(["cloud-init", subcommand])
(name, parseargs) = m_status_wrapper.call_args_list[0][0]
- self.assertEqual("modules", name)
- self.assertEqual("modules", parseargs.subcommand)
- self.assertEqual("modules", parseargs.action[0])
- self.assertEqual("main_modules", parseargs.action[1].__name__)
-
- def test_conditional_subcommands_from_entry_point_sys_argv(self):
- """Subcommands from entry-point are properly parsed from sys.argv."""
- stdout = io.StringIO()
- self.patchStdoutAndStderr(stdout=stdout)
-
- expected_errors = [
- "usage: cloud-init analyze",
- "usage: cloud-init clean",
- "usage: cloud-init collect-logs",
- "usage: cloud-init devel",
- "usage: cloud-init status",
- "usage: cloud-init schema",
- ]
- conditional_subcommands = [
+ assert subcommand == name
+ assert subcommand == parseargs.subcommand
+ assert subcommand == parseargs.action[0]
+ assert f"main_{subcommand}" == parseargs.action[1].__name__
+
+ @pytest.mark.parametrize(
+ "subcommand",
+ [
"analyze",
"clean",
"collect-logs",
"devel",
"status",
"schema",
- ]
+ ],
+ )
+ def test_conditional_subcommands_from_entry_point_sys_argv(
+ self, subcommand, capsys, mock_get_user_data_file, tmpdir
+ ):
+ """Subcommands from entry-point are properly parsed from sys.argv."""
+ expected_error = f"usage: cloud-init {subcommand}"
# The cloud-init entrypoint calls main without passing sys_argv
- for subcommand in conditional_subcommands:
- with mock.patch("sys.argv", ["cloud-init", subcommand, "-h"]):
- try:
- cli.main()
- except SystemExit as e:
- self.assertEqual(0, e.code) # exit 2 on proper -h usage
- for error_message in expected_errors:
- self.assertIn(error_message, stdout.getvalue())
-
- def test_analyze_subcommand_parser(self):
- """The subcommand cloud-init analyze calls the correct subparser."""
- self._call_main(["cloud-init", "analyze"])
- # These subcommands only valid for cloud-init analyze script
- expected_subcommands = ["blame", "show", "dump"]
- error = self.stderr.getvalue()
- for subcommand in expected_subcommands:
- self.assertIn(subcommand, error)
-
- def test_collect_logs_subcommand_parser(self):
- """The subcommand cloud-init collect-logs calls the subparser."""
- # Provide -h param to collect-logs to avoid having to mock behavior.
- stdout = io.StringIO()
- self.patchStdoutAndStderr(stdout=stdout)
- self._call_main(["cloud-init", "collect-logs", "-h"])
- self.assertIn("usage: cloud-init collect-log", stdout.getvalue())
-
- def test_clean_subcommand_parser(self):
- """The subcommand cloud-init clean calls the subparser."""
- # Provide -h param to clean to avoid having to mock behavior.
- stdout = io.StringIO()
- self.patchStdoutAndStderr(stdout=stdout)
- self._call_main(["cloud-init", "clean", "-h"])
- self.assertIn("usage: cloud-init clean", stdout.getvalue())
-
- def test_status_subcommand_parser(self):
- """The subcommand cloud-init status calls the subparser."""
- # Provide -h param to clean to avoid having to mock behavior.
- stdout = io.StringIO()
- self.patchStdoutAndStderr(stdout=stdout)
- self._call_main(["cloud-init", "status", "-h"])
- self.assertIn("usage: cloud-init status", stdout.getvalue())
-
- def test_subcommand_parser(self):
+ with mock.patch("sys.argv", ["cloud-init", subcommand, "-h"]):
+ try:
+ cli.main()
+ except SystemExit as e:
+ assert 0 == e.code  # exit 0 on proper -h usage
+ out, _err = capsys.readouterr()
+ assert expected_error in out
+
+ @pytest.mark.parametrize(
+ "subcommand",
+ [
+ "clean",
+ "collect-logs",
+ "status",
+ ],
+ )
+ def test_subcommand_parser(self, subcommand, mock_get_user_data_file):
+ """cloud-init `subcommand` calls its subparser."""
+ # Provide -h param to `subcommand` to avoid having to mock behavior.
+ out = io.StringIO()
+ with contextlib.redirect_stdout(out):
+ self._call_main(["cloud-init", subcommand, "-h"])
+ assert f"usage: cloud-init {subcommand}" in out.getvalue()
+
+ @pytest.mark.parametrize(
+ "args,expected_subcommands",
+ [
+ ([], ["schema"]),
+ (["analyze"], ["blame", "show", "dump"]),
+ ],
+ )
+ def test_subcommand_parser_multi_arg(
+ self, args, expected_subcommands, capsys
+ ):
"""The subcommand cloud-init schema calls the correct subparser."""
- self._call_main(["cloud-init"])
- # These subcommands only valid for cloud-init schema script
- expected_subcommands = ["schema"]
- error = self.stderr.getvalue()
+ self._call_main(["cloud-init"] + args)
+ _out, err = capsys.readouterr()
for subcommand in expected_subcommands:
- self.assertIn(subcommand, error)
+ assert subcommand in err
- def test_wb_schema_subcommand_parser(self):
+ def test_wb_schema_subcommand_parser(self, capsys):
"""The subcommand cloud-init schema calls the correct subparser."""
exit_code = self._call_main(["cloud-init", "schema"])
- self.assertEqual(1, exit_code)
+ _out, err = capsys.readouterr()
+ assert 1 == exit_code
# Known whitebox output from schema subcommand
- self.assertEqual(
+ assert (
"Error:\n"
- "Expected one of --config-file, --system or --docs arguments\n",
- self.stderr.getvalue(),
+ "Expected one of --config-file, --system or --docs arguments\n"
+ == err
)
- def test_wb_schema_subcommand_doc_all_spot_check(self):
- """Validate that doc content has correct values from known examples.
-
- Ensure that schema doc is returned
- """
-
- # Note: patchStdoutAndStderr() is convenient for reducing boilerplate,
- # but inspecting the code for debugging is not ideal
- # contextlib.redirect_stdout() provides similar behavior as a context
- # manager
- stdout = io.StringIO()
- with contextlib.redirect_stdout(stdout):
- self._call_main(["cloud-init", "schema", "--docs", "all"])
- expected_doc_sections = [
- "**Supported distros:** all",
- "**Supported distros:** almalinux, alpine, centos, "
- "cloudlinux, debian, eurolinux, fedora, miraclelinux, "
- "openEuler, opensuse, photon, rhel, rocky, sles, ubuntu, "
- "virtuozzo",
- "**Config schema**:\n **resize_rootfs:** "
- "(``true``/``false``/``noblock``)",
- "**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n",
- ]
- stdout = stdout.getvalue()
- for expected in expected_doc_sections:
- self.assertIn(expected, stdout)
-
- def test_wb_schema_subcommand_single_spot_check(self):
- """Validate that doc content has correct values from known example.
-
- Validate 'all' arg
- """
-
- # Note: patchStdoutAndStderr() is convenient for reducing boilerplate,
- # but inspecting the code for debugging is not ideal
- # contextlib.redirect_stdout() provides similar behavior as a context
- # manager
- stdout = io.StringIO()
- with contextlib.redirect_stdout(stdout):
- self._call_main(["cloud-init", "schema", "--docs", "cc_runcmd"])
- expected_doc_sections = [
- "Runcmd\n------\n**Summary:** Run arbitrary commands"
- ]
- stdout = stdout.getvalue()
- for expected in expected_doc_sections:
- self.assertIn(expected, stdout)
-
- def test_wb_schema_subcommand_multiple_spot_check(self):
- """Validate that doc content has correct values from known example.
-
- Validate single arg
- """
-
- stdout = io.StringIO()
- with contextlib.redirect_stdout(stdout):
- self._call_main(
+ @pytest.mark.parametrize(
+ "args,expected_doc_sections,is_error",
+ [
+ pytest.param(
+ ["all"],
+ [
+ "**Supported distros:** all",
+ "**Supported distros:** almalinux, alpine, centos, "
+ "cloudlinux, debian, eurolinux, fedora, miraclelinux, "
+ "openEuler, openmandriva, opensuse, photon, rhel, rocky, "
+ "sles, ubuntu, virtuozzo",
+ "**Config schema**:\n **resize_rootfs:** "
+ "(``true``/``false``/``noblock``)",
+ "**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n",
+ ],
+ False,
+ id="all_spot_check",
+ ),
+ pytest.param(
+ ["cc_runcmd"],
+ ["Runcmd\n------\n**Summary:** Run arbitrary commands"],
+ False,
+ id="single_spot_check",
+ ),
+ pytest.param(
[
- "cloud-init",
- "schema",
- "--docs",
"cc_runcmd",
"cc_resizefs",
- ]
- )
- expected_doc_sections = [
- "Runcmd\n------\n**Summary:** Run arbitrary commands",
- "Resizefs\n--------\n**Summary:** Resize filesystem",
- ]
- stdout = stdout.getvalue()
- for expected in expected_doc_sections:
- self.assertIn(expected, stdout)
-
- def test_wb_schema_subcommand_bad_arg_fails(self):
- """Validate that doc content has correct values from known example.
-
- Validate multiple args
- """
+ ],
+ [
+ "Runcmd\n------\n**Summary:** Run arbitrary commands",
+ "Resizefs\n--------\n**Summary:** Resize filesystem",
+ ],
+ False,
+ id="multiple_spot_check",
+ ),
+ pytest.param(
+ ["garbage_value"],
+ ["Invalid --docs value"],
+ True,
+ id="bad_arg_fails",
+ ),
+ ],
+ )
+ def test_wb_schema_subcommand(self, args, expected_doc_sections, is_error):
+ """Validate that doc content has correct values."""
# Note: patchStdoutAndStderr() is convenient for reducing boilerplate,
# but inspecting the code for debugging is not ideal
# contextlib.redirect_stdout() provides similar behavior as a context
# manager
- stderr = io.StringIO()
- with contextlib.redirect_stderr(stderr):
- self._call_main(
- ["cloud-init", "schema", "--docs", "garbage_value"]
- )
- expected_doc_sections = ["Invalid --docs value"]
- stderr = stderr.getvalue()
+ out_or_err = io.StringIO()
+ redirecter = (
+ contextlib.redirect_stderr
+ if is_error
+ else contextlib.redirect_stdout
+ )
+ with redirecter(out_or_err):
+ self._call_main(["cloud-init", "schema", "--docs"] + args)
+ out_or_err = out_or_err.getvalue()
for expected in expected_doc_sections:
- self.assertIn(expected, stderr)
+ assert expected in out_or_err
@mock.patch("cloudinit.cmd.main.main_single")
def test_single_subcommand(self, m_main_single):
"""The subcommand 'single' calls main_single with valid args."""
self._call_main(["cloud-init", "single", "--name", "cc_ntp"])
(name, parseargs) = m_main_single.call_args_list[0][0]
- self.assertEqual("single", name)
- self.assertEqual("single", parseargs.subcommand)
- self.assertEqual("single", parseargs.action[0])
- self.assertFalse(parseargs.debug)
- self.assertFalse(parseargs.force)
- self.assertIsNone(parseargs.frequency)
- self.assertEqual("cc_ntp", parseargs.name)
- self.assertFalse(parseargs.report)
+ assert "single" == name
+ assert "single" == parseargs.subcommand
+ assert "single" == parseargs.action[0]
+ assert False is parseargs.debug
+ assert False is parseargs.force
+ assert None is parseargs.frequency
+ assert "cc_ntp" == parseargs.name
+ assert False is parseargs.report
@mock.patch("cloudinit.cmd.main.dhclient_hook.handle_args")
def test_dhclient_hook_subcommand(self, m_handle_args):
"""The subcommand 'dhclient-hook' calls dhclient_hook with args."""
self._call_main(["cloud-init", "dhclient-hook", "up", "eth0"])
(name, parseargs) = m_handle_args.call_args_list[0][0]
- self.assertEqual("dhclient-hook", name)
- self.assertEqual("dhclient-hook", parseargs.subcommand)
- self.assertEqual("dhclient-hook", parseargs.action[0])
- self.assertFalse(parseargs.debug)
- self.assertFalse(parseargs.force)
- self.assertEqual("up", parseargs.event)
- self.assertEqual("eth0", parseargs.interface)
+ assert "dhclient-hook" == name
+ assert "dhclient-hook" == parseargs.subcommand
+ assert "dhclient-hook" == parseargs.action[0]
+ assert False is parseargs.debug
+ assert False is parseargs.force
+ assert "up" == parseargs.event
+ assert "eth0" == parseargs.interface
@mock.patch("cloudinit.cmd.main.main_features")
def test_features_hook_subcommand(self, m_features):
"""The subcommand 'features' calls main_features with args."""
self._call_main(["cloud-init", "features"])
(name, parseargs) = m_features.call_args_list[0][0]
- self.assertEqual("features", name)
- self.assertEqual("features", parseargs.subcommand)
- self.assertEqual("features", parseargs.action[0])
- self.assertFalse(parseargs.debug)
- self.assertFalse(parseargs.force)
+ assert "features" == name
+ assert "features" == parseargs.subcommand
+ assert "features" == parseargs.action[0]
+ assert False is parseargs.debug
+ assert False is parseargs.force
# vi: ts=4 expandtab
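The TestCLI conversion above trades FilesystemMockingTestCase's
patchStdoutAndStderr plumbing for pytest's built-in capsys fixture; a minimal
sketch of the replacement pattern:

    import sys

    def test_usage_is_printed_to_stderr(capsys):
        print("usage: cloud-init", file=sys.stderr)
        _out, err = capsys.readouterr()
        assert "usage: cloud-init" in err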
diff --git a/tests/unittests/test_dmi.py b/tests/unittests/test_dmi.py
index 6c28724a..91d424c1 100644
--- a/tests/unittests/test_dmi.py
+++ b/tests/unittests/test_dmi.py
@@ -68,7 +68,9 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
)
def test_sysfs_used_with_key_in_mapping_and_file_on_disk(self):
- self.patch_mapping({"mapped-key": dmi.kdmi("mapped-value", None)})
+ self.patch_mapping(
+ {"mapped-key": dmi.KernelNames("mapped-value", None)}
+ )
expected_dmi_value = "sys-used-correctly"
self._create_sysfs_file("mapped-value", expected_dmi_value)
self._configure_dmidecode_return("mapped-key", "wrong-wrong-wrong")
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index ecf33070..bfc13734 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -443,7 +443,7 @@ network:
macaddress: 68:05:ca:64:d3:6c
mtu: 9000
parameters:
- gratuitious-arp: 1
+ gratuitous-arp: 1
bond1:
interfaces:
- ens4
@@ -2987,7 +2987,7 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
parameters:
down-delay: 10
fail-over-mac-policy: active
- gratuitious-arp: 5
+ gratuitous-arp: 5
mii-monitor-interval: 100
mode: active-backup
primary: bond0s0
@@ -3095,7 +3095,7 @@ iface bond0 inet6 static
parameters:
down-delay: 10
fail-over-mac-policy: active
- gratuitious-arp: 5
+ gratuitous-arp: 5
mii-monitor-interval: 100
mode: active-backup
primary: bond0s0
@@ -3128,7 +3128,7 @@ iface bond0 inet6 static
parameters:
down-delay: 10
fail-over-mac-policy: active
- gratuitious-arp: 5
+ gratuitous-arp: 5
mii-monitor-interval: 100
mode: active-backup
primary: bond0s0
@@ -6782,7 +6782,7 @@ class TestNetplanRoundTrip(CiTestCase):
entry = {
"yaml": NETPLAN_BOND_GRAT_ARP,
"expected_netplan": NETPLAN_BOND_GRAT_ARP.replace(
- "gratuitous", "gratuitious"
+ "gratuitious", "gratuitous"
),
}
network_config = yaml.load(entry["yaml"]).get("network")
@@ -7533,7 +7533,7 @@ class TestGetInterfaces(CiTestCase):
"tun0": None,
},
}
- data = {}
+ data: dict = {}
def _se_get_devicelist(self):
return list(self.data["devices"])
@@ -7707,7 +7707,7 @@ class TestGetInterfacesByMac(CiTestCase):
"tun0": None,
},
}
- data = {}
+ data: dict = {}
def _se_get_devicelist(self):
return list(self.data["devices"])
@@ -7917,7 +7917,7 @@ class TestGetIBHwaddrsByInterface(CiTestCase):
},
"ib_hwaddr": {"ib0": {True: _ib_addr_eth_format, False: _ib_addr}},
}
- data = {}
+ data: dict = {}
def _mock_setup(self):
self.data = copy.deepcopy(self._data)
diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py
index 9eec74c9..7494b438 100644
--- a/tests/unittests/test_net_activators.py
+++ b/tests/unittests/test_net_activators.py
@@ -35,7 +35,7 @@ ethernets:
dhcp4: true
"""
-NETPLAN_CALL_LIST = [
+NETPLAN_CALL_LIST: list = [
((["netplan", "apply"],), {}),
]
@@ -156,12 +156,12 @@ class TestActivatorsAvailable:
assert available_mocks.m_which.call_args_list == available_calls
-IF_UP_DOWN_BRING_UP_CALL_LIST = [
+IF_UP_DOWN_BRING_UP_CALL_LIST: list = [
((["ifup", "eth0"],), {}),
((["ifup", "eth1"],), {}),
]
-NETWORK_MANAGER_BRING_UP_CALL_LIST = [
+NETWORK_MANAGER_BRING_UP_CALL_LIST: list = [
(
(
[
@@ -230,7 +230,7 @@ NETWORK_MANAGER_BRING_UP_CALL_LIST = [
),
]
-NETWORKD_BRING_UP_CALL_LIST = [
+NETWORKD_BRING_UP_CALL_LIST: list = [
((["ip", "link", "set", "up", "eth0"],), {}),
((["ip", "link", "set", "up", "eth1"],), {}),
((["systemctl", "restart", "systemd-networkd", "systemd-resolved"],), {}),
@@ -286,17 +286,17 @@ class TestActivatorsBringUp:
assert call in expected_call_list
-IF_UP_DOWN_BRING_DOWN_CALL_LIST = [
+IF_UP_DOWN_BRING_DOWN_CALL_LIST: list = [
((["ifdown", "eth0"],), {}),
((["ifdown", "eth1"],), {}),
]
-NETWORK_MANAGER_BRING_DOWN_CALL_LIST = [
+NETWORK_MANAGER_BRING_DOWN_CALL_LIST: list = [
((["nmcli", "device", "disconnect", "eth0"],), {}),
((["nmcli", "device", "disconnect", "eth1"],), {}),
]
-NETWORKD_BRING_DOWN_CALL_LIST = [
+NETWORKD_BRING_DOWN_CALL_LIST: list = [
((["ip", "link", "set", "down", "eth0"],), {}),
((["ip", "link", "set", "down", "eth1"],), {}),
]
diff --git a/tests/unittests/test_persistence.py b/tests/unittests/test_persistence.py
index ec1152a9..8cc0d25a 100644
--- a/tests/unittests/test_persistence.py
+++ b/tests/unittests/test_persistence.py
@@ -25,6 +25,7 @@ simple metaclass, ``_Collector``, to gather them up.
"""
import pickle
+from typing import List, Type
from unittest import mock
import pytest
@@ -35,7 +36,7 @@ from cloudinit.persistence import CloudInitPickleMixin
class _Collector(type):
"""Any class using this as a metaclass will be stored in test_classes."""
- test_classes = []
+ test_classes: List[Type] = []
def __new__(cls, *args):
new_cls = super().__new__(cls, *args)
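For context on the annotation change above: _Collector is a collecting
metaclass, so its mutable class attribute needs an explicit type for mypy. A
sketch of the overall shape (only the lines shown in the hunk are verbatim;
the append step is assumed from the docstring):

    from typing import List, Type

    class _Collector(type):
        """Any class using this as a metaclass will be stored in test_classes."""

        test_classes: List[Type] = []

        def __new__(cls, *args):
            new_cls = super().__new__(cls, *args)
            cls.test_classes.append(new_cls)  # assumed bookkeeping step
            return new_cls

    class Example(metaclass=_Collector):
        pass

    assert Example in _Collector.test_classes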
diff --git a/tests/unittests/test_url_helper.py b/tests/unittests/test_url_helper.py
index a9b9a85f..f756a838 100644
--- a/tests/unittests/test_url_helper.py
+++ b/tests/unittests/test_url_helper.py
@@ -282,7 +282,7 @@ class TestDualStack:
"""
@pytest.mark.parametrize(
- "func," "addresses," "stagger_delay," "timeout," "expected_val,",
+ ["func", "addresses", "stagger_delay", "timeout", "expected_val"],
[
# Assert order based on timeout
(lambda x, _: x, ("one", "two"), 1, 1, "one"),
@@ -346,12 +346,14 @@ class TestDualStack:
event.set()
@pytest.mark.parametrize(
- "func,"
- "addresses,"
- "stagger_delay,"
- "timeout,"
- "message,"
- "expected_exc",
+ [
+ "func",
+ "addresses",
+ "stagger_delay",
+ "timeout",
+ "message",
+ "expected_exc",
+ ],
[
(
lambda _a, _b: 1 / 0,
@@ -370,7 +372,7 @@ class TestDualStack:
ZeroDivisionError,
),
(
- lambda _a, _b: [][0],
+ lambda _a, _b: [][0], # pylint: disable=E0643
("matter", "these"),
0,
1,
@@ -479,7 +481,7 @@ class TestUrlHelper:
return (200, {"request-id": "0"}, cls.success)
@pytest.mark.parametrize(
- "addresses," "expected_address_index," "response,",
+ ["addresses", "expected_address_index", "response"],
[
# Use timeout to test ordering happens as expected
((ADDR1, SLEEP1), 0, "SUCCESS"),
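The parametrize cleanups above replace implicitly concatenated string literals,
where one missed comma silently merges two argument names, with an explicit
list, which pytest.mark.parametrize accepts directly. A sketch:

    import pytest

    # "a," "b" only concatenates to "a,b" if every literal keeps its comma;
    # the list form has no such failure mode.
    @pytest.mark.parametrize(
        ["base", "exponent", "expected"], [(2, 3, 8), (5, 0, 1)]
    )
    def test_pow(base, exponent, expected):
        assert base**exponent == expected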
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index bcb63787..b8e16e31 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -15,13 +15,14 @@ import stat
import tempfile
from collections import deque
from textwrap import dedent
-from typing import Tuple
from unittest import mock
import pytest
import yaml
from cloudinit import importer, subp, util
+from cloudinit.helpers import Paths
+from cloudinit.sources import DataSourceHostname
from cloudinit.subp import SubpResult
from tests.unittests import helpers
from tests.unittests.helpers import CiTestCase
@@ -321,6 +322,25 @@ OS_RELEASE_PHOTON = """\
BUG_REPORT_URL="https://github.com/vmware/photon/issues"
"""
+OS_RELEASE_OPENMANDRIVA = dedent(
+ """\
+ NAME="OpenMandriva Lx"\n
+ VERSION="4.90 (Nickel) Cooker"\n
+ ID="openmandriva"\n
+ VERSION_ID="4.90"\n
+ PRETTY_NAME="OpenMandriva Lx 4.90 (Nickel) Cooker"\n
+ BUILD_ID="20220606.19"\n
+ VERSION_CODENAME="nickel"\n
+ ANSI_COLOR="1;43"\n
+ LOGO="openmandriva"\n
+ CPE_NAME="cpe:/o:openmandriva:openmandriva_lx:4.90"\n
+ HOME_URL="http://openmandriva.org/"\n
+ BUG_REPORT_URL="http://issues.openmandriva.org/"\n
+ SUPPORT_URL="https://forum.openmandriva.org"\n
+ PRIVACY_POLICY_URL="https://www.openmandriva.org/tos"\n
+"""
+)
+
class FakeCloud(object):
def __init__(self, hostname, fqdn):
@@ -336,8 +356,8 @@ class FakeCloud(object):
myargs["metadata_only"] = metadata_only
self.calls.append(myargs)
if fqdn:
- return self.fqdn
- return self.hostname
+ return DataSourceHostname(self.fqdn, False)
+ return DataSourceHostname(self.hostname, False)
class TestUtil:
@@ -443,6 +463,23 @@ class TestUtil:
assert [mock.call(confd_fn)] == m_read_confd.call_args_list
assert [expected_call] == m_mergemanydict.call_args_list
+ @pytest.mark.parametrize("custom_cloud_dir", [True, False])
+ @mock.patch(M_PATH + "os.path.isfile", return_value=True)
+ @mock.patch(M_PATH + "os.path.isdir", return_value=True)
+ def test_fetch_ssl_details(
+ self, m_isdir, m_isfile, custom_cloud_dir, tmpdir
+ ):
+ cloud_dir = "/var/lib/cloud"
+ if custom_cloud_dir:
+ cloud_dir = tmpdir.join("cloud")
+ cert = os.path.join(cloud_dir, "instance", "data", "ssl", "cert.pem")
+ key = os.path.join(cloud_dir, "instance", "data", "ssl", "key.pem")
+
+ paths = Paths({"cloud_dir": cloud_dir})
+ ssl_details = util.fetch_ssl_details(paths)
+ assert {"cert_file": cert, "key_file": key} == ssl_details
+ assert 2 == m_isdir.call_count == m_isfile.call_count
+
class TestSymlink(CiTestCase):
def test_sym_link_simple(self):
@@ -552,7 +589,7 @@ class TestShellify(CiTestCase):
class TestGetHostnameFqdn(CiTestCase):
def test_get_hostname_fqdn_from_only_cfg_fqdn(self):
"""When cfg only has the fqdn key, derive hostname and fqdn from it."""
- hostname, fqdn = util.get_hostname_fqdn(
+ hostname, fqdn, _ = util.get_hostname_fqdn(
cfg={"fqdn": "myhost.domain.com"}, cloud=None
)
self.assertEqual("myhost", hostname)
@@ -560,7 +597,7 @@ class TestGetHostnameFqdn(CiTestCase):
def test_get_hostname_fqdn_from_cfg_fqdn_and_hostname(self):
"""When cfg has both fqdn and hostname keys, return them."""
- hostname, fqdn = util.get_hostname_fqdn(
+ hostname, fqdn, _ = util.get_hostname_fqdn(
cfg={"fqdn": "myhost.domain.com", "hostname": "other"}, cloud=None
)
self.assertEqual("other", hostname)
@@ -568,7 +605,7 @@ class TestGetHostnameFqdn(CiTestCase):
def test_get_hostname_fqdn_from_cfg_hostname_with_domain(self):
"""When cfg has only hostname key which represents a fqdn, use that."""
- hostname, fqdn = util.get_hostname_fqdn(
+ hostname, fqdn, _ = util.get_hostname_fqdn(
cfg={"hostname": "myhost.domain.com"}, cloud=None
)
self.assertEqual("myhost", hostname)
@@ -577,7 +614,7 @@ class TestGetHostnameFqdn(CiTestCase):
def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self):
"""When cfg has a hostname without a '.' query cloud.get_hostname."""
mycloud = FakeCloud("cloudhost", "cloudhost.mycloud.com")
- hostname, fqdn = util.get_hostname_fqdn(
+ hostname, fqdn, _ = util.get_hostname_fqdn(
cfg={"hostname": "myhost"}, cloud=mycloud
)
self.assertEqual("myhost", hostname)
@@ -589,7 +626,7 @@ class TestGetHostnameFqdn(CiTestCase):
def test_get_hostname_fqdn_from_without_fqdn_or_hostname(self):
"""When cfg has neither hostname nor fqdn cloud.get_hostname."""
mycloud = FakeCloud("cloudhost", "cloudhost.mycloud.com")
- hostname, fqdn = util.get_hostname_fqdn(cfg={}, cloud=mycloud)
+ hostname, fqdn, _ = util.get_hostname_fqdn(cfg={}, cloud=mycloud)
self.assertEqual("cloudhost", hostname)
self.assertEqual("cloudhost.mycloud.com", fqdn)
self.assertEqual(
@@ -600,7 +637,7 @@ class TestGetHostnameFqdn(CiTestCase):
def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self):
"""Calls to cloud.get_hostname pass the metadata_only parameter."""
mycloud = FakeCloud("cloudhost", "cloudhost.mycloud.com")
- _hn, _fqdn = util.get_hostname_fqdn(
+ _hn, _fqdn, _def_hostname = util.get_hostname_fqdn(
cfg={}, cloud=mycloud, metadata_only=True
)
self.assertEqual(
@@ -754,9 +791,7 @@ class TestUdevadmSettle(CiTestCase):
@mock.patch("os.path.exists")
class TestGetLinuxDistro(CiTestCase):
def setUp(self):
- # python2 has no lru_cache, and therefore, no cache_clear()
- if hasattr(util.get_linux_distro, "cache_clear"):
- util.get_linux_distro.cache_clear()
+ util.get_linux_distro.cache_clear()
@classmethod
def os_release_exists(self, path):
@@ -1027,6 +1062,14 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(("photon", "4.0", "VMware Photon OS/Linux"), dist)
+ @mock.patch(M_PATH + "load_file")
+ def test_get_linux_openmandriva(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and machine arch on OpenMandriva"""
+ m_os_release.return_value = OS_RELEASE_OPENMANDRIVA
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(("openmandriva", "4.90", "nickel"), dist)
+
@mock.patch("platform.system")
@mock.patch("platform.dist", create=True)
def test_get_linux_distro_no_data(
@@ -1141,21 +1184,11 @@ class TestIsLXD(CiTestCase):
class TestReadCcFromCmdline:
-
- random_string: Tuple
-
- if hasattr(pytest, "param"):
- random_string = pytest.param(
- CiTestCase.random_string(), None, id="random_string"
- )
- else:
- random_string = (CiTestCase.random_string(), None)
-
@pytest.mark.parametrize(
"cmdline,expected_cfg",
[
# Return None if cmdline has no cc:<YAML>end_cc content.
- random_string,
+ pytest.param(CiTestCase.random_string(), None, id="random_string"),
# Return None if YAML content is empty string.
("foo cc: end_cc bar", None),
# Return expected dictionary without trailing end_cc marker.
diff --git a/tests/unittests/util.py b/tests/unittests/util.py
index f57a3d25..3f0fe400 100644
--- a/tests/unittests/util.py
+++ b/tests/unittests/util.py
@@ -1,5 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit import cloud, distros, helpers
+from cloudinit.sources import DataSourceHostname
from cloudinit.sources.DataSourceNone import DataSourceNone
@@ -37,7 +38,7 @@ def abstract_to_concrete(abclass):
class DataSourceTesting(DataSourceNone):
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
- return "hostname"
+ return DataSourceHostname("hostname", False)
def persist_instance_data(self):
return True
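The DataSourceHostname conversions in this patch (here and in the scaleway,
vmware, and util hunks above) replace a bare string return with an object
carrying a .hostname attribute. Judging from the two-argument
DataSourceHostname("hostname", False) construction, it is roughly a two-field
named tuple; a sketch under that assumption (the second field's name is a
guess):

    from typing import NamedTuple

    class DataSourceHostname(NamedTuple):
        hostname: str
        is_default: bool  # assumed name for the second positional field

    # Callers now unpack the attribute instead of using the raw return:
    assert DataSourceHostname("myhost", False).hostname == "myhost"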
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index cd7efbd4..16a89f9b 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -12,6 +12,7 @@ antonyc
aswinrajamannar
beantaxi
beezly
+berolinux
bipinbachhao
BirknerAlex
bmhughes
@@ -25,6 +26,7 @@ cjp256
Conan-Kudo
cvstealth
dankenigsberg
+david-caro
ddymko
dermotbradley
dhensby
@@ -54,6 +56,7 @@ kallioli
klausenbusk
KsenijaS
landon912
+linitio
lkundrak
lucasmoura
lucendio
@@ -75,9 +78,11 @@ omBratteng
onitake
Oursin
qubidt
+RedKrieg
renanrodrigo
rhansen
riedel
+rongz609
sarahwzadara
shi2wei3
slingamn
diff --git a/tools/read-version b/tools/read-version
index 02c90643..c5cd153f 100755
--- a/tools/read-version
+++ b/tools/read-version
@@ -11,19 +11,11 @@ if "avoid-pep8-E402-import-not-top-of-file":
from cloudinit import version as ci_version
-def tiny_p(cmd, capture=True):
- # python 2.6 doesn't have check_output
- stdout = subprocess.PIPE
+def tiny_p(cmd):
stderr = subprocess.PIPE
- sp = subprocess.Popen(cmd, stdout=stdout,
- stderr=stderr, stdin=None,
- universal_newlines=True)
- (out, err) = sp.communicate()
- ret = sp.returncode
- if ret not in [0]:
- raise RuntimeError("Failed running %s [rc=%s] (%s, %s)" %
- (cmd, ret, out, err))
- return out
+ return subprocess.check_output(
+ cmd, stderr=stderr, stdin=None, universal_newlines=True
+ )
def which(program):
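The tiny_p rewrite above works because subprocess.check_output already raises
on any non-zero exit; a caller that wants the old RuntimeError-style message
can reconstruct it from CalledProcessError. A sketch:

    import subprocess

    try:
        out = subprocess.check_output(
            ["git", "describe", "--abbrev=8"],
            stderr=subprocess.PIPE,
            universal_newlines=True,
        )
    except subprocess.CalledProcessError as e:
        # e.cmd, e.returncode, e.output and e.stderr replace the old
        # manual Popen bookkeeping.
        raise RuntimeError(
            "Failed running %s [rc=%s] (%s, %s)"
            % (e.cmd, e.returncode, e.output, e.stderr)
        ) from e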
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
index 176df36b..eae83217 100755
--- a/tools/render-cloudcfg
+++ b/tools/render-cloudcfg
@@ -25,6 +25,7 @@ def main():
"netbsd",
"openbsd",
"openEuler",
+ "openmandriva",
"photon",
"rhel",
"suse",
diff --git a/tox.ini b/tox.ini
index e69c6e13..26588585 100644
--- a/tox.ini
+++ b/tox.ini
@@ -2,6 +2,9 @@
envlist = py3, lowest-supported-dev, black, flake8, isort, mypy, pylint
recreate = True
+[doc8]
+ignore-path-errors=doc/rtd/topics/faq.rst;D001
+
[testenv]
basepython = python3
setenv =
@@ -14,7 +17,7 @@ black==22.3.0
flake8==4.0.1
isort==5.10.1
mypy==0.950
-pylint==2.13.8
+pylint==2.13.9
pytest==7.0.1
types-jsonschema==4.4.2
types-oauthlib==3.1.6
@@ -77,6 +80,24 @@ commands =
{[testenv:mypy]commands}
{[testenv:pylint]commands}
+[testenv:check_format_tip]
+deps =
+ black
+ flake8
+ isort
+ mypy
+ pylint
+ pytest
+ types-jsonschema
+ types-oauthlib
+ types-pyyaml
+ types-requests
+ types-setuptools
+ -r{toxinidir}/test-requirements.txt
+ -r{toxinidir}/integration-requirements.txt
+commands =
+ {[testenv:check_format]commands}
+
[testenv:do_format]
deps =
black=={[format_deps]black}
@@ -85,6 +106,13 @@ commands =
{envpython} -m isort .
{envpython} -m black .
+[testenv:do_format_tip]
+deps =
+ black
+ isort
+commands =
+ {[testenv:do_format]commands}
+
[testenv:py3]
deps =
-r{toxinidir}/test-requirements.txt
@@ -146,12 +174,24 @@ commands =
{envpython} -m sphinx {posargs:-W doc/rtd doc/rtd_html}
doc8 doc/rtd
+# linkcheck shows false positives and has noisy output.
+# Despite these limitations, it is better than a manual search of the docs.
+# suggested workflow is:
+#
+# tox -e linkcheck | grep broken # takes some time
+#
+# followed by manual verification of the links reported
+[testenv:linkcheck]
+deps =
+ -r{toxinidir}/doc-requirements.txt
+commands =
+ {envpython} -m sphinx {posargs:-b linkcheck doc/rtd doc/rtd_html}
+
[testenv:tip-flake8]
-commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/ conftest.py setup.py}
deps = flake8
+commands = {[testenv:flake8]commands}
[testenv:tip-mypy]
-commands = {envpython} -m mypy --install-types --non-interactive cloudinit/ tests/ tools/
deps =
mypy
pytest
@@ -160,15 +200,24 @@ deps =
types-PyYAML
types-requests
types-setuptools
+commands = {[testenv:mypy]commands}
[testenv:tip-pylint]
-commands = {envpython} -m pylint {posargs:cloudinit/ tests/ tools/ conftest.py setup.py}
deps =
# requirements
pylint
# test-requirements
-r{toxinidir}/test-requirements.txt
-r{toxinidir}/integration-requirements.txt
+commands = {[testenv:pylint]commands}
+
+[testenv:tip-black]
+deps = black
+commands = {[testenv:black]commands}
+
+[testenv:tip-isort]
+deps = isort
+commands = {[testenv:isort]commands}
[testenv:integration-tests]
commands = {envpython} -m pytest --log-cli-level=INFO -vv {posargs:tests/integration_tests}
@@ -184,7 +233,10 @@ setenv =
PYTEST_ADDOPTS="-m ci and not adhoc"
[testenv:integration-tests-jenkins]
-commands = {[testenv:integration-tests]commands}
+# Pytest's RC=1 means "Tests were collected and run but some of the tests failed".
+# Do not fail in this case, but let Jenkins handle it using the junit report.
+allowlist_externals = sh
+commands = sh -c "{envpython} -m pytest --log-cli-level=INFO -vv {posargs:tests/integration_tests/none} || [ $? -eq 1 ]"
deps = {[testenv:integration-tests]deps}
passenv = *_proxy CLOUD_INIT_* PYCLOUDLIB_* SSH_AUTH_SOCK OS_* GOOGLE_* GCP_*
setenv =
@@ -226,3 +278,4 @@ markers =
ubuntu: this test should run on Ubuntu
unstable: skip this test because it is flakey
adhoc: only run on adhoc basis, not in any CI environment (travis or jenkins)
+ is_iscsi: whether an instance has iscsi net cfg or not
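The is_iscsi marker registered above pairs with the @pytest.mark.is_iscsi(...)
usages in test_oracle.py; presumably a fixture reads the marker to decide how
to patch _is_iscsi_root. A hedged sketch of such a fixture (the body is an
assumption; only the marker name comes from this patch):

    import pytest

    @pytest.fixture
    def is_iscsi(request):
        # Default unless the test is marked, e.g. @pytest.mark.is_iscsi(False).
        marker = request.node.get_closest_marker("is_iscsi")
        return marker.args[0] if marker else True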