author     Chad Smith <chad.smith@canonical.com>  2020-05-27 19:07:02 -0600
committer  Chad Smith <chad.smith@canonical.com>  2020-05-27 19:07:02 -0600
commit     568e36d7c6266a8faa8a277c5f78738cbd996180 (patch)
tree       f9142770b47bb3ba481ae678580a25ee6b4d6b40
parent     6bf245d4d482bd1ab5251746bc643e18e2b65d2e (diff)
parent     8377897bdd1a88d4dc3b6456618231085c55af42 (diff)
download   cloud-init-git-568e36d7c6266a8faa8a277c5f78738cbd996180.tar.gz
merge from 8377897b at 20.2-38-g8377897b
-rw-r--r--  .github/workflows/cla.yml | 47
-rw-r--r--  .github/workflows/stale.yml | 24
-rw-r--r--  .gitignore | 3
-rw-r--r--  .pylintrc | 1
-rw-r--r--  .readthedocs.yaml | 8
-rw-r--r--  .travis.yml | 43
-rw-r--r--  ChangeLog | 182
-rw-r--r--  HACKING.rst | 351
-rw-r--r--  Makefile | 58
-rw-r--r--  README.md | 2
-rw-r--r--  cloudinit/analyze/dump.py | 15
-rw-r--r--  cloudinit/analyze/tests/test_dump.py | 17
-rw-r--r--  cloudinit/apport.py | 1
-rwxr-xr-x  cloudinit/cmd/devel/net_convert.py | 2
-rw-r--r--  cloudinit/cmd/devel/tests/test_logs.py | 2
-rw-r--r--  cloudinit/cmd/devel/tests/test_render.py | 2
-rw-r--r--  cloudinit/cmd/query.py | 3
-rw-r--r--  cloudinit/cmd/tests/test_clean.py | 3
-rw-r--r--  cloudinit/cmd/tests/test_cloud_id.py | 2
-rw-r--r--  cloudinit/cmd/tests/test_main.py | 4
-rw-r--r--  cloudinit/cmd/tests/test_query.py | 2
-rw-r--r--  cloudinit/cmd/tests/test_status.py | 3
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 573
-rw-r--r--  cloudinit/config/cc_chef.py | 4
-rw-r--r--  cloudinit/config/cc_debug.py | 3
-rw-r--r--  cloudinit/config/cc_disk_setup.py | 3
-rw-r--r--  cloudinit/config/cc_landscape.py | 3
-rw-r--r--  cloudinit/config/cc_locale.py | 65
-rw-r--r--  cloudinit/config/cc_mcollective.py | 10
-rw-r--r--  cloudinit/config/cc_mounts.py | 82
-rw-r--r--  cloudinit/config/cc_ntp.py | 62
-rw-r--r--  cloudinit/config/cc_phone_home.py | 3
-rw-r--r--  cloudinit/config/cc_power_state_change.py | 9
-rw-r--r--  cloudinit/config/cc_puppet.py | 3
-rw-r--r--  cloudinit/config/cc_rightscale_userdata.py | 3
-rw-r--r--  cloudinit/config/cc_rsyslog.py | 7
-rw-r--r--  cloudinit/config/cc_seed_random.py | 3
-rwxr-xr-x  cloudinit/config/cc_set_passwords.py | 4
-rw-r--r--  cloudinit/config/cc_snap.py | 32
-rwxr-xr-x  cloudinit/config/cc_ssh_authkey_fingerprints.py | 8
-rw-r--r--  cloudinit/config/cc_ubuntu_advantage.py | 4
-rw-r--r--  cloudinit/config/cc_write_files.py | 194
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py | 16
-rw-r--r--  cloudinit/config/cc_zypper_add_repo.py | 3
-rw-r--r--  cloudinit/config/schema.py | 135
-rw-r--r--  cloudinit/config/tests/test_disable_ec2_metadata.py | 2
-rw-r--r--  cloudinit/config/tests/test_mounts.py | 28
-rw-r--r--  cloudinit/config/tests/test_resolv_conf.py | 86
-rw-r--r--  cloudinit/config/tests/test_set_passwords.py | 22
-rw-r--r--  cloudinit/config/tests/test_snap.py | 50
-rw-r--r--  cloudinit/config/tests/test_ubuntu_drivers.py | 7
-rw-r--r--  cloudinit/config/tests/test_users_groups.py | 10
-rw-r--r--  cloudinit/conftest.py | 72
-rwxr-xr-x  cloudinit/distros/__init__.py | 131
-rw-r--r--  cloudinit/distros/bsd.py | 126
-rw-r--r--  cloudinit/distros/bsd_utils.py | 50
-rw-r--r--  cloudinit/distros/debian.py | 3
-rw-r--r--  cloudinit/distros/freebsd.py | 142
-rw-r--r--  cloudinit/distros/netbsd.py | 156
-rw-r--r--  cloudinit/distros/openbsd.py | 50
-rw-r--r--  cloudinit/distros/opensuse.py | 4
-rw-r--r--  cloudinit/distros/parsers/hostname.py | 2
-rw-r--r--  cloudinit/distros/parsers/hosts.py | 2
-rw-r--r--  cloudinit/distros/parsers/resolv_conf.py | 2
-rw-r--r--  cloudinit/distros/parsers/sys_conf.py | 6
-rw-r--r--  cloudinit/distros/tests/__init__.py | 0
-rw-r--r--  cloudinit/distros/tests/test_init.py | 135
-rwxr-xr-x  cloudinit/distros/ug_util.py | 22
-rw-r--r--  cloudinit/ec2_utils.py | 12
-rw-r--r--  cloudinit/handlers/__init__.py | 9
-rw-r--r--  cloudinit/helpers.py | 10
-rw-r--r--  cloudinit/log.py | 31
-rw-r--r--  cloudinit/mergers/__init__.py | 4
-rw-r--r--  cloudinit/mergers/m_dict.py | 4
-rw-r--r--  cloudinit/mergers/m_list.py | 4
-rw-r--r--  cloudinit/mergers/m_str.py | 9
-rw-r--r--  cloudinit/net/__init__.py | 79
-rw-r--r--  cloudinit/net/bsd.py | 165
-rwxr-xr-x  cloudinit/net/cmdline.py | 63
-rw-r--r--  cloudinit/net/dhcp.py | 2
-rw-r--r--  cloudinit/net/freebsd.py | 169
-rw-r--r--  cloudinit/net/netbsd.py | 42
-rw-r--r--  cloudinit/net/network_state.py | 15
-rw-r--r--  cloudinit/net/openbsd.py | 44
-rw-r--r--  cloudinit/net/renderer.py | 4
-rw-r--r--  cloudinit/net/renderers.py | 7
-rw-r--r--  cloudinit/net/sysconfig.py | 366
-rw-r--r--  cloudinit/net/tests/test_dhcp.py | 10
-rw-r--r--  cloudinit/net/tests/test_init.py | 65
-rw-r--r--  cloudinit/net/tests/test_network_state.py | 13
-rw-r--r--  cloudinit/netinfo.py | 52
-rwxr-xr-x  cloudinit/reporting/handlers.py | 19
-rw-r--r--  cloudinit/serial.py | 2
-rw-r--r--  cloudinit/signal_handler.py | 3
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 8
-rw-r--r--  cloudinit/sources/DataSourceEc2.py | 223
-rw-r--r--  cloudinit/sources/DataSourceMAAS.py | 2
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py | 23
-rw-r--r--  cloudinit/sources/DataSourceOVF.py | 37
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py | 5
-rw-r--r--  cloudinit/sources/DataSourceOracle.py | 28
-rw-r--r--  cloudinit/sources/DataSourceRbxCloud.py | 17
-rw-r--r--  cloudinit/sources/DataSourceScaleway.py | 9
-rw-r--r--  cloudinit/sources/__init__.py | 74
-rw-r--r--  cloudinit/sources/helpers/openstack.py | 9
-rw-r--r--  cloudinit/sources/helpers/tests/test_netlink.py | 2
-rw-r--r--  cloudinit/sources/helpers/tests/test_openstack.py | 44
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config.py | 11
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_util.py | 9
-rw-r--r--  cloudinit/sources/tests/test_init.py | 188
-rw-r--r--  cloudinit/sources/tests/test_oracle.py | 26
-rw-r--r--  cloudinit/stages.py | 6
-rw-r--r--  cloudinit/templater.py | 4
-rw-r--r--  cloudinit/tests/helpers.py | 76
-rw-r--r--  cloudinit/tests/test_conftest.py | 61
-rw-r--r--  cloudinit/tests/test_dhclient_hook.py | 2
-rw-r--r--  cloudinit/tests/test_gpg.py | 4
-rw-r--r--  cloudinit/tests/test_url_helper.py | 34
-rw-r--r--  cloudinit/tests/test_util.py | 132
-rw-r--r--  cloudinit/tests/test_version.py | 4
-rw-r--r--  cloudinit/type_utils.py | 25
-rw-r--r--  cloudinit/url_helper.py | 50
-rw-r--r--  cloudinit/user_data.py | 15
-rw-r--r--  cloudinit/util.py | 240
-rw-r--r--  cloudinit/version.py | 2
-rw-r--r--  config/cloud.cfg.tmpl | 41
-rw-r--r--  doc-requirements.txt | 2
-rw-r--r--  doc/examples/cloud-config-apt.txt | 43
-rw-r--r--  doc/examples/cloud-config-boot-cmds.txt | 4
-rw-r--r--  doc/examples/cloud-config-chef-oneiric.txt | 115
-rw-r--r--  doc/examples/cloud-config-chef.txt | 84
-rw-r--r--  doc/examples/cloud-config-datasources.txt | 8
-rw-r--r--  doc/examples/cloud-config-disk-setup.txt | 333
-rw-r--r--  doc/examples/cloud-config-landscape.txt | 1
-rw-r--r--  doc/examples/cloud-config-mcollective.txt | 82
-rw-r--r--  doc/examples/cloud-config-mount-points.txt | 8
-rw-r--r--  doc/examples/cloud-config-phone-home.txt | 10
-rw-r--r--  doc/examples/cloud-config-power-state.txt | 10
-rw-r--r--  doc/examples/cloud-config-puppet.txt | 88
-rw-r--r--  doc/examples/cloud-config-reporting.txt | 22
-rw-r--r--  doc/examples/cloud-config-rh_subscription.txt | 48
-rw-r--r--  doc/examples/cloud-config-rsyslog.txt | 43
-rw-r--r--  doc/examples/cloud-config-user-groups.txt | 24
-rw-r--r--  doc/examples/cloud-config-vendor-data.txt | 4
-rw-r--r--  doc/examples/cloud-config-write-files.txt | 42
-rw-r--r--  doc/examples/cloud-config-yum-repo.txt | 24
-rw-r--r--  doc/examples/cloud-config.txt | 53
-rw-r--r--  doc/examples/kernel-cmdline.txt | 13
-rw-r--r--  doc/rtd/conf.py | 2
-rw-r--r--  doc/rtd/index.rst | 1
-rw-r--r--  doc/rtd/topics/availability.rst | 6
-rw-r--r--  doc/rtd/topics/boot.rst | 4
-rw-r--r--  doc/rtd/topics/code_review.rst | 256
-rw-r--r--  doc/rtd/topics/datasources/azure.rst | 16
-rw-r--r--  doc/rtd/topics/datasources/cloudstack.rst | 10
-rw-r--r--  doc/rtd/topics/datasources/ec2.rst | 27
-rw-r--r--  doc/rtd/topics/datasources/nocloud.rst | 14
-rw-r--r--  doc/rtd/topics/datasources/openstack.rst | 15
-rw-r--r--  doc/rtd/topics/debugging.rst | 101
-rw-r--r--  doc/rtd/topics/faq.rst | 21
-rw-r--r--  doc/rtd/topics/format.rst | 31
-rw-r--r--  doc/rtd/topics/instancedata.rst | 362
-rw-r--r--  doc/rtd/topics/network-config.rst | 24
-rw-r--r--  doc/rtd/topics/tests.rst | 66
-rw-r--r--  integration-requirements.txt | 1
-rwxr-xr-x  packages/bddeb | 35
-rwxr-xr-x  packages/brpm | 4
-rw-r--r--  packages/debian/control.in | 3
-rwxr-xr-x  packages/debian/rules (renamed from packages/debian/rules.in) | 6
-rw-r--r--  packages/pkg-deps.json | 58
-rw-r--r--  packages/redhat/cloud-init.spec.in | 10
-rw-r--r--  requirements.txt | 3
-rwxr-xr-x  setup.py | 41
-rwxr-xr-x  sysvinit/freebsd/cloudconfig | 3
-rwxr-xr-x  sysvinit/freebsd/cloudfinal | 3
-rwxr-xr-x  sysvinit/freebsd/cloudinit | 5
-rwxr-xr-x  sysvinit/freebsd/cloudinitlocal | 3
-rwxr-xr-x  sysvinit/netbsd/cloudconfig | 17
-rwxr-xr-x  sysvinit/netbsd/cloudfinal | 16
-rwxr-xr-x  sysvinit/netbsd/cloudinit | 16
-rwxr-xr-x  sysvinit/netbsd/cloudinitlocal | 18
-rw-r--r--  templates/hosts.suse.tmpl | 2
-rw-r--r--  templates/resolv.conf.tmpl | 14
-rw-r--r--  test-requirements.txt | 10
-rw-r--r--  tests/cloud_tests/__init__.py | 3
-rw-r--r--  tests/cloud_tests/config.py | 2
-rw-r--r--  tests/cloud_tests/platforms/__init__.py | 4
-rw-r--r--  tests/cloud_tests/platforms/azurecloud/image.py | 32
-rw-r--r--  tests/cloud_tests/platforms/azurecloud/instance.py | 15
-rw-r--r--  tests/cloud_tests/platforms/azurecloud/platform.py | 5
-rw-r--r--  tests/cloud_tests/platforms/nocloudkvm/platform.py | 10
-rw-r--r--  tests/cloud_tests/releases.yaml | 36
-rw-r--r--  tests/cloud_tests/setup_image.py | 2
-rw-r--r--  tests/cloud_tests/testcases/__init__.py | 8
-rw-r--r--  tests/cloud_tests/testcases/base.py | 65
-rw-r--r--  tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml | 73
-rw-r--r--  tests/cloud_tests/testcases/modules/ntp_chrony.py | 4
-rw-r--r--  tests/cloud_tests/util.py | 33
-rw-r--r--  tests/cloud_tests/verify.py | 4
-rw-r--r--  tests/unittests/test_builtin_handlers.py | 6
-rw-r--r--  tests/unittests/test_cli.py | 20
-rw-r--r--  tests/unittests/test_cs_util.py | 2
-rw-r--r--  tests/unittests/test_data.py | 43
-rw-r--r--  tests/unittests/test_datasource/test_aliyun.py | 2
-rw-r--r--  tests/unittests/test_datasource/test_azure.py | 10
-rw-r--r--  tests/unittests/test_datasource/test_azure_helper.py | 6
-rw-r--r--  tests/unittests/test_datasource/test_cloudsigma.py | 6
-rw-r--r--  tests/unittests/test_datasource/test_ec2.py | 407
-rw-r--r--  tests/unittests/test_datasource/test_gce.py | 4
-rw-r--r--  tests/unittests/test_datasource/test_maas.py | 3
-rw-r--r--  tests/unittests/test_datasource/test_nocloud.py | 17
-rw-r--r--  tests/unittests/test_datasource/test_opennebula.py | 1
-rw-r--r--  tests/unittests/test_datasource/test_openstack.py | 26
-rw-r--r--  tests/unittests/test_datasource/test_rbx.py | 30
-rw-r--r--  tests/unittests/test_datasource/test_scaleway.py | 49
-rw-r--r--  tests/unittests/test_datasource/test_smartos.py | 18
-rw-r--r--  tests/unittests/test_distros/test_bsd_utils.py | 66
-rw-r--r--  tests/unittests/test_distros/test_generic.py | 6
-rw-r--r--  tests/unittests/test_distros/test_netbsd.py | 17
-rw-r--r--  tests/unittests/test_distros/test_netconfig.py | 47
-rw-r--r--  tests/unittests/test_distros/test_user_data_normalize.py | 3
-rw-r--r--  tests/unittests/test_ds_identify.py | 10
-rw-r--r--  tests/unittests/test_filters/test_launch_index.py | 3
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py | 8
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py | 10
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_source_v1.py | 10
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_source_v3.py | 40
-rw-r--r--  tests/unittests/test_handler/test_handler_ca_certs.py | 11
-rw-r--r--  tests/unittests/test_handler/test_handler_chef.py | 11
-rw-r--r--  tests/unittests/test_handler/test_handler_disk_setup.py | 18
-rw-r--r--  tests/unittests/test_handler/test_handler_etc_hosts.py | 8
-rw-r--r--  tests/unittests/test_handler/test_handler_growpart.py | 11
-rw-r--r--  tests/unittests/test_handler/test_handler_locale.py | 7
-rw-r--r--  tests/unittests/test_handler/test_handler_lxd.py | 5
-rw-r--r--  tests/unittests/test_handler/test_handler_mcollective.py | 2
-rw-r--r--  tests/unittests/test_handler/test_handler_mounts.py | 24
-rw-r--r--  tests/unittests/test_handler/test_handler_puppet.py | 2
-rw-r--r--  tests/unittests/test_handler/test_handler_seed_random.py | 3
-rw-r--r--  tests/unittests/test_handler/test_handler_set_hostname.py | 2
-rw-r--r--  tests/unittests/test_handler/test_handler_spacewalk.py | 6
-rw-r--r--  tests/unittests/test_handler/test_handler_timezone.py | 2
-rw-r--r--  tests/unittests/test_handler/test_handler_write_files.py | 96
-rw-r--r--  tests/unittests/test_handler/test_handler_yum_add_repo.py | 17
-rw-r--r--  tests/unittests/test_handler/test_handler_zypper_add_repo.py | 11
-rw-r--r--  tests/unittests/test_handler/test_schema.py | 110
-rw-r--r--  tests/unittests/test_log.py | 11
-rw-r--r--  tests/unittests/test_merging.py | 6
-rw-r--r--  tests/unittests/test_net.py | 701
-rw-r--r--  tests/unittests/test_render_cloudcfg.py | 57
-rw-r--r--  tests/unittests/test_reporting.py | 4
-rw-r--r--  tests/unittests/test_reporting_hyperv.py | 2
-rw-r--r--  tests/unittests/test_sshutil.py | 2
-rw-r--r--  tests/unittests/test_templating.py | 2
-rw-r--r--  tests/unittests/test_util.py | 119
-rw-r--r--  tests/unittests/test_vmware/test_guestcust_util.py | 26
-rw-r--r--  tests/unittests/test_vmware_config_file.py | 8
-rw-r--r--  tools/.github-cla-signers | 7
-rw-r--r--  tools/.lp-to-git-user | 4
-rwxr-xr-x  tools/build-on-freebsd | 1
-rwxr-xr-x  tools/build-on-netbsd | 36
-rwxr-xr-x  tools/build-on-openbsd | 27
-rwxr-xr-x  tools/ccfg-merge-debug | 2
-rwxr-xr-x  tools/ds-identify | 4
-rwxr-xr-x  tools/make-mime.py | 2
-rwxr-xr-x  tools/make-tarball | 12
-rwxr-xr-x  tools/mock-meta.py | 2
-rwxr-xr-x  tools/pipremove | 2
-rwxr-xr-x  tools/read-dependencies | 34
-rwxr-xr-x  tools/read-version | 11
-rwxr-xr-x  tools/render-cloudcfg | 5
-rwxr-xr-x  tools/run-container | 64
-rwxr-xr-x  tools/run-pyflakes | 3
-rwxr-xr-x  tools/run-pyflakes3 | 2
-rwxr-xr-x  tools/tox-venv | 2
-rwxr-xr-x  tools/validate-yaml.py | 2
-rw-r--r--  tox.ini | 99
276 files changed, 7546 insertions, 3236 deletions
diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml
index 34e11c2d..8a0b2c07 100644
--- a/.github/workflows/cla.yml
+++ b/.github/workflows/cla.yml
@@ -8,22 +8,33 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- - run: |
- echo "::set-env name=CLA_SIGNED::$(grep -q ': \"${{ github.actor }}\"' ./tools/.lp-to-git-user && echo CLA signed || echo CLA not signed)"
- - name: Add CLA label
+ - name: Check CLA signing status for ${{ github.event.pull_request.user.login }}
run: |
- # POST a new label to this issue
- curl --request POST \
- --url https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.number }}/labels \
- --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \
- --header 'content-type: application/json' \
- --data '{"labels": ["${{env.CLA_SIGNED}}"]}'
- - name: Comment about CLA signing
- if: env.CLA_SIGNED == 'CLA not signed'
- run: |
- # POST a comment directing submitter to sign the CLA
- curl --request POST \
- --url https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.number }}/comments \
- --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \
- --header 'content-type: application/json' \
- --data '{"body": "Hello ${{ github.actor }},\n\nThank you for your contribution to cloud-init.\n\nIn order for us to merge this pull request, you need\nto have signed the Contributor License Agreement (CLA).\nPlease ensure that you have signed the CLA by following our\nhacking guide at:\n\nhttps://cloudinit.readthedocs.io/en/latest/topics/hacking.html\n\nThanks,\nYour friendly cloud-init upstream\n"}'
+ cat > unsigned-cla.txt <<EOF
+ Hello ${{ github.event.pull_request.user.login }},
+
+ Thank you for your contribution to cloud-init.
+
+ In order for us to merge this pull request, you need
+ to have signed the Contributor License Agreement (CLA).
+ Please sign the CLA by following our
+ hacking guide at:
+ https://cloudinit.readthedocs.io/en/latest/topics/hacking.html
+
+ Thanks,
+ Your friendly cloud-init upstream
+ EOF
+
+ has_signed() {
+ username="$1"
+ grep -q ": \"$username\"" ./tools/.lp-to-git-user && return 0
+ grep -q "^$username$" ./tools/.github-cla-signers && return 0
+ return 1
+ }
+
+ if has_signed "${{ github.event.pull_request.user.login }}"; then
+ echo "Thanks ${{ github.event.pull_request.user.login }} for signing cloud-init's CLA"
+ else
+ cat unsigned-cla.txt
+ exit 1
+ fi
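
For illustration, the signing check introduced above can be reproduced locally. This is a sketch only, assuming (as the two grep patterns imply) that tools/.lp-to-git-user is a JSON mapping of Launchpad usernames to GitHub usernames and that tools/.github-cla-signers lists one GitHub username per line:

    #!/usr/bin/env python3
    # Sketch: mirror the workflow's has_signed() check (assumed file formats).
    import json
    import sys

    def has_signed(username):
        # Signers migrated from Launchpad appear as JSON *values* here,
        # matching the `: "$username"` grep in the workflow.
        with open('tools/.lp-to-git-user') as f:
            if username in json.load(f).values():
                return True
        # Direct GitHub signers are listed one per line.
        with open('tools/.github-cla-signers') as f:
            return username in (line.strip() for line in f)

    if __name__ == '__main__':
        sys.exit(0 if has_signed(sys.argv[1]) else 1)

Saved as (hypothetically) check_cla.py, this would be run from the repository root as: python3 check_cla.py GH_USER
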
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
new file mode 100644
index 00000000..5da7c976
--- /dev/null
+++ b/.github/workflows/stale.yml
@@ -0,0 +1,24 @@
+name: Mark and close stale pull requests
+
+on:
+ schedule:
+ - cron: "0 0 * * *" # Daily @ 00:00
+
+jobs:
+ stale:
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/stale@v1
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ days-before-stale: 14
+ days-before-close: 7
+ stale-pr-message: |
+ Hello! Thank you for this proposed change to cloud-init. This pull request is now marked as stale as it has not seen any activity in 14 days. If no activity occurs within the next 7 days, this pull request will automatically close.
+
+ If you are waiting for code review and you are seeing this message, apologies! Please reply, tagging powersj, and he will ensure that someone takes a look soon.
+
+ (If the pull request is closed, please do feel free to reopen it if you wish to continue working on it.)
+ stale-pr-label: 'stale-pr'
diff --git a/.gitignore b/.gitignore
index 9e19c618..3589b210 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,6 +13,9 @@ stage
*.cover
.idea/
.venv/
+.pc/
+.cache/
+.mypy_cache/
# Ignore packaging artifacts
cloud-init.dsc
diff --git a/.pylintrc b/.pylintrc
index c83546a6..4d5d066d 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -50,7 +50,6 @@ ignored-modules=
http.client,
httplib,
pkg_resources,
- six.moves,
# cloud_tests requirements.
boto3,
botocore,
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
new file mode 100644
index 00000000..46af8ee7
--- /dev/null
+++ b/.readthedocs.yaml
@@ -0,0 +1,8 @@
+version: 2
+
+formats: all
+
+python:
+ install:
+ - requirements: doc-requirements.txt
+ - path: .
diff --git a/.travis.yml b/.travis.yml
index 15157b86..3de1066b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,6 +1,22 @@
language: python
dist: bionic
+# We use two different caching strategies. The default is to cache pip
+# packages (as most of our jobs use pip packages), which is configured here.
+# For the integration tests, we instead want to cache the lxd images. The
+# directory in which the images are stored (/var/snap/lxd/common/lxd/images/)
+# is not readable/writeable by the default user (which is a requirement for
+# caching), so we instead cache the `lxd_images/` directory. We move lxd
+# images out of there before we run tests and back in once tests are complete.
+# We _move_ the images out and only copy the most recent lxd image back into
+# the cache, to avoid our cache growing without bound. (We only need the most
+# recent lxd image because the integration tests only use a single image.)
+#
+# We cache the lxd images because this saves a few seconds in the general
+# case, but provides substantial speed-ups when cloud-images.ubuntu.com, the
+# source of the images, is under heavy load.
+cache: pip
+
install:
# Required so `git describe` will definitely find a tag; see
# https://github.com/travis-ci/travis-ci/issues/7422
@@ -16,13 +32,25 @@ matrix:
- python: 3.6
env:
TOXENV=py3
- NOSE_VERBOSE=2 # List all tests run by nose
- - install:
+ PYTEST_ADDOPTS=-v # List all tests run by pytest
+ - if: NOT branch =~ /^ubuntu\//
+ cache:
+ - directories:
+ - lxd_images
+ before_cache:
+ - |
+ # Find the most recent image file
+ latest_file="$(sudo ls -Art /var/snap/lxd/common/lxd/images/ | tail -n 1)"
+ # This might be <hash>.rootfs or <hash>, normalise
+ latest_file="$(basename $latest_file .rootfs)"
+ # Find all files with that prefix and copy them to our cache dir
+ sudo find /var/snap/lxd/common/lxd/images/ -name $latest_file* -print -exec cp {} lxd_images/ \;
+ install:
- git fetch --unshallow
- sudo apt-get build-dep -y cloud-init
- sudo apt-get install -y --install-recommends sbuild ubuntu-dev-tools fakeroot tox
# These are build deps but not pulled in by the build-dep call above
- - sudo apt-get install -y --install-recommends dh-systemd python3-coverage python3-contextlib2
+ - sudo apt-get install -y --install-recommends dh-systemd python3-coverage python3-pytest python3-pytest-cov
- pip install .
- pip install tox
# bionic has lxd from deb installed, remove it first to ensure
@@ -32,6 +60,8 @@ matrix:
- sudo snap install lxd
- sudo lxd init --auto
- sudo mkdir --mode=1777 -p /var/snap/lxd/common/consoles
+ # Move any cached lxd images into lxd's image dir
+ - sudo find lxd_images/ -type f -print -exec mv {} /var/snap/lxd/common/lxd/images/ \;
- sudo usermod -a -G lxd $USER
- sudo sbuild-adduser $USER
- cp /usr/share/doc/sbuild/examples/example.sbuildrc /home/$USER/.sbuildrc
@@ -40,14 +70,13 @@ matrix:
- ./packages/bddeb -S
# Use this to get a new shell where we're in the sbuild group
- sudo -E su $USER -c 'mk-sbuild xenial'
- - sudo -E su $USER -c 'sbuild --nolog --verbose --dist=xenial cloud-init_*.dsc'
+ - sudo -E su $USER -c 'sbuild --nolog --no-run-lintian --verbose --dist=xenial cloud-init_*.dsc'
# Ubuntu LTS: Integration
- sg lxd -c 'tox -e citest -- run --verbose --preserve-data --data-dir results --os-name xenial --test modules/apt_configure_sources_list.yaml --test modules/ntp_servers --test modules/set_password_list --test modules/user_groups --deb cloud-init_*_all.deb'
- - python: 3.4
+ - python: 3.5
env:
TOXENV=xenial
- NOSE_VERBOSE=2 # List all tests run by nose
- # Travis doesn't support Python 3.4 on bionic, so use xenial
+ PYTEST_ADDOPTS=-v # List all tests run by pytest
dist: xenial
- python: 3.6
env: TOXENV=pycodestyle
diff --git a/ChangeLog b/ChangeLog
index 0430267f..0df01b16 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,185 @@
+20.2
+ - doc/format: reference make-mime.py instead of an inline script (#334)
+ - Add docs about creating parent folders (#330) [Adrian Wilkins]
+ - DataSourceNoCloud/OVF: drop claim to support FTP (#333) (LP: #1875470)
+ - schema: ignore spurious pylint error (#332)
+ - schema: add json schema for write_files module (#152)
+ - BSD: find_devs_with_ refactoring (#298) [Gonéri Le Bouder]
+ - nocloud: drop work around for Linux 2.6 (#324) [Gonéri Le Bouder]
+ - cloudinit: drop dependencies on unittest2 and contextlib2 (#322)
+ - distros: handle a potential mirror filtering error case (#328)
+ - log: remove unnecessary import fallback logic (#327)
+ - .travis.yml: don't run integration test on ubuntu/* branches (#321)
+ - More unit test documentation (#314)
+ - conftest: introduce disable_subp_usage autouse fixture (#304)
+ - YAML align indent sizes for docs readability (#323) [Tak Nishigori]
+ - network_state: add missing space to log message (#325)
+ - tests: add missing mocks for get_interfaces_by_mac (#326) (LP: #1873910)
+ - test_mounts: expand happy path test for both happy paths (#319)
+ - cc_mounts: fix incorrect format specifiers (#316) (LP: #1872836)
+ - swap file "size" being used before checked if str (#315) [Eduardo Otubo]
+ - HACKING.rst: add pytest version gotchas section (#311)
+ - docs: Add steps to re-run cloud-id and cloud-init (#313) [Joshua Powers]
+ - readme: OpenBSD is now supported (#309) [Gonéri Le Bouder]
+ - net: ignore 'renderer' key in netplan config (#306) (LP: #1870421)
+ - Add support for NFS/EFS mounts (#300) [Andrew Beresford] (LP: #1870370)
+ - openbsd: set_passwd should not unlock user (#289) [Gonéri Le Bouder]
+ - tools/.github-cla-signers: add beezly as CLA signer (#301)
+ - util: remove unnecessary lru_cache import fallback (#299)
+ - HACKING.rst: reorganise/update CLA signature info (#297)
+ - distros: drop leading/trailing hyphens from mirror URL labels (#296)
+ - HACKING.rst: add note about variable annotations (#295)
+ - CiTestCase: stop using and remove sys_exit helper (#283)
+ - distros: replace invalid characters in mirror URLs with hyphens (#291)
+ (LP: #1868232)
+ - rbxcloud: gracefully handle arping errors (#262) [Adam Dobrawy]
+ - Fix cloud-init ignoring some misdeclared mimetypes in user-data.
+ [Kurt Garloff]
+ - net: ubuntu focal prioritize netplan over eni even if both present
+ (#267) (LP: #1867029)
+ - cloudinit: refactor util.is_ipv4 to net.is_ipv4_address (#292)
+ - net/cmdline: replace type comments with annotations (#294)
+ - HACKING.rst: add Type Annotations design section (#293)
+ - net: introduce is_ip_address function (#288)
+ - CiTestCase: remove now-unneeded parse_and_read helper method (#286)
+ - .travis.yml: allow 30 minutes of inactivity in cloud tests (#287)
+ - sources/tests/test_init: drop use of deprecated inspect.getargspec (#285)
+ - setup.py: drop NIH check_output implementation (#282)
+ - Identify SAP Converged Cloud as OpenStack [Silvio Knizek]
+ - add Openbsd support (#147) [Gonéri Le Bouder]
+ - HACKING.rst: add examples of the two test class types (#278)
+ - VMWware: support to update guest info gc status if enabled (#261)
+ [xiaofengw-vmware]
+ - Add lp-to-git mapping for kgarloff (#279)
+ - set_passwords: avoid chpasswd on BSD (#268) [Gonéri Le Bouder]
+ - HACKING.rst: add Unit Testing design section (#277)
+ - util: read_cc_from_cmdline handle urlencoded yaml content (#275)
+ - distros/tests/test_init: add tests for _get_package_mirror_info (#272)
+ - HACKING.rst: add links to new Code Review Process doc (#276)
+ - freebsd: ensure package update works (#273) [Gonéri Le Bouder]
+ - doc: introduce Code Review Process documentation (#160)
+ - tools: use python3 (#274)
+ - cc_disk_setup: fix RuntimeError (#270) (LP: #1868327)
+ - cc_apt_configure/util: combine search_for_mirror implementations (#271)
+ - bsd: boottime does not depend on the libc soname (#269)
+ [Gonéri Le Bouder]
+ - test_oracle,DataSourceOracle: sort imports (#266)
+ - DataSourceOracle: update .network_config docstring (#257)
+ - cloudinit/tests: remove unneeded with_logs configuration (#263)
+ - .travis.yml: drop stale comment (#255)
+ - .gitignore: add more common directories (#258)
+ - ec2: render network on all NICs and add secondary IPs as static (#114)
+ (LP: #1866930)
+ - ec2 json validation: fix the reference to the 'merged_cfg' key (#256)
+ [Paride Legovini]
+ - releases.yaml: quote the Ubuntu version numbers (#254) [Paride Legovini]
+ - cloudinit: remove six from packaging/tooling (#253)
+ - util/netbsd: drop six usage (#252)
+ - workflows: introduce stale pull request workflow (#125)
+ - cc_resolv_conf: introduce tests and stabilise output across Python
+ versions (#251)
+ - fix minor issue with resolv_conf template (#144) [andreaf74]
+ - doc: CloudInit also support NetBSD (#250) [Gonéri Le Bouder]
+ - Add Netbsd support (#62) [Gonéri Le Bouder]
+ - tox.ini: avoid substitution syntax that causes a traceback on xenial (#245)
+ - Add pub_key_ed25519 to cc_phone_home (#237) [Daniel Hensby]
+ - Introduce and use of a list of GitHub usernames that have signed CLA
+ (#244)
+ - workflows/cla.yml: use correct username for CLA check (#243)
+ - tox.ini: use xenial version of jsonpatch in CI (#242)
+ - workflows: CLA validation altered to fail status on pull_request (#164)
+ - tox.ini: bump pyflakes version to 2.1.1 (#239)
+ - cloudinit: move to pytest for running tests (#211)
+ - instance-data: add cloud-init merged_cfg and sys_info keys to json
+ (#214) (LP: #1865969)
+ - ec2: Do not fallback to IMDSv1 on EC2 (#216)
+ - instance-data: write redacted cfg to instance-data.json (#233)
+ (LP: #1865947)
+ - net: support network-config:disabled on the kernel commandline (#232)
+ (LP: #1862702)
+ - ec2: only redact token request headers in logs, avoid altering request
+ (#230) (LP: #1865882)
+ - docs: typo fixed: dta → data [Alexey Vazhnov]
+ - Fixes typo on Amazon Web Services (#217) [Nick Wales]
+ - Fix docs for OpenStack DMI Asset Tag (#228)
+ [Mark T. Voelker] (LP: #1669875)
+ - Add physical network type: cascading to openstack helpers (#200)
+ [sab-systems]
+ - tests: add focal integration tests for ubuntu (#225)
+
+20.1
+ - ec2: Do not log IMDSv2 token values, instead use REDACTED (#219)
+ (LP: #1863943)
+ - utils: use SystemRandom when generating random password. (#204)
+ [Dimitri John Ledkov]
+ - docs: mount_default_files is a list of 6 items, not 7 (#212)
+ - azurecloud: fix issues with instances not starting (#205) (LP: #1861921)
+ - unittest: fix stderr leak in cc_set_password random unittest
+ output. (#208)
+ - cc_disk_setup: add swap filesystem force flag (#207)
+ - import sysvinit patches from freebsd-ports tree (#161) [Igor Galić]
+ - docs: fix typo (#195) [Edwin Kofler]
+ - sysconfig: distro-specific config rendering for BOOTPROTO option (#162)
+ [Robert Schweikert] (LP: #1800854)
+ - cloudinit: replace "from six import X" imports (except in util.py) (#183)
+ - run-container: use 'test -n' instead of 'test ! -z' (#202)
+ [Paride Legovini]
+ - net/cmdline: correctly handle static ip= config (#201)
+ [Dimitri John Ledkov] (LP: #1861412)
+ - Replace mock library with unittest.mock (#186)
+ - HACKING.rst: update CLA link (#199)
+ - Scaleway: Fix DatasourceScaleway to avoid backtrace (#128)
+ [Louis Bouchard]
+ - cloudinit/cmd/devel/net_convert.py: add missing space (#191)
+ - tools/run-container: drop support for python2 (#192) [Paride Legovini]
+ - Print ssh key fingerprints using sha256 hash (#188) (LP: #1860789)
+ - Make the RPM build use Python 3 (#190) [Paride Legovini]
+ - cc_set_password: increase random pwlength from 9 to 20 (#189)
+ (LP: #1860795)
+ - .travis.yml: use correct Python version for xenial tests (#185)
+ - cloudinit: remove ImportError handling for mock imports (#182)
+ - Do not use fallocate in swap file creation on xfs. (#70)
+ [Eduardo Otubo] (LP: #1781781)
+ - .readthedocs.yaml: install cloud-init when building docs (#181)
+ (LP: #1860450)
+ - Introduce an RTD config file, and pin the Sphinx version to the RTD
+ default (#180)
+ - Drop most of the remaining use of six (#179)
+ - Start removing dependency on six (#178)
+ - Add Rootbox & HyperOne to list of cloud in README (#176) [Adam Dobrawy]
+ - docs: add proposed SRU testing procedure (#167)
+ - util: rename get_architecture to get_dpkg_architecture (#173)
+ - Ensure util.get_architecture() runs only once (#172)
+ - Only use gpart if it is the BSD gpart (#131) [Conrad Hoffmann]
+ - freebsd: remove superfluous exception mapping (#166) [Gonéri Le Bouder]
+ - ssh_auth_key_fingerprints_disable test: fix capitalization (#165)
+ [Paride Legovini]
+ - util: move uptime's else branch into its own boottime function (#53)
+ [Igor Galić] (LP: #1853160)
+ - workflows: add contributor license agreement checker (#155)
+ - net: fix rendering of 'static6' in network config (#77) (LP: #1850988)
+ - Make tests work with Python 3.8 (#139) [Conrad Hoffmann]
+ - fixed minor bug with mkswap in cc_disk_setup.py (#143) [andreaf74]
+ - freebsd: fix create_group() cmd (#146) [Gonéri Le Bouder]
+ - doc: make apt_update example consistent (#154)
+ - doc: add modules page toc with links (#153) (LP: #1852456)
+ - Add support for the amazon variant in cloud.cfg.tmpl (#119)
+ [Frederick Lefebvre]
+ - ci: remove Python 2.7 from CI runs (#137)
+ - modules: drop cc_snap_config config module (#134)
+ - migrate-lp-user-to-github: ensure Launchpad repo exists (#136)
+ - docs: add initial troubleshooting to FAQ (#104) [Joshua Powers]
+ - doc: update cc_set_hostname frequency and description (#109)
+ [Joshua Powers] (LP: #1827021)
+ - freebsd: introduce the freebsd renderer (#61) [Gonéri Le Bouder]
+ - cc_snappy: remove deprecated module (#127)
+ - HACKING.rst: clarify that everyone needs to do the LP->GH dance (#130)
+ - freebsd: cloudinit service requires devd (#132) [Gonéri Le Bouder]
+ - cloud-init: fix capitalisation of SSH (#126)
+ - doc: update cc_ssh clarify host and auth keys
+ [Joshua Powers] (LP: #1827021)
+ - ci: emit names of tests run in Travis (#120)
+
19.4
- doc: specify _ over - in cloud config modules
[Joshua Powers] (LP: #1293254)
diff --git a/HACKING.rst b/HACKING.rst
index 4ebdac17..d026cf71 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -6,38 +6,35 @@ This document describes how to contribute changes to cloud-init.
It assumes you have a `GitHub`_ account, and refers to your GitHub user
as ``GH_USER`` throughout.
-Do these things once
-====================
+Submitting your first pull request
+==================================
-* To contribute, you must sign the Canonical `contributor license agreement`_
+Follow these steps to submit your first pull request to cloud-init:
- If you have already signed it as an individual, your Launchpad user will be
- listed in the `contributor-agreement-canonical`_ group. Unfortunately there
- is no easy way to check if an organization or company you are doing work for
- has signed. When signing the CLA and prompted for 'Project contact' or
- 'Canonical Project Manager' enter 'Josh Powers'.
+* To contribute to cloud-init, you must sign the Canonical `contributor
+ license agreement`_
- For first-time signers, or for existing contributors who have already signed
- the agreement in Launchpad, we need to verify the link between your
- `Launchpad`_ account and your `GitHub`_ account. To enable us to do this, we
- ask that you create a branch with both your Launchpad and GitHub usernames
- against both the Launchpad and GitHub cloud-init repositories. We've added a
- tool (``tools/migrate-lp-user-to-github``) to the cloud-init repository to
- handle this migration as automatically as possible.
+ * If you have already signed it as an individual, your Launchpad user
+ will be listed in the `contributor-agreement-canonical`_ group.
+ (Unfortunately there is no easy way to check if an organization or
+ company you are doing work for has signed.)
- The cloud-init team will review the two merge proposals and verify
- that the CLA has been signed for the Launchpad user and record the
- associated GitHub account. We will reply to the email address
- associated with your Launchpad account that you've been clear to
- contribute to cloud-init on GitHub.
+ * When signing it:
- If your company has signed the CLA for you, please contact us to help
- in verifying which launchad/GitHub accounts are associated with the
- company. For any questions or help with the process, please email:
+ * ensure that you fill in the GitHub username field.
+ * when prompted for 'Project contact' or 'Canonical Project
+ Manager', enter 'Rick Harding'.
- `Josh Powers <mailto:josh.powers@canonical.com>`_ with the subject: Cloud-Init CLA
+ * If your company has signed the CLA for you, please contact us to
+ help in verifying which Launchpad/GitHub accounts are associated
+ with the company.
- You also may contanct user ``powersj`` in ``#cloud-init`` channel via IRC freenode.
+ * For any questions or help with the process, please email `Rick
+ Harding <mailto:rick.harding@canonical.com>`_ with the subject,
+ "Cloud-Init CLA"
+
+ * You also may contact user ``rick_h`` in the ``#cloud-init``
+ channel on the Freenode IRC network.
* Configure git with your email and name for commit messages.
@@ -60,11 +57,46 @@ Do these things once
git remote add GH_USER git@github.com:GH_USER/cloud-init.git
git push GH_USER master
+* Read through the cloud-init `Code Review Process`_, so you understand
+ how your changes will end up in cloud-init's codebase.
+
+* Submit your first cloud-init pull request, adding yourself to the
+ in-repository list that we use to track CLA signatures:
+ `tools/.github-cla-signers`_
+
+ * See `PR #344`_ and `PR #345`_ for examples of what this pull
+ request should look like.
+
+ * Note that ``.github-cla-signers`` is sorted alphabetically.
+
+ * (If you already have a change that you want to submit, you can
+ also include the change to ``tools/.github-cla-signers`` in that
+ pull request; there is no need for two separate PRs.)
+
.. _GitHub: https://github.com
.. _Launchpad: https://launchpad.net
.. _repository: https://github.com/canonical/cloud-init
-.. _contributor license agreement: http://www.canonical.com/contributors
+.. _contributor license agreement: https://ubuntu.com/legal/contributors
.. _contributor-agreement-canonical: https://launchpad.net/%7Econtributor-agreement-canonical/+members
+.. _tools/.github-cla-signers: https://github.com/canonical/cloud-init/blob/master/tools/.github-cla-signers
+.. _PR #344: https://github.com/canonical/cloud-init/pull/344
+.. _PR #345: https://github.com/canonical/cloud-init/pull/345
+
+Transferring CLA Signatures from Launchpad to Github
+----------------------------------------------------
+
+For existing contributors who have signed the agreement in Launchpad
+before the Github username field was included, we need to verify the
+link between your `Launchpad`_ account and your `GitHub`_ account. To
+enable us to do this, we ask that you create a branch with both your
+Launchpad and GitHub usernames against both the Launchpad and GitHub
+cloud-init repositories. We've added a tool
+(``tools/migrate-lp-user-to-github``) to the cloud-init repository to
+handle this migration as automatically as possible.
+
+The cloud-init team will review the two merge proposals and verify that
+the CLA has been signed for the Launchpad user and record the
+associated GitHub account.
Do these things for each feature or bug
=======================================
@@ -119,13 +151,15 @@ Do these things for each feature or bug
- Click 'Create Pull Request`
Then, someone in the `Ubuntu Server`_ team will review your changes and
-follow up in the pull request.
+follow up in the pull request. Look at the `Code Review Process`_ doc
+to understand the following steps.
Feel free to ping and/or join ``#cloud-init`` on freenode irc if you
have any questions.
.. _tox: https://tox.readthedocs.io/en/latest/
.. _Ubuntu Server: https://github.com/orgs/canonical/teams/ubuntu-server
+.. _Code Review Process: https://cloudinit.readthedocs.io/en/latest/topics/code_review.html
Design
======
@@ -138,3 +172,266 @@ Cloud Config Modules
* Any new modules should use underscores in any new config options and not
hyphens (e.g. `new_option` and *not* `new-option`).
+
+Unit Testing
+------------
+
+cloud-init uses `pytest`_ to run its tests, and has tests written both
+as ``unittest.TestCase`` sub-classes and as un-subclassed pytest tests.
+The following guidelines should be followed:
+
+* For ease of organisation and greater accessibility for developers not
+ familiar with pytest, all cloud-init unit tests must be contained
+ within test classes
+
+ * Put another way, module-level test functions should not be used
+
+* pytest test classes should use `pytest fixtures`_ to share
+ functionality instead of inheritance
+
+* As all tests are contained within classes, it is acceptable to mix
+ ``TestCase`` test classes and pytest test classes within the same
+ test file
+
+ * These can be easily distinguished by their definition: pytest
+ classes will not use inheritance at all (e.g.
+ `TestGetPackageMirrorInfo`_), whereas ``TestCase`` classes will
+ subclass (indirectly) from ``TestCase`` (e.g.
+ `TestPrependBaseCommands`_)
+
+* pytest tests should use bare ``assert`` statements, to take advantage
+ of pytest's `assertion introspection`_
+
+ * For ``==`` and other commutative assertions, the expected value
+ should be placed before the value under test:
+ ``assert expected_value == function_under_test()``
+
+* As we still support Ubuntu 16.04 (Xenial Xerus), we can only use
+ pytest features that are available in v2.8.7. This is an
+ inexhaustive list of ways in which this may catch you out:
+
+ * Support for using ``yield`` in ``pytest.fixture`` functions was
+ only introduced in `pytest 3.0`_. Such functions must instead use
+ the ``pytest.yield_fixture`` decorator.
+
+ * Only the following built-in fixtures are available
+ [#fixture-list]_:
+
+ * ``cache``
+ * ``capsys``
+ * ``capfd``
+ * ``record_xml_property``
+ * ``monkeypatch``
+ * ``pytestconfig``
+ * ``recwarn``
+ * ``tmpdir_factory``
+ * ``tmpdir``
+
+* Variables/parameter names for ``Mock`` or ``MagicMock`` instances
+ should start with ``m_`` to clearly distinguish them from non-mock
+ variables
+
+ * For example, ``m_readurl`` (which would be a mock for ``readurl``)
+
+* The ``assert_*`` methods that are available on ``Mock`` and
+ ``MagicMock`` objects should be avoided, as typos in these method
+ names may not raise ``AttributeError`` (and so can cause tests to
+ silently pass). An important exception: if a ``Mock`` is
+ `autospecced`_ then misspelled assertion methods *will* raise an
+ ``AttributeError``, so these assertion methods may be used on
+ autospecced ``Mock`` objects.
+
+ For non-autospecced ``Mock`` s, these substitutions can be used
+ (``m`` is assumed to be a ``Mock``):
+
+ * ``m.assert_any_call(*args, **kwargs)`` => ``assert
+ mock.call(*args, **kwargs) in m.call_args_list``
+ * ``m.assert_called()`` => ``assert 0 != m.call_count``
+ * ``m.assert_called_once()`` => ``assert 1 == m.call_count``
+ * ``m.assert_called_once_with(*args, **kwargs)`` => ``assert
+ [mock.call(*args, **kwargs)] == m.call_args_list``
+ * ``m.assert_called_with(*args, **kwargs)`` => ``assert
+ mock.call(*args, **kwargs) == m.call_args_list[-1]``
+ * ``m.assert_has_calls(call_list, any_order=True)`` => ``for call in
+ call_list: assert call in m.call_args_list``
+
+ * ``m.assert_has_calls(...)`` and ``m.assert_has_calls(...,
+ any_order=False)`` are not easily replicated in a single
+ statement, so their use when appropriate is acceptable.
+
+ * ``m.assert_not_called()`` => ``assert 0 == m.call_count``
+
+* Test arguments should be ordered as follows:
+
+ * ``mock.patch`` arguments. When used as a decorator, ``mock.patch``
+ partially applies its generated ``Mock`` object as the first
+ argument, so these arguments must go first.
+ * ``pytest.mark.parametrize`` arguments, in the order specified to
+ the ``parametrize`` decorator. These arguments are also provided
+ by a decorator, so it's natural that they sit next to the
+ ``mock.patch`` arguments.
+ * Fixture arguments, alphabetically. These are not provided by a
+ decorator, so they are last, and their order has no defined
+ meaning, so we default to alphabetical.
+
+* It follows from this ordering of test arguments (so that we retain
+ the property that arguments left-to-right correspond to decorators
+ bottom-to-top) that test decorators should be ordered as follows:
+
+ * ``pytest.mark.parametrize``
+ * ``mock.patch``
+
+* When there are multiple patch calls in a test file for the module it
+ is testing, it may be desirable to capture the shared string prefix
+ for these patch calls in a module-level variable. If used, such
+ variables should be named ``M_PATH`` or, for datasource tests,
+ ``DS_PATH``.
+
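
To see several of these conventions together, here is a sketch of a pytest-style test (illustrative only, with hypothetical names; it is not taken from the cloud-init test suite). It shows m_-prefixed mocks, bare asserts with the expected value first, a call-count assertion instead of assert_not_called(), and parametrize stacked above mock.patch so arguments read left-to-right:

    from unittest import mock

    import pytest

    # Hypothetical patch-target prefix for the module under test.
    M_PATH = 'os.path.'

    def netmask_to_prefix(mask):
        # Trivial function under test, for illustration only.
        return sum(bin(int(octet)).count('1') for octet in mask.split('.'))

    class TestNetmaskToPrefix:

        @pytest.mark.parametrize('mask,expected', [
            ('255.255.255.0', 24),
            ('255.255.0.0', 16),
        ])
        @mock.patch(M_PATH + 'exists')
        def test_prefix(self, m_exists, mask, expected, tmpdir):
            # Argument order: mock.patch mocks, then parametrize params,
            # then fixtures (alphabetically).
            assert expected == netmask_to_prefix(mask)  # expected value first
            assert 0 == m_exists.call_count  # instead of assert_not_called()
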
+.. _pytest: https://docs.pytest.org/
+.. _pytest fixtures: https://docs.pytest.org/en/latest/fixture.html
+.. _TestGetPackageMirrorInfo: https://github.com/canonical/cloud-init/blob/42f69f410ab8850c02b1f53dd67c132aa8ef64f5/cloudinit/distros/tests/test_init.py\#L15
+.. _TestPrependBaseCommands: https://github.com/canonical/cloud-init/blob/master/cloudinit/tests/test_subp.py#L9
+.. _assertion introspection: https://docs.pytest.org/en/latest/assert.html
+.. _pytest 3.0: https://docs.pytest.org/en/latest/changelog.html#id1093
+.. _autospecced: https://docs.python.org/3.8/library/unittest.mock.html#autospeccing
+
+Type Annotations
+----------------
+
+The cloud-init codebase uses Python's annotation support for storing
+type annotations in the style specified by `PEP-484`_. Their use in
+the codebase is encouraged but with one important caveat: types from
+the ``typing`` module cannot be used.
+
+cloud-init still supports Python 3.4, which doesn't have the ``typing``
+module in the stdlib. This means that the use of any types from the
+``typing`` module in the codebase would require installation of an
+additional Python module on platforms using Python 3.4. As such
+platforms are generally in maintenance mode, the introduction of a new
+dependency may act as a break in compatibility in practical terms.
+
+Similarly, only function annotations are appropriate for use, as the
+variable annotations specified in `PEP-526`_ were introduced in Python
+3.6.
+
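
For example (a sketch, not part of the patch), built-in types are fine in function annotations, while ``typing`` imports and variable annotations must be avoided:

    # Acceptable: PEP-484 function annotations using only built-in types.
    def is_ip_address(address: str) -> bool:
        return address.count('.') == 3  # placeholder body for illustration

    # To avoid on the supported Python versions:
    #   from typing import Optional      # typing is absent from Python 3.4
    #   address: str = get_address()     # PEP-526 syntax needs Python 3.6
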
+.. _PEP-484: https://www.python.org/dev/peps/pep-0484/
+.. _PEP-526: https://www.python.org/dev/peps/pep-0526/
+
+.. [#fixture-list] This list of fixtures (with markup) can be
+ reproduced by running::
+
+ py.test-3 --fixtures -q | grep "^[^ ]" | grep -v no | sed 's/.*/* ``\0``/'
+
+ in a xenial lxd container with python3-pytest installed.
+
+Ongoing Refactors
+=================
+
+This captures ongoing refactoring projects in the codebase. This is
+intended as documentation for developers involved in the refactoring,
+but also for other developers who may interact with the code being
+refactored in the meantime.
+
+``cloudinit.net`` -> ``cloudinit.distros.Networking`` Hierarchy
+---------------------------------------------------------------
+
+``cloudinit.net`` was imported from the curtin codebase as a chunk, and
+then modified enough that it integrated with the rest of the cloud-init
+codebase. Over the ~4 years since, the fact that it is not fully
+integrated into the ``Distro`` hierarchy has caused several issues.
+
+The common pattern of these problems is that the commands used for
+networking are different across distributions and operating systems.
+This has led to ``cloudinit.net`` developing its own "distro
+determination" logic: `get_interfaces_by_mac`_ is probably the clearest
+example of this. Currently, these differences are primarily split
+along Linux/BSD lines. However, it would be short-sighted to only
+refactor in a way that captures this difference: we can anticipate that
+differences will develop between Linux-based distros in future, or
+there may already be differences in tooling that we currently
+work around in less obvious ways.
+
+The high-level plan is to introduce a hierarchy of networking classes
+in ``cloudinit.distros``, which each ``Distro`` subclass will
+reference. These will capture the differences between networking on
+our various distros, while still allowing easy reuse of code between
+distros that share functionality (e.g. most of the Linux networking
+behaviour). Callers will call ``distro.net.func`` instead of
+``cloudinit.net.func``, which will necessitate access to an
+instantiated ``Distro`` object.
+
+An implementation note: there may be external consumers of the
+``cloudinit.net`` module. We don't consider this a public API, so we
+will be removing it as part of this refactor. However, we will ensure
+that the new API is complete from its introduction, so that any such
+consumers can move over to it wholesale. (Note, however, that this new
+API is still not considered public or stable, and may not replicate the
+existing API exactly.)
+
+In more detail:
+
+* The root of this hierarchy will be the
+ ``cloudinit.distros.Networking`` class. This class will have
+ a corresponding method for every ``cloudinit.net`` function that we
+ identify to be involved in refactoring. Initially, these methods'
+ implementations will simply call the corresponding ``cloudinit.net``
+ function. (This gives us the complete API from day one, for existing
+ consumers.)
+* As the biggest differentiator in behaviour, the next layer of the
+ hierarchy will be two subclasses: ``LinuxNetworking`` and
+ ``BSDNetworking``. These will be introduced in the initial PR.
+* When a difference in behaviour for a particular distro is identified,
+ a new ``Networking`` subclass will be created. This new class should
+ generally subclass either ``LinuxNetworking`` or ``BSDNetworking``.
+* To be clear: ``Networking`` subclasses will only be created when
+ needed, we will not create a full hierarchy of per-``Distro``
+ subclasses up-front.
+* Each ``Distro`` class will have a class variable
+ (``cls.networking_cls``) which points at the appropriate
+ networking class (initially this will be either ``LinuxNetworking``
+ or ``BSDNetworking``).
+* When ``Distro`` classes are instantiated, they will instantiate
+ ``cls.networking_cls`` and store the instance at ``self.net``. (This
+ will be implemented in ``cloudinit.distros.Distro.__init__``.)
+* A helper function will be added which will determine the appropriate
+ ``Distro`` subclass for the current system, instantiate it and return
+ its ``net`` attribute. (This is the entry point for existing
+ consumers to migrate to.)
+* Callers of refactored functions will change from calling
+ ``cloudinit.net.some_func`` to ``distro.net.some_func``, where
+ ``distro`` is an instance of the appropriate ``Distro`` class for
+ this system. (This will require making such an instance available to
+ callers, which will constitute a large part of the work in this
+ project.)
+
+After the initial structure is in place, the work in this refactor will
+consist of replacing the ``cloudinit.net.some_func`` call in each
+``cloudinit.distros.Networking`` method with the actual implementation.
+This can be done incrementally, one function at a time:
+
+* pick an unmigrated ``cloudinit.distros.Networking`` method
+* refactor all of its callers to call the ``distro.net`` method on
+ ``Distro`` instead of the ``cloudinit.net`` function. (This is likely
+ to be the most time-consuming step, as it may require plumbing
+ ``Distro`` objects through to places that previously have not
+ consumed them.)
+* refactor its implementation from ``cloudinit.net`` into the
+ ``Networking`` hierarchy (e.g. if it has an if/else on BSD, this is
+ the time to put the implementations in their respective subclasses)
+* ensure that the new implementation has unit tests (either by moving
+ existing tests, or by writing new ones)
+* finally, remove it (and any other now-unused functions) from
+ cloudinit.net (to avoid having two parallel implementations)
+
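
Sketching the structure described above (illustrative only; any names beyond those mentioned in the plan are hypothetical):

    from cloudinit import net

    class Networking:
        """Root class; methods initially delegate to cloudinit.net."""
        def get_interfaces_by_mac(self):
            return net.get_interfaces_by_mac()

    class LinuxNetworking(Networking):
        """Linux-specific behaviour is overridden here as identified."""

    class BSDNetworking(Networking):
        """BSD-specific behaviour is overridden here as identified."""

    class Distro:
        networking_cls = LinuxNetworking  # per-Distro subclasses override

        def __init__(self):
            self.net = self.networking_cls()

    # Callers then migrate from cloudinit.net.get_interfaces_by_mac()
    # to distro.net.get_interfaces_by_mac().
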
+References
+~~~~~~~~~~
+
+* `Mina Galić's email to the cloud-init ML in 2018`_ (plus its thread)
+* `Mina Galić's email to the cloud-init ML in 2019`_ (plus its thread)
+* `PR #363`_, the discussion which prompted finally starting this
+ refactor (and where a lot of the above details were hashed out)
+
+.. _get_interfaces_by_mac: https://github.com/canonical/cloud-init/blob/961239749106daead88da483e7319e9268c67cde/cloudinit/net/__init__.py#L810-L818
+.. _Mina Galić's email to the cloud-init ML in 2018: https://lists.launchpad.net/cloud-init/msg00185.html
+.. _Mina Galić's email to the cloud-init ML in 2019: https://lists.launchpad.net/cloud-init/msg00237.html
+.. _PR #363: https://github.com/canonical/cloud-init/pull/363
diff --git a/Makefile b/Makefile
index 315e6b45..5fb0fcbf 100644
--- a/Makefile
+++ b/Makefile
@@ -1,40 +1,22 @@
CWD=$(shell pwd)
-PYVER ?= $(shell for p in python3 python2; do \
- out=$$(command -v $$p 2>&1) && echo $$p && exit; done; exit 1)
-
-noseopts ?= -v
YAML_FILES=$(shell find cloudinit tests tools -name "*.yaml" -type f )
YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f )
-PIP_INSTALL := pip install
-
-ifeq ($(PYVER),python3)
- pyflakes = pyflakes3
- unittests = unittest3
- yaml = yaml
-else
-ifeq ($(PYVER),python2)
- pyflakes = pyflakes
- unittests = unittest
-else
- pyflakes = pyflakes pyflakes3
- unittests = unittest unittest3
-endif
-endif
+PYTHON = python3
+PIP_INSTALL := pip3 install
ifeq ($(distro),)
distro = redhat
endif
-READ_VERSION=$(shell $(PYVER) $(CWD)/tools/read-version || \
- echo read-version-failed)
-CODE_VERSION=$(shell $(PYVER) -c "from cloudinit import version; print(version.version_string())")
+READ_VERSION=$(shell $(PYTHON) $(CWD)/tools/read-version || echo read-version-failed)
+CODE_VERSION=$(shell $(PYTHON) -c "from cloudinit import version; print(version.version_string())")
all: check
-check: check_version test $(yaml)
+check: check_version test yaml
style-check: pep8 $(pyflakes)
@@ -44,20 +26,14 @@ pep8:
pyflakes:
@$(CWD)/tools/run-pyflakes
-pyflakes3:
- @$(CWD)/tools/run-pyflakes3
-
unittest: clean_pyc
- nosetests $(noseopts) tests/unittests cloudinit
-
-unittest3: clean_pyc
- nosetests3 $(noseopts) tests/unittests cloudinit
+ python3 -m pytest -v tests/unittests cloudinit
ci-deps-ubuntu:
- @$(PYVER) $(CWD)/tools/read-dependencies --distro ubuntu --test-distro
+ @$(PYTHON) $(CWD)/tools/read-dependencies --distro ubuntu --test-distro
ci-deps-centos:
- @$(PYVER) $(CWD)/tools/read-dependencies --distro centos --test-distro
+ @$(PYTHON) $(CWD)/tools/read-dependencies --distro centos --test-distro
pip-requirements:
@echo "Installing cloud-init dependencies..."
@@ -67,7 +43,7 @@ pip-test-requirements:
@echo "Installing cloud-init test dependencies..."
$(PIP_INSTALL) -r "$@.txt" -q
-test: $(unittests)
+test: unittest
check_version:
@if [ "$(READ_VERSION)" != "$(CODE_VERSION)" ]; then \
@@ -76,7 +52,7 @@ check_version:
else true; fi
config/cloud.cfg:
- $(PYVER) ./tools/render-cloudcfg config/cloud.cfg.tmpl config/cloud.cfg
+ $(PYTHON) ./tools/render-cloudcfg config/cloud.cfg.tmpl config/cloud.cfg
clean_pyc:
@find . -type f -name "*.pyc" -delete
@@ -86,30 +62,30 @@ clean: clean_pyc
rm -rf doc/rtd_html .tox .coverage
yaml:
- @$(PYVER) $(CWD)/tools/validate-yaml.py $(YAML_FILES)
+ @$(PYTHON) $(CWD)/tools/validate-yaml.py $(YAML_FILES)
rpm:
- $(PYVER) ./packages/brpm --distro=$(distro)
+ $(PYTHON) ./packages/brpm --distro=$(distro)
srpm:
- $(PYVER) ./packages/brpm --srpm --distro=$(distro)
+ $(PYTHON) ./packages/brpm --srpm --distro=$(distro)
deb:
@which debuild || \
{ echo "Missing devscripts dependency. Install with:"; \
echo sudo apt-get install devscripts; exit 1; }
- $(PYVER) ./packages/bddeb
+ $(PYTHON) ./packages/bddeb
deb-src:
@which debuild || \
{ echo "Missing devscripts dependency. Install with:"; \
echo sudo apt-get install devscripts; exit 1; }
- $(PYVER) ./packages/bddeb -S -d
+ $(PYTHON) ./packages/bddeb -S -d
doc:
tox -e doc
-.PHONY: test pyflakes pyflakes3 clean pep8 rpm srpm deb deb-src yaml
+.PHONY: test pyflakes clean pep8 rpm srpm deb deb-src yaml
.PHONY: check_version pip-test-requirements pip-requirements clean_pyc
-.PHONY: unittest unittest3 style-check doc
+.PHONY: unittest style-check doc
diff --git a/README.md b/README.md
index d648e426..73bf1ddd 100644
--- a/README.md
+++ b/README.md
@@ -39,7 +39,7 @@ get in contact with that distribution and send them our way!
| Supported OSes | Supported Public Clouds | Supported Private Clouds |
| --- | --- | --- |
-| Ubuntu<br />SLES/openSUSE<br />RHEL/CentOS<br />Fedora<br />Gentoo Linux<br />Debian<br />ArchLinux<br />FreeBSD<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
+| Ubuntu<br />SLES/openSUSE<br />RHEL/CentOS<br />Fedora<br />Gentoo Linux<br />Debian<br />ArchLinux<br />FreeBSD<br />NetBSD<br />OpenBSD<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
## To start developing cloud-init
diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py
index 1f3060d0..939c3126 100644
--- a/cloudinit/analyze/dump.py
+++ b/cloudinit/analyze/dump.py
@@ -74,8 +74,12 @@ def parse_ci_logline(line):
#
# 2017-05-22 18:02:01,088 - util.py[DEBUG]: Cloud-init v. 0.7.9 running \
# 'init-local' at Mon, 22 May 2017 18:02:01 +0000. Up 2.0 seconds.
+ #
+ # Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start: \
+ # init-local/check-cache: attempting to read from cache [check]
- separators = [' - ', ' [CLOUDINIT] ']
+ amazon_linux_2_sep = ' cloud-init['
+ separators = [' - ', ' [CLOUDINIT] ', amazon_linux_2_sep]
found = False
for sep in separators:
if sep in line:
@@ -98,7 +102,14 @@ def parse_ci_logline(line):
hostname = extra.split()[-1]
else:
hostname = timehost.split()[-1]
- timestampstr = timehost.split(hostname)[0].strip()
+ if sep == amazon_linux_2_sep:
+ # This is an Amazon Linux style line, with no hostname and a PID.
+ # Use the whole of timehost as timestampstr, and strip off the PID
+ # from the start of eventstr.
+ timestampstr = timehost.strip()
+ eventstr = eventstr.split(maxsplit=1)[1]
+ else:
+ timestampstr = timehost.split(hostname)[0].strip()
if 'Cloud-init v.' in eventstr:
event_type = 'start'
if 'running' in eventstr:
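
To illustrate the new branch (simplified; the real code iterates over all separators): splitting the Amazon Linux 2 sample line on ' cloud-init[' leaves the timestamp on one side and a PID-prefixed event string on the other, and split(maxsplit=1) drops the PID:

    line = ("Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start:"
            " init-local/check-cache: attempting to read from cache [check]")
    timehost, eventstr = line.split(' cloud-init[', maxsplit=1)
    timestampstr = timehost.strip()           # 'Apr 30 19:39:11'
    eventstr = eventstr.split(maxsplit=1)[1]  # drops the leading '2673]:'
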
diff --git a/cloudinit/analyze/tests/test_dump.py b/cloudinit/analyze/tests/test_dump.py
index db2a667b..d6fbd381 100644
--- a/cloudinit/analyze/tests/test_dump.py
+++ b/cloudinit/analyze/tests/test_dump.py
@@ -119,6 +119,23 @@ class TestParseCILogLine(CiTestCase):
m_parse_from_date.assert_has_calls(
[mock.call("2016-08-30 21:53:25.972325+00:00")])
+ def test_parse_logline_returns_event_for_amazon_linux_2_line(self):
+ line = (
+ "Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start:"
+ " init-local/check-cache: attempting to read from cache [check]")
+ # Generate the expected value using `datetime`, so that TZ
+ # determination is consistent with the code under test.
+ timestamp_dt = datetime.strptime(
+ "Apr 30 19:39:11", "%b %d %H:%M:%S"
+ ).replace(year=datetime.now().year)
+ expected = {
+ 'description': 'attempting to read from cache [check]',
+ 'event_type': 'start',
+ 'name': 'init-local/check-cache',
+ 'origin': 'cloudinit',
+ 'timestamp': timestamp_dt.timestamp()}
+ self.assertEqual(expected, parse_ci_logline(line))
+
SAMPLE_LOGS = dedent("""\
Nov 03 06:51:06.074410 x2 cloud-init[106]: [CLOUDINIT] util.py[DEBUG]:\
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index 1f2c2e7e..9bded16c 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -36,6 +36,7 @@ KNOWN_CLOUD_NAMES = [
'OVF',
'RbxCloud - (HyperOne, Rootbox, Rubikon)',
'OpenTelekomCloud',
+ 'SAP Converged Cloud',
'Scaleway',
'SmartOS',
'VMware',
diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py
index 2d27a76a..80d217ca 100755
--- a/cloudinit/cmd/devel/net_convert.py
+++ b/cloudinit/cmd/devel/net_convert.py
@@ -95,7 +95,7 @@ def handle_args(name, args):
ns = network_state.parse_net_config_data(pre_ns)
if not ns:
raise RuntimeError("No valid network_state object created from"
- "input data")
+ " input data")
if args.debug:
sys.stderr.write('\n'.join(
diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py
index 4951797b..d2dfa8de 100644
--- a/cloudinit/cmd/devel/tests/test_logs.py
+++ b/cloudinit/cmd/devel/tests/test_logs.py
@@ -2,7 +2,7 @@
from datetime import datetime
import os
-from six import StringIO
+from io import StringIO
from cloudinit.cmd.devel import logs
from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
diff --git a/cloudinit/cmd/devel/tests/test_render.py b/cloudinit/cmd/devel/tests/test_render.py
index 988bba03..a7fcf2ce 100644
--- a/cloudinit/cmd/devel/tests/test_render.py
+++ b/cloudinit/cmd/devel/tests/test_render.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from six import StringIO
import os
+from io import StringIO
from collections import namedtuple
from cloudinit.cmd.devel import render
diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py
index 1d888b9d..e3db8679 100644
--- a/cloudinit/cmd/query.py
+++ b/cloudinit/cmd/query.py
@@ -5,7 +5,6 @@
import argparse
from errno import EACCES
import os
-import six
import sys
from cloudinit.handlers.jinja_template import (
@@ -149,7 +148,7 @@ def handle_args(name, args):
response = '\n'.join(sorted(response.keys()))
elif args.list_keys:
response = '\n'.join(sorted(response.keys()))
- if not isinstance(response, six.string_types):
+ if not isinstance(response, str):
response = util.json_dumps(response)
print(response)
return 0
diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py
index f092ab3d..a848a810 100644
--- a/cloudinit/cmd/tests/test_clean.py
+++ b/cloudinit/cmd/tests/test_clean.py
@@ -5,7 +5,7 @@ from cloudinit.util import ensure_dir, sym_link, write_file
from cloudinit.tests.helpers import CiTestCase, wrap_and_call, mock
from collections import namedtuple
import os
-from six import StringIO
+from io import StringIO
mypaths = namedtuple('MyPaths', 'cloud_dir')
@@ -167,7 +167,6 @@ class TestClean(CiTestCase):
wrap_and_call(
'cloudinit.cmd.clean',
{'Init': {'side_effect': self.init_class},
- 'sys.exit': {'side_effect': self.sys_exit},
'sys.argv': {'new': ['clean', '--logs']}},
clean.main)
diff --git a/cloudinit/cmd/tests/test_cloud_id.py b/cloudinit/cmd/tests/test_cloud_id.py
index 73738170..3f3727fd 100644
--- a/cloudinit/cmd/tests/test_cloud_id.py
+++ b/cloudinit/cmd/tests/test_cloud_id.py
@@ -4,7 +4,7 @@
from cloudinit import util
from collections import namedtuple
-from six import StringIO
+from io import StringIO
from cloudinit.cmd import cloud_id
diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
index 57b8fdf5..585b3b0e 100644
--- a/cloudinit/cmd/tests/test_main.py
+++ b/cloudinit/cmd/tests/test_main.py
@@ -3,7 +3,7 @@
from collections import namedtuple
import copy
import os
-from six import StringIO
+from io import StringIO
from cloudinit.cmd import main
from cloudinit import safeyaml
@@ -18,8 +18,6 @@ myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand')
class TestMain(FilesystemMockingTestCase):
- with_logs = True
-
def setUp(self):
super(TestMain, self).setUp()
self.new_root = self.tmp_dir()
diff --git a/cloudinit/cmd/tests/test_query.py b/cloudinit/cmd/tests/test_query.py
index c48605ad..6d36a4ea 100644
--- a/cloudinit/cmd/tests/test_query.py
+++ b/cloudinit/cmd/tests/test_query.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import errno
-from six import StringIO
+from io import StringIO
from textwrap import dedent
import os
diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py
index aded8580..1c9eec37 100644
--- a/cloudinit/cmd/tests/test_status.py
+++ b/cloudinit/cmd/tests/test_status.py
@@ -2,7 +2,7 @@
from collections import namedtuple
import os
-from six import StringIO
+from io import StringIO
from textwrap import dedent
from cloudinit.atomic_helper import write_json
@@ -382,7 +382,6 @@ class TestStatus(CiTestCase):
wrap_and_call(
'cloudinit.cmd.status',
{'sys.argv': {'new': ['status']},
- 'sys.exit': {'side_effect': self.sys_exit},
'_is_cloudinit_disabled': (False, ''),
'Init': {'side_effect': self.init_class}},
status.main)
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index f01e2aaf..9a33451d 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -6,228 +6,371 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Apt Configure
--------------
-**Summary:** configure apt
-
-This module handles both configuration of apt options and adding source lists.
-There are configuration options such as ``apt_get_wrapper`` and
-``apt_get_command`` that control how cloud-init invokes apt-get.
-These configuration options are handled on a per-distro basis, so consult
-documentation for cloud-init's distro support for instructions on using
-these config options.
-
-.. note::
- To ensure that apt configuration is valid yaml, any strings containing
- special characters, especially ``:`` should be quoted.
-
-.. note::
- For more information about apt configuration, see the
- ``Additional apt configuration`` example.
-
-**Preserve sources.list:**
-
-By default, cloud-init will generate a new sources list in
-``/etc/apt/sources.list.d`` based on any changes specified in cloud config.
-To disable this behavior and preserve the sources list from the pristine image,
-set ``preserve_sources_list`` to ``true``.
-
-.. note::
- The ``preserve_sources_list`` option overrides all other config keys that
- would alter ``sources.list`` or ``sources.list.d``, **except** for
- additional sources to be added to ``sources.list.d``.
-
-**Disable source suites:**
-
-Entries in the sources list can be disabled using ``disable_suites``, which
-takes a list of suites to be disabled. If the string ``$RELEASE`` is present in
-a suite in the ``disable_suites`` list, it will be replaced with the release
-name. If a suite specified in ``disable_suites`` is not present in
-``sources.list`` it will be ignored. For convenience, several aliases are
-provided for ``disable_suites``:
-
- - ``updates`` => ``$RELEASE-updates``
- - ``backports`` => ``$RELEASE-backports``
- - ``security`` => ``$RELEASE-security``
- - ``proposed`` => ``$RELEASE-proposed``
- - ``release`` => ``$RELEASE``
-
-.. note::
- When a suite is disabled using ``disable_suites``, its entry in
- ``sources.list`` is not deleted; it is just commented out.
-
-**Configure primary and security mirrors:**
-
-The primary and security archive mirrors can be specified using the ``primary``
-and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys
-take a list of configs, allowing mirrors to be specified on a per-architecture
-basis. Each config is a dictionary which must have an entry for ``arches``,
-specifying which architectures that config entry is for. The keyword
-``default`` applies to any architecture not explicitly listed. The mirror url
-can be specified with the ``uri`` key, or a list of mirrors to check can be
-provided in order, with the first mirror that can be resolved being selected.
-This allows the same configuration to be used in different environment, with
-different hosts used for a local apt mirror. If no mirror is provided by
-``uri`` or ``search``, ``search_dns`` may be used to search for dns names in
-the format ``<distro>-mirror`` in each of the following:
-
- - fqdn of this host per cloud metadata
- - localdomain
- - domains listed in ``/etc/resolv.conf``
-
-If there is a dns entry for ``<distro>-mirror``, then it is assumed that there
-is a distro mirror at ``http://<distro>-mirror.<domain>/<distro>``. If the
-``primary`` key is defined, but not the ``security`` key, then then
-configuration for ``primary`` is also used for ``security``. If ``search_dns``
-is used for the ``security`` key, the search pattern will be.
-``<distro>-security-mirror``.
-
-If no mirrors are specified, or all lookups fail, then default mirrors defined
-in the datasource are used. If none are present in the datasource either the
-following defaults are used:
-
- - primary: ``http://archive.ubuntu.com/ubuntu``
- - security: ``http://security.ubuntu.com/ubuntu``
-
-**Specify sources.list template:**
-
-A custom template for rendering ``sources.list`` can be specefied with
-``sources_list``. If no ``sources_list`` template is given, cloud-init will
-use sane default. Within this template, the following strings will be replaced
-with the appropriate values:
-
- - ``$MIRROR``
- - ``$RELEASE``
- - ``$PRIMARY``
- - ``$SECURITY``
-
-**Pass configuration to apt:**
-
-Apt configuration can be specified using ``conf``. Configuration is specified
-as a string. For multiline apt configuration, make sure to follow yaml syntax.
-
-**Configure apt proxy:**
-
-Proxy configuration for apt can be specified using ``conf``, but proxy config
-keys also exist for convenience. The proxy config keys, ``http_proxy``,
-``ftp_proxy``, and ``https_proxy`` may be used to specify a proxy for http, ftp
-and https protocols respectively. The ``proxy`` key also exists as an alias for
-``http_proxy``. Proxy url is specified in the format
-``<protocol>://[[user][:pass]@]host[:port]/``.
-
-**Add apt repos by regex:**
+"""Apt Configure: Configure apt for the user."""
-All source entries in ``apt-sources`` that match regex in
-``add_apt_repo_match`` will be added to the system using
-``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults
-to ``^[\\w-]+:\\w``
-
-**Add source list entries:**
-
-Source list entries can be specified as a dictionary under the ``sources``
-config key, with key in the dict representing a different source file. The key
-of each source entry will be used as an id that can be referenced in
-other config entries, as well as the filename for the source's configuration
-under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``,
-it will be appended. If there is no configuration for a key in ``sources``, no
-file will be written, but the key may still be referred to as an id in other
-``sources`` entries.
-
-Each entry under ``sources`` is a dictionary which may contain any of the
-following optional keys:
-
- - ``source``: a sources.list entry (some variable replacements apply)
- - ``keyid``: a key to import via shortid or fingerprint
- - ``key``: a raw PGP key
- - ``keyserver``: alternate keyserver to pull ``keyid`` key from
-
-The ``source`` key supports variable replacements for the following strings:
-
- - ``$MIRROR``
- - ``$PRIMARY``
- - ``$SECURITY``
- - ``$RELEASE``
-
-**Internal name:** ``cc_apt_configure``
+import glob
+import os
+import re
+from textwrap import dedent
-**Module frequency:** per instance
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
+from cloudinit import gpg
+from cloudinit import log as logging
+from cloudinit import templater
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
-**Supported distros:** ubuntu, debian
+LOG = logging.getLogger(__name__)
-**Config keys**::
+# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar')
+ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
- apt:
- preserve_sources_list: <true/false>
- disable_suites:
+frequency = PER_INSTANCE
+distros = ["ubuntu", "debian"]
+mirror_property = {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'required': ['arches'],
+ 'properties': {
+ 'arches': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string'
+ },
+ 'minItems': 1
+ },
+ 'uri': {
+ 'type': 'string',
+ 'format': 'uri'
+ },
+ 'search': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string',
+ 'format': 'uri'
+ },
+ 'minItems': 1
+ },
+ 'search_dns': {
+ 'type': 'boolean',
+ }
+ }
+ }
+}
+schema = {
+ 'id': 'cc_apt_configure',
+ 'name': 'Apt Configure',
+ 'title': 'Configure apt for the user',
+ 'description': dedent("""\
+ This module handles both configuration of apt options and adding
+ source lists. There are configuration options such as
+ ``apt_get_wrapper`` and ``apt_get_command`` that control how
+ cloud-init invokes apt-get. These configuration options are
+ handled on a per-distro basis, so consult documentation for
+ cloud-init's distro support for instructions on using
+ these config options.
+
+ .. note::
+ To ensure that apt configuration is valid yaml, any strings
+ containing special characters, especially ``:`` should be quoted.
+
+ .. note::
+ For more information about apt configuration, see the
+ ``Additional apt configuration`` example."""),
+ 'distros': distros,
+ 'examples': [dedent("""\
+ apt:
+ preserve_sources_list: false
+ disable_suites:
- $RELEASE-updates
- backports
- $RELEASE
- mysuite
- primary:
+ primary:
- arches:
- amd64
- i386
- default
- uri: "http://us.archive.ubuntu.com/ubuntu"
+ uri: 'http://us.archive.ubuntu.com/ubuntu'
search:
- - "http://cool.but-sometimes-unreachable.com/ubuntu"
- - "http://us.archive.ubuntu.com/ubuntu"
+ - 'http://cool.but-sometimes-unreachable.com/ubuntu'
+ - 'http://us.archive.ubuntu.com/ubuntu'
search_dns: <true/false>
- arches:
- s390x
- arm64
- uri: "http://archive-to-use-for-arm64.example.com/ubuntu"
- security:
+ uri: 'http://archive-to-use-for-arm64.example.com/ubuntu'
+ security:
- arches:
- default
search_dns: true
- sources_list: |
- deb $MIRROR $RELEASE main restricted
- deb-src $MIRROR $RELEASE main restricted
- deb $PRIMARY $RELEASE universe restricted
- deb $SECURITY $RELEASE-security multiverse
- debconf_selections:
- set1: the-package the-package/some-flag boolean true
- conf: |
- APT {
- Get {
- Assume-Yes "true";
- Fix-Broken "true";
+ sources_list: |
+ deb $MIRROR $RELEASE main restricted
+ deb-src $MIRROR $RELEASE main restricted
+ deb $PRIMARY $RELEASE universe restricted
+ deb $SECURITY $RELEASE-security multiverse
+ debconf_selections:
+ set1: the-package the-package/some-flag boolean true
+ conf: |
+ APT {
+ Get {
+ Assume-Yes 'true';
+ Fix-Broken 'true';
+ }
+ }
+ proxy: 'http://[[user][:pass]@]host[:port]/'
+ http_proxy: 'http://[[user][:pass]@]host[:port]/'
+ ftp_proxy: 'ftp://[[user][:pass]@]host[:port]/'
+ https_proxy: 'https://[[user][:pass]@]host[:port]/'
+ sources:
+ source1:
+ keyid: 'keyid'
+ keyserver: 'keyserverurl'
+ source: 'deb http://<url>/ xenial main'
+ source2:
+ source: 'ppa:<ppa-name>'
+ source3:
+ source: 'deb $MIRROR $RELEASE multiverse'
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ <key data>
+ -----END PGP PUBLIC KEY BLOCK-----""")],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'apt': {
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'properties': {
+ 'preserve_sources_list': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+ By default, cloud-init will generate a new sources
+ list in ``/etc/apt/sources.list.d`` based on any
+ changes specified in cloud config. To disable this
+ behavior and preserve the sources list from the
+ pristine image, set ``preserve_sources_list``
+ to ``true``.
+
+ The ``preserve_sources_list`` option overrides
+ all other config keys that would alter
+ ``sources.list`` or ``sources.list.d``,
+ **except** for additional sources to be added
+ to ``sources.list.d``.""")
+ },
+ 'disable_suites': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string'
+ },
+ 'uniqueItems': True,
+ 'description': dedent("""\
+ Entries in the sources list can be disabled using
+ ``disable_suites``, which takes a list of suites
+ to be disabled. If the string ``$RELEASE`` is
+ present in a suite in the ``disable_suites`` list,
+ it will be replaced with the release name. If a
+ suite specified in ``disable_suites`` is not
+ present in ``sources.list`` it will be ignored.
+ For convenience, several aliases are provided for
+ ``disable_suites``:
+
+ - ``updates`` => ``$RELEASE-updates``
+ - ``backports`` => ``$RELEASE-backports``
+ - ``security`` => ``$RELEASE-security``
+ - ``proposed`` => ``$RELEASE-proposed``
+ - ``release`` => ``$RELEASE``.
+
+ When a suite is disabled using ``disable_suites``,
+ its entry in ``sources.list`` is not deleted; it
+ is just commented out.""")
+ },
+ 'primary': {
+ **mirror_property,
+ 'description': dedent("""\
+ The primary and security archive mirrors can
+ be specified using the ``primary`` and
+ ``security`` keys, respectively. Both the
+ ``primary`` and ``security`` keys take a list
+ of configs, allowing mirrors to be specified
+ on a per-architecture basis. Each config is a
+ dictionary which must have an entry for
+ ``arches``, specifying which architectures
+ that config entry is for. The keyword
+ ``default`` applies to any architecture not
+ explicitly listed. The mirror url can be specified
+ with the ``uri`` key, or a list of mirrors to
+ check can be provided in order, with the first
+ mirror that can be resolved being selected. This
+ allows the same configuration to be used in
+ different environments, with different hosts used
+ for a local apt mirror. If no mirror is provided
+ by ``uri`` or ``search``, ``search_dns`` may be
+ used to search for dns names in the format
+ ``<distro>-mirror`` in each of the following:
+
+ - fqdn of this host per cloud metadata,
+ - localdomain,
+ - domains listed in ``/etc/resolv.conf``.
+
+ If there is a dns entry for ``<distro>-mirror``,
+ then it is assumed that there is a distro mirror
+ at ``http://<distro>-mirror.<domain>/<distro>``.
+ If the ``primary`` key is defined, but not the
+ ``security`` key, then the configuration for
+ ``primary`` is also used for ``security``.
+ If ``search_dns`` is used for the ``security``
+ key, the search pattern will be
+ ``<distro>-security-mirror``.
+
+ If no mirrors are specified, or all lookups fail,
+ then default mirrors defined in the datasource
+ are used. If none are present in the datasource
+ either the following defaults are used:
+
+ - ``primary`` => \
+ ``http://archive.ubuntu.com/ubuntu``.
+ - ``security`` => \
+ ``http://security.ubuntu.com/ubuntu``
+ """)},
+ 'security': {
+ **mirror_property,
+ 'description': dedent("""\
+ Please refer to the primary config documentation""")
+ },
+ 'add_apt_repo_match': {
+ 'type': 'string',
+ 'default': ADD_APT_REPO_MATCH,
+ 'description': dedent("""\
+ All source entries in ``apt-sources`` that match
+ regex in ``add_apt_repo_match`` will be added to
+ the system using ``add-apt-repository``. If
+ ``add_apt_repo_match`` is not specified, it
+ defaults to ``{}``""".format(ADD_APT_REPO_MATCH))
+ },
+ 'debconf_selections': {
+ 'type': 'object',
+ 'items': {'type': 'string'},
+ 'description': dedent("""\
+ Debconf additional configurations can be specified as a
+ dictionary under the ``debconf_selections`` config
+ key, with each key in the dict representing a
+ different set of configurations. The value of each key
+ must be a string containing all the debconf
+ configurations that must be applied. We will bundle
+ all of the values and pass them to
+ ``debconf-set-selections``. Therefore, each value line
+ must be a valid entry for ``debconf-set-selections``,
+ meaning that they must possess four distinct fields:
+
+ ``pkgname question type answer``
+
+ Where:
+
+ - ``pkgname`` is the name of the package.
+ - ``question`` is the name of the question.
+ - ``type`` is the type of question.
+ - ``answer`` is the value used to answer the \
+ question.
+
+ For example: \
+ ``ippackage ippackage/ip string 127.0.0.1``
+ """)
+ },
+ 'sources_list': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Specifies a custom template for rendering
+ ``sources.list``. If no ``sources_list`` template
+ is given, cloud-init will use a sane default. Within
+ this template, the following strings will be
+ replaced with the appropriate values:
+
+ - ``$MIRROR``
+ - ``$RELEASE``
+ - ``$PRIMARY``
+ - ``$SECURITY``""")
+ },
+ 'conf': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Specify configuration for apt, such as proxy
+ configuration. This configuration is specified as a
+ string. For multiline apt configuration, make sure
+ to follow yaml syntax.""")
+ },
+ 'https_proxy': {
+ 'type': 'string',
+ 'description': dedent("""\
+ More convenient way to specify https apt proxy.
+ https proxy url is specified in the format
+ ``https://[[user][:pass]@]host[:port]/``.""")
+ },
+ 'http_proxy': {
+ 'type': 'string',
+ 'description': dedent("""\
+ More convenient way to specify http apt proxy.
+ http proxy url is specified in the format
+ ``http://[[user][:pass]@]host[:port]/``.""")
+ },
+ 'proxy': {
+ 'type': 'string',
+ 'description': 'Alias for defining a http apt proxy.'
+ },
+ 'ftp_proxy': {
+ 'type': 'string',
+ 'description': dedent("""\
+ More convenient way to specify ftp apt proxy.
+ ftp proxy url is specified in the format
+ ``ftp://[[user][:pass]@]host[:port]/``.""")
+ },
+ 'sources': {
+ 'type': 'object',
+ 'items': {'type': 'string'},
+ 'description': dedent("""\
+ Source list entries can be specified as a
+ dictionary under the ``sources`` config key, with
+ each key in the dict representing a different source
+ file. The key of each source entry will be used
+ as an id that can be referenced in other config
+ entries, as well as the filename for the source's
+ configuration under ``/etc/apt/sources.list.d``.
+ If the name does not end with ``.list``, it will
+ be appended. If there is no configuration for a
+ key in ``sources``, no file will be written, but
+ the key may still be referred to as an id in other
+ ``sources`` entries.
+
+ Each entry under ``sources`` is a dictionary which
+ may contain any of the following optional keys:
+
+ - ``source``: a sources.list entry \
+ (some variable replacements apply).
+ - ``keyid``: a key to import via shortid or \
+ fingerprint.
+ - ``key``: a raw PGP key.
+ - ``keyserver``: alternate keyserver to pull \
+ ``keyid`` key from.
+
+ The ``source`` key supports variable
+ replacements for the following strings:
+
+ - ``$MIRROR``
+ - ``$PRIMARY``
+ - ``$SECURITY``
+ - ``$RELEASE``""")
}
}
- proxy: "http://[[user][:pass]@]host[:port]/"
- http_proxy: "http://[[user][:pass]@]host[:port]/"
- ftp_proxy: "ftp://[[user][:pass]@]host[:port]/"
- https_proxy: "https://[[user][:pass]@]host[:port]/"
- sources:
- source1:
- keyid: "keyid"
- keyserver: "keyserverurl"
- source: "deb http://<url>/ xenial main"
- source2:
- source: "ppa:<ppa-name>"
- source3:
- source: "deb $MIRROR $RELEASE multiverse"
- key: |
- ------BEGIN PGP PUBLIC KEY BLOCK-------
- <key data>
- ------END PGP PUBLIC KEY BLOCK-------
-"""
-
-import glob
-import os
-import re
-
-from cloudinit import gpg
-from cloudinit import log as logging
-from cloudinit import templater
-from cloudinit import util
+ }
+ }
+}
-LOG = logging.getLogger(__name__)
+__doc__ = get_schema_doc(schema)
-# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar')
-ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
# place where apt stores cached repository data
APT_LISTS = "/var/lib/apt/lists"
@@ -253,7 +396,7 @@ def get_default_mirrors(arch=None, target=None):
architecture, for more see:
https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports"""
if arch is None:
- arch = util.get_architecture(target)
+ arch = util.get_dpkg_architecture(target)
if arch in PRIMARY_ARCHES:
return PRIMARY_ARCH_MIRRORS.copy()
if arch in PORTS_ARCHES:
@@ -279,6 +422,7 @@ def handle(name, ocfg, cloud, log, _):
"Expected dictionary for 'apt' config, found {config_type}".format(
config_type=type(cfg)))
+ validate_cloudconfig_schema(cfg, schema)
apply_debconf_selections(cfg, target)
apply_apt(cfg, cloud, target)
@@ -303,13 +447,13 @@ def apply_apt(cfg, cloud, target):
LOG.debug("handling apt config: %s", cfg)
release = util.lsb_release(target=target)['codename']
- arch = util.get_architecture(target)
+ arch = util.get_dpkg_architecture(target)
mirrors = find_apt_mirror_info(cfg, cloud, arch=arch)
LOG.debug("Apt Mirror info: %s", mirrors)
if util.is_false(cfg.get('preserve_sources_list', False)):
generate_sources_list(cfg, release, mirrors, cloud)
- rename_apt_lists(mirrors, target)
+ rename_apt_lists(mirrors, target, arch)
try:
apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
@@ -427,9 +571,9 @@ def mirrorurl_to_apt_fileprefix(mirror):
return string
-def rename_apt_lists(new_mirrors, target=None):
+def rename_apt_lists(new_mirrors, target, arch):
"""rename_apt_lists - rename apt lists to preserve old cache data"""
- default_mirrors = get_default_mirrors(util.get_architecture(target))
+ default_mirrors = get_default_mirrors(arch)
pre = util.target_path(target, APT_LISTS)
for (name, omirror) in default_mirrors.items():
@@ -763,25 +907,6 @@ def convert_to_v3_apt_format(cfg):
return cfg
-def search_for_mirror(candidates):
- """
- Search through a list of mirror urls for one that works
- This needs to return quickly.
- """
- if candidates is None:
- return None
-
- LOG.debug("search for mirror in candidates: '%s'", candidates)
- for cand in candidates:
- try:
- if util.is_resolvable_url(cand):
- LOG.debug("found working mirror: '%s'", cand)
- return cand
- except Exception:
- pass
- return None
-
-
def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
"""
    Try to resolve a list of predefined DNS names to pick mirrors
@@ -813,7 +938,7 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud):
for post in doms:
mirror_list.append(mirrorfmt % (post))
- mirror = search_for_mirror(mirror_list)
+ mirror = util.search_for_mirror(mirror_list)
return mirror
@@ -876,7 +1001,7 @@ def get_mirror(cfg, mirrortype, arch, cloud):
# fallback to search if specified
if mirror is None:
# list of mirrors to try to resolve
- mirror = search_for_mirror(mcfg.get("search", None))
+ mirror = util.search_for_mirror(mcfg.get("search", None))
# fallback to search_dns if specified
if mirror is None:
@@ -896,7 +1021,7 @@ def find_apt_mirror_info(cfg, cloud, arch=None):
"""
if arch is None:
- arch = util.get_architecture()
+ arch = util.get_dpkg_architecture()
LOG.debug("got arch for mirror selection: %s", arch)
pmirror = get_mirror(cfg, "primary", arch, cloud)
LOG.debug("got primary mirror: %s", pmirror)
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 0ad6b7f1..01d61fa1 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -79,8 +79,6 @@ from cloudinit import templater
from cloudinit import url_helper
from cloudinit import util
-import six
-
RUBY_VERSION_DEFAULT = "1.8"
CHEF_DIRS = tuple([
@@ -273,7 +271,7 @@ def run_chef(chef_cfg, log):
cmd_args = chef_cfg['exec_arguments']
if isinstance(cmd_args, (list, tuple)):
cmd.extend(cmd_args)
- elif isinstance(cmd_args, six.string_types):
+ elif isinstance(cmd_args, str):
cmd.append(cmd_args)
else:
log.warning("Unknown type %s provided for chef"
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
index 610dbc8b..4d5a6aa2 100644
--- a/cloudinit/config/cc_debug.py
+++ b/cloudinit/config/cc_debug.py
@@ -28,8 +28,7 @@ location that this cloud-init has been configured with when running.
"""
import copy
-
-from six import StringIO
+from io import StringIO
from cloudinit import type_utils
from cloudinit import util
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index d8d0fcf1..45925755 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -163,7 +163,7 @@ def handle(_name, cfg, cloud, log, _args):
def update_disk_setup_devices(disk_setup, tformer):
# update 'disk_setup' dictionary anywhere were a device may occur
# update it with the response from 'tformer'
- for origname in disk_setup.keys():
+ for origname in list(disk_setup):
transformed = tformer(origname)
if transformed is None or transformed == origname:
continue
@@ -825,6 +825,7 @@ def lookup_force_flag(fs):
'btrfs': '-f',
'xfs': '-f',
'reiserfs': '-f',
+ 'swap': '-f',
}
if 'ext' in fs.lower():
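
Editor's note: the `list(disk_setup)` change above matters on Python 3, where renaming keys while iterating a live `dict.keys()` view raises RuntimeError on modern interpreters; iterating a snapshot of the keys is safe. A small sketch of the pattern:

    # Snapshot the keys first, then mutate the dict freely.
    disk_setup = {'ephemeral0': {'table_type': 'mbr'}}
    for origname in list(disk_setup):
        transformed = '/dev/xvdb'  # stand-in for tformer(origname)
        disk_setup[transformed] = disk_setup.pop(origname)
    print(disk_setup)  # {'/dev/xvdb': {'table_type': 'mbr'}}
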
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index eaf1e940..a9c04d86 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -56,8 +56,7 @@ The following default client config is provided, but can be overridden::
"""
import os
-
-from six import BytesIO
+from io import BytesIO
from configobj import ConfigObj
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
index f68c3cc7..4f8b7bf6 100644
--- a/cloudinit/config/cc_locale.py
+++ b/cloudinit/config/cc_locale.py
@@ -6,27 +6,58 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Locale
-------
-**Summary:** set system locale
+"""Locale: set system locale"""
-Configure the system locale and apply it system wide. By default use the locale
-specified by the datasource.
+from textwrap import dedent
-**Internal name:** ``cc_locale``
-
-**Module frequency:** per instance
+from cloudinit import util
+from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema
+from cloudinit.settings import PER_INSTANCE
-**Supported distros:** all
-**Config keys**::
+frequency = PER_INSTANCE
+distros = ['all']
+schema = {
+ 'id': 'cc_locale',
+ 'name': 'Locale',
+ 'title': 'Set system locale',
+ 'description': dedent(
+ """\
+ Configure the system locale and apply it system wide. By default use
+ the locale specified by the datasource."""
+ ),
+ 'distros': distros,
+ 'examples': [
+ dedent("""\
+ # Set the locale to ar_AE
+ locale: ar_AE
+ """),
+ dedent("""\
+ # Set the locale to fr_CA in /etc/alternate_path/locale
+ locale: fr_CA
+ locale_configfile: /etc/alternate_path/locale
+ """),
+ ],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'locale': {
+ 'type': 'string',
+ 'description': (
+ "The locale to set as the system's locale (e.g. ar_PS)"
+ ),
+ },
+ 'locale_configfile': {
+ 'type': 'string',
+ 'description': (
+ "The file in which to write the locale configuration (defaults"
+ " to the distro's default location)"
+ ),
+ },
+ },
+}
- locale: <locale str>
- locale_configfile: <path to locale config file>
-"""
-
-from cloudinit import util
+__doc__ = get_schema_doc(schema) # Supplement python help()
def handle(name, cfg, cloud, log, args):
@@ -40,6 +71,8 @@ def handle(name, cfg, cloud, log, args):
name, locale)
return
+ validate_cloudconfig_schema(cfg, schema)
+
log.debug("Setting locale to %s", locale)
locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile")
cloud.distro.apply_locale(locale, locale_cfgfile)
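
Editor's note: with the schema in place, cc_locale now gets the same jsonschema-backed validation as cc_ntp and cc_snap. A hedged sketch of what that validation amounts to, using the jsonschema package directly (cloud-init's validate_cloudconfig_schema wraps it and logs warnings rather than raising by default):

    from jsonschema import ValidationError, validate

    locale_schema = {
        'type': 'object',
        'properties': {
            'locale': {'type': 'string'},
            'locale_configfile': {'type': 'string'},
        },
    }

    try:
        validate({'locale': 'fr_CA',
                  'locale_configfile': '/etc/alternate_path/locale'},
                 locale_schema)
        print('config is valid')
    except ValidationError as e:
        print('invalid config:', e.message)
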
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index d5f63f5f..351183f1 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -49,9 +49,7 @@ private certificates for mcollective. Their values will be written to
"""
import errno
-
-import six
-from six import BytesIO
+import io
# Used since this can maintain comments
# and doesn't need a top level section
@@ -73,7 +71,7 @@ def configure(config, server_cfg=SERVER_CFG,
# original file in order to be able to mix the rest up.
try:
old_contents = util.load_file(server_cfg, quiet=False, decode=False)
- mcollective_config = ConfigObj(BytesIO(old_contents))
+ mcollective_config = ConfigObj(io.BytesIO(old_contents))
except IOError as e:
if e.errno != errno.ENOENT:
raise
@@ -93,7 +91,7 @@ def configure(config, server_cfg=SERVER_CFG,
'plugin.ssl_server_private'] = pricert_file
mcollective_config['securityprovider'] = 'ssl'
else:
- if isinstance(cfg, six.string_types):
+ if isinstance(cfg, str):
# Just set it in the 'main' section
mcollective_config[cfg_name] = cfg
elif isinstance(cfg, (dict)):
@@ -119,7 +117,7 @@ def configure(config, server_cfg=SERVER_CFG,
raise
# Now we got the whole (new) file, write to disk...
- contents = BytesIO()
+ contents = io.BytesIO()
mcollective_config.write(contents)
util.write_file(server_cfg, contents.getvalue(), mode=0o644)
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index c741c746..85a89cd1 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -25,7 +25,7 @@ mountpoint (i.e. ``[ sda1 ]`` or ``[ sda1, null ]``).
The ``mount_default_fields`` config key allows default options to be specified
for the values in a ``mounts`` entry that are not specified, aside from the
-``fs_spec`` and the ``fs_file``. If specified, this must be a list containing 7
+``fs_spec`` and the ``fs_file``. If specified, this must be a list containing 6
values. It defaults to::
mount_default_fields: [none, none, "auto", "defaults,nobootwait", "0", "2"]
@@ -74,6 +74,9 @@ from cloudinit import util
# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER)
+# Name matches 'server:/path'
+NETWORK_NAME_FILTER = r"^.+:.*"
+NETWORK_NAME_RE = re.compile(NETWORK_NAME_FILTER)
WS = re.compile("[%s]+" % (whitespace))
FSTAB_PATH = "/etc/fstab"
MNT_COMMENT = "comment=cloudconfig"
@@ -93,6 +96,13 @@ def is_meta_device_name(name):
return False
+def is_network_device(name):
+ # return true if this is a network device
+ if NETWORK_NAME_RE.match(name):
+ return True
+ return False
+
+
def _get_nth_partition_for_device(device_path, partition_number):
potential_suffixes = [str(partition_number), 'p%s' % (partition_number,),
'-part%s' % (partition_number,)]
@@ -122,6 +132,9 @@ def sanitize_devname(startname, transformer, log):
devname = "ephemeral0"
log.debug("Adjusted mount option from ephemeral to ephemeral0")
+ if is_network_device(startname):
+ return startname
+
device_path, partition_number = util.expand_dotted_devname(devname)
if is_meta_device_name(device_path):
@@ -223,13 +236,57 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
return size
+def create_swapfile(fname: str, size: str) -> None:
+ """Size is in MiB."""
+
+ errmsg = "Failed to create swapfile '%s' of size %sMB via %s: %s"
+
+ def create_swap(fname, size, method):
+ LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'",
+ fname, fstype, method)
+
+ if method == "fallocate":
+ cmd = ['fallocate', '-l', '%sM' % size, fname]
+ elif method == "dd":
+ cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M',
+ 'count=%s' % size]
+
+ try:
+ util.subp(cmd, capture=True)
+ except util.ProcessExecutionError as e:
+ LOG.warning(errmsg, fname, size, method, e)
+            util.del_file(fname)
+            raise  # re-raise so callers can fall back to another method
+
+ swap_dir = os.path.dirname(fname)
+ util.ensure_dir(swap_dir)
+
+ fstype = util.get_mount_info(swap_dir)[1]
+
+ if fstype in ("xfs", "btrfs"):
+ create_swap(fname, size, "dd")
+ else:
+ try:
+ create_swap(fname, size, "fallocate")
+ except util.ProcessExecutionError as e:
+ LOG.warning(errmsg, fname, size, "dd", e)
+ LOG.warning("Will attempt with dd.")
+ create_swap(fname, size, "dd")
+
+ util.chmod(fname, 0o600)
+ try:
+ util.subp(['mkswap', fname])
+ except util.ProcessExecutionError:
+ util.del_file(fname)
+ raise
+
+
def setup_swapfile(fname, size=None, maxsize=None):
"""
fname: full path string of filename to setup
size: the size to create. set to "auto" for recommended
maxsize: the maximum size
"""
- tdir = os.path.dirname(fname)
+ swap_dir = os.path.dirname(fname)
if str(size).lower() == "auto":
try:
memsize = util.read_meminfo()['total']
@@ -237,28 +294,17 @@ def setup_swapfile(fname, size=None, maxsize=None):
LOG.debug("Not creating swap: failed to read meminfo")
return
- util.ensure_dir(tdir)
- size = suggested_swapsize(fsys=tdir, maxsize=maxsize,
+ util.ensure_dir(swap_dir)
+ size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize,
memsize=memsize)
+ mibsize = str(int(size / (2 ** 20)))
if not size:
LOG.debug("Not creating swap: suggested size was 0")
return
- mbsize = str(int(size / (2 ** 20)))
- msg = "creating swap file '%s' of %sMB" % (fname, mbsize)
- try:
- util.ensure_dir(tdir)
- util.log_time(LOG.debug, msg, func=util.subp,
- args=[['sh', '-c',
- ('rm -f "$1" && umask 0066 && '
- '{ fallocate -l "${2}M" "$1" || '
- 'dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && '
- 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'),
- 'setup_swap', fname, mbsize]])
-
- except Exception as e:
- raise IOError("Failed %s: %s" % (msg, e))
+ util.log_time(LOG.debug, msg="Setting up swap file", func=create_swapfile,
+ args=[fname, mibsize])
return fname
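
Editor's note: the rewritten swap setup replaces the old inline shell pipeline with explicit command selection: on copy-on-write filesystems (xfs, btrfs) fallocate can produce extents that swapon rejects, so dd is forced there; elsewhere fallocate is preferred for speed, with dd as the fallback. A sketch of just the command selection (sizes in MiB; the real helper also runs mkswap and chmods the file to 0600):

    # Illustrative only; mirrors the method choice in create_swapfile.
    def swap_creation_cmd(fname, size_mib, fstype):
        method = 'dd' if fstype in ('xfs', 'btrfs') else 'fallocate'
        if method == 'fallocate':
            return ['fallocate', '-l', '%sM' % size_mib, fname]
        return ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M',
                'count=%s' % size_mib]

    print(swap_creation_cmd('/var/swap.img', 512, 'btrfs'))
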
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 9e074bda..3b2c2020 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -6,19 +6,17 @@
"""NTP: enable and configure ntp"""
-from cloudinit.config.schema import (
- get_schema_doc, validate_cloudconfig_schema)
+import copy
+import os
+from textwrap import dedent
+
from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
from cloudinit import temp_utils
from cloudinit import templater
from cloudinit import type_utils
from cloudinit import util
-
-import copy
-import os
-import six
-from textwrap import dedent
+from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema
+from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
@@ -171,8 +169,8 @@ schema = {
'uniqueItems': True,
'description': dedent("""\
List of ntp pools. If both pools and servers are
- empty, 4 default pool servers will be provided of
- the format ``{0-3}.{distro}.pool.ntp.org``.""")
+ empty, 4 default pool servers will be provided of
+ the format ``{0-3}.{distro}.pool.ntp.org``.""")
},
'servers': {
'type': 'array',
@@ -183,46 +181,46 @@ schema = {
'uniqueItems': True,
'description': dedent("""\
List of ntp servers. If both pools and servers are
- empty, 4 default pool servers will be provided with
- the format ``{0-3}.{distro}.pool.ntp.org``.""")
+ empty, 4 default pool servers will be provided with
+ the format ``{0-3}.{distro}.pool.ntp.org``.""")
},
'ntp_client': {
'type': 'string',
'default': 'auto',
'description': dedent("""\
Name of an NTP client to use to configure system NTP.
- When unprovided or 'auto' the default client preferred
- by the distribution will be used. The following
- built-in client names can be used to override existing
- configuration defaults: chrony, ntp, ntpdate,
- systemd-timesyncd."""),
+ When unprovided or 'auto' the default client preferred
+ by the distribution will be used. The following
+ built-in client names can be used to override existing
+ configuration defaults: chrony, ntp, ntpdate,
+ systemd-timesyncd."""),
},
'enabled': {
'type': 'boolean',
'default': True,
'description': dedent("""\
Attempt to enable ntp clients if set to True. If set
- to False, ntp client will not be configured or
- installed"""),
+ to False, ntp client will not be configured or
+ installed"""),
},
'config': {
'description': dedent("""\
Configuration settings or overrides for the
- ``ntp_client`` specified."""),
+ ``ntp_client`` specified."""),
'type': ['object'],
'properties': {
'confpath': {
'type': 'string',
'description': dedent("""\
The path to where the ``ntp_client``
- configuration is written."""),
+ configuration is written."""),
},
'check_exe': {
'type': 'string',
'description': dedent("""\
The executable name for the ``ntp_client``.
- For example, ntp service ``check_exe`` is
- 'ntpd' because it runs the ntpd binary."""),
+ For example, ntp service ``check_exe`` is
+ 'ntpd' because it runs the ntpd binary."""),
},
'packages': {
'type': 'array',
@@ -232,22 +230,22 @@ schema = {
'uniqueItems': True,
'description': dedent("""\
List of packages needed to be installed for the
- selected ``ntp_client``."""),
+ selected ``ntp_client``."""),
},
'service_name': {
'type': 'string',
'description': dedent("""\
The systemd or sysvinit service name used to
- start and stop the ``ntp_client``
- service."""),
+ start and stop the ``ntp_client``
+ service."""),
},
'template': {
'type': 'string',
'description': dedent("""\
Inline template allowing users to define their
- own ``ntp_client`` configuration template.
- The value must start with '## template:jinja'
- to enable use of templating support.
+ own ``ntp_client`` configuration template.
+ The value must start with '## template:jinja'
+ to enable use of templating support.
"""),
},
},
@@ -460,7 +458,7 @@ def supplemental_schema_validation(ntp_config):
for key, value in sorted(ntp_config.items()):
keypath = 'ntp:config:' + key
if key == 'confpath':
- if not all([value, isinstance(value, six.string_types)]):
+ if not all([value, isinstance(value, str)]):
errors.append(
'Expected a config file path {keypath}.'
' Found ({value})'.format(keypath=keypath, value=value))
@@ -472,11 +470,11 @@ def supplemental_schema_validation(ntp_config):
elif key in ('template', 'template_name'):
if value is None: # Either template or template_name can be none
continue
- if not isinstance(value, six.string_types):
+ if not isinstance(value, str):
errors.append(
'Expected a string type for {keypath}.'
' Found ({value})'.format(keypath=keypath, value=value))
- elif not isinstance(value, six.string_types):
+ elif not isinstance(value, str):
errors.append(
'Expected a string type for {keypath}.'
' Found ({value})'.format(keypath=keypath, value=value))
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index b8e27090..733c3910 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -19,6 +19,7 @@ keys to post. Available keys are:
- ``pub_key_dsa``
- ``pub_key_rsa``
- ``pub_key_ecdsa``
+ - ``pub_key_ed25519``
- ``instance_id``
- ``hostname``
- ``fqdn``
@@ -52,6 +53,7 @@ POST_LIST_ALL = [
'pub_key_dsa',
'pub_key_rsa',
'pub_key_ecdsa',
+ 'pub_key_ed25519',
'instance_id',
'hostname',
'fqdn'
@@ -105,6 +107,7 @@ def handle(name, cfg, cloud, log, args):
'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub',
'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub',
'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
+ 'pub_key_ed25519': '/etc/ssh/ssh_host_ed25519_key.pub',
}
for (n, path) in pubkeys.items():
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 43a479cf..3e81a3c7 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -49,16 +49,15 @@ key returns 0.
condition: <true/false/command>
"""
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
import errno
import os
import re
-import six
import subprocess
import time
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
+
frequency = PER_INSTANCE
EXIT_FAIL = 254
@@ -183,7 +182,7 @@ def load_power_state(cfg):
pstate['timeout'])
condition = pstate.get("condition", True)
- if not isinstance(condition, six.string_types + (list, bool)):
+ if not isinstance(condition, (str, list, bool)):
raise TypeError("condition type %s invalid. must be list, bool, str")
return (args, timeout, condition)
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index b088db6e..c01f5b8f 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -77,11 +77,10 @@ See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html
pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290
"""
-from six import StringIO
-
import os
import socket
import yaml
+from io import StringIO
from cloudinit import helpers
from cloudinit import util
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index bd8ee89f..a5aca038 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -50,13 +50,12 @@ user scripts configuration directory, to be run later by ``cc_scripts_user``.
#
import os
+from urllib.parse import parse_qs
from cloudinit.settings import PER_INSTANCE
from cloudinit import url_helper as uhelp
from cloudinit import util
-from six.moves.urllib_parse import parse_qs
-
frequency = PER_INSTANCE
MY_NAME = "cc_rightscale_userdata"
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index ff211f65..5df0137d 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -180,7 +180,6 @@ config entries. Legacy to new mappings are as follows:
import os
import re
-import six
from cloudinit import log as logging
from cloudinit import util
@@ -233,9 +232,9 @@ def load_config(cfg):
fillup = (
(KEYNAME_CONFIGS, [], list),
- (KEYNAME_DIR, DEF_DIR, six.string_types),
- (KEYNAME_FILENAME, DEF_FILENAME, six.string_types),
- (KEYNAME_RELOAD, DEF_RELOAD, six.string_types + (list,)),
+ (KEYNAME_DIR, DEF_DIR, str),
+ (KEYNAME_FILENAME, DEF_FILENAME, str),
+ (KEYNAME_RELOAD, DEF_RELOAD, (str, list)),
(KEYNAME_REMOTES, DEF_REMOTES, dict))
for key, default, vtypes in fillup:
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index a5d7c73f..b65f3ed9 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -61,8 +61,7 @@ used::
import base64
import os
-
-from six import BytesIO
+from io import BytesIO
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index e3b39d8b..7b7aa885 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -236,12 +236,12 @@ def handle(_name, cfg, cloud, log, args):
raise errors[-1]
-def rand_user_password(pwlen=9):
+def rand_user_password(pwlen=20):
return util.rand_str(pwlen, select_from=PW_SET)
def chpasswd(distro, plist_in, hashed=False):
- if util.is_FreeBSD():
+ if util.is_BSD():
for pentry in plist_in.splitlines():
u, p = pentry.split(":")
distro.set_passwd(u, p, hashed=hashed)
diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
index 90724b81..8178562e 100644
--- a/cloudinit/config/cc_snap.py
+++ b/cloudinit/config/cc_snap.py
@@ -61,9 +61,9 @@ schema = {
snap:
assertions:
00: |
- signed_assertion_blob_here
+ signed_assertion_blob_here
02: |
- signed_assertion_blob_here
+ signed_assertion_blob_here
commands:
00: snap create-user --sudoer --known <snap-user>@mydomain.com
01: snap install canonical-livepatch
@@ -85,6 +85,21 @@ schema = {
01: ['snap', 'install', 'vlc']
02: snap install vlc
03: 'snap install vlc'
+ """), dedent("""\
+ # You can use a list of commands
+ snap:
+ commands:
+ - ['install', 'vlc']
+ - ['snap', 'install', 'vlc']
+ - snap install vlc
+ - 'snap install vlc'
+ """), dedent("""\
+ # You can use a list of assertions
+ snap:
+ assertions:
+ - signed_assertion_blob_here
+ - |
+ signed_assertion_blob_here
""")],
'frequency': PER_INSTANCE,
'type': 'object',
@@ -98,7 +113,8 @@ schema = {
'additionalItems': False, # Reject items non-string
'minItems': 1,
'minProperties': 1,
- 'uniqueItems': True
+ 'uniqueItems': True,
+ 'additionalProperties': {'type': 'string'},
},
'commands': {
'type': ['object', 'array'], # Array of strings or dict
@@ -110,6 +126,12 @@ schema = {
'additionalItems': False, # Reject non-string & non-list
'minItems': 1,
'minProperties': 1,
+ 'additionalProperties': {
+ 'oneOf': [
+ {'type': 'string'},
+ {'type': 'array', 'items': {'type': 'string'}},
+ ],
+ },
},
'squashfuse_in_container': {
'type': 'boolean'
@@ -122,10 +144,6 @@ schema = {
}
}
-# TODO schema for 'assertions' and 'commands' are too permissive at the moment.
-# Once python-jsonschema supports schema draft 6 add support for arbitrary
-# object keys with 'patternProperties' constraint to validate string values.
-
__doc__ = get_schema_doc(schema) # Supplement python help()
SNAP_CMD = "snap"
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index dcf86fdc..7ac1c8cf 100755
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -11,7 +11,7 @@ SSH Authkey Fingerprints
Write fingerprints of authorized keys for each user to log. This is enabled by
default, but can be disabled using ``no_ssh_fingerprints``. The hash type for
-the keys can be specified, but defaults to ``md5``.
+the keys can be specified, but defaults to ``sha256``.
**Internal name:** `` cc_ssh_authkey_fingerprints``
@@ -42,7 +42,7 @@ def _split_hash(bin_hash):
return split_up
-def _gen_fingerprint(b64_text, hash_meth='md5'):
+def _gen_fingerprint(b64_text, hash_meth='sha256'):
if not b64_text:
return ''
# TBD(harlowja): Maybe we should feed this into 'ssh -lf'?
@@ -65,7 +65,7 @@ def _is_printable_key(entry):
return False
-def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
+def _pprint_key_entries(user, key_fn, key_entries, hash_meth='sha256',
prefix='ci-info: '):
if not key_entries:
message = ("%sno authorized SSH keys fingerprints found for user %s.\n"
@@ -101,7 +101,7 @@ def handle(name, cfg, cloud, log, _args):
"logging of SSH fingerprints disabled"), name)
return
- hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
+ hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "sha256")
(users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
for (user_name, _cfg) in users.items():
(key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
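
Editor's note: switching the default `authkey_hash` from md5 to sha256 only changes which digest is fed to the existing fingerprint renderer. A hedged sketch of the computation (the module's _gen_fingerprint hashes the base64-decoded key material and renders colon-separated hex pairs):

    import base64
    import hashlib

    def gen_fingerprint(b64_text, hash_meth='sha256'):
        digest = hashlib.new(
            hash_meth, base64.b64decode(b64_text)).hexdigest()
        # Render as the familiar colon-separated hex-pair form.
        return ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))

    # Hypothetical key material, for illustration only.
    print(gen_fingerprint(base64.b64encode(b'example-key-data').decode()))
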
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index f846e9a5..8b6d2a1a 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -4,8 +4,6 @@
from textwrap import dedent
-import six
-
from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
@@ -98,7 +96,7 @@ def configure_ua(token=None, enable=None):
if enable is None:
enable = []
- elif isinstance(enable, six.string_types):
+ elif isinstance(enable, str):
LOG.warning('ubuntu_advantage: enable should be a list, not'
' a string; treating as a single enable')
enable = [enable]
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 0b6546e2..8601e707 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -4,61 +4,14 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Write Files
------------
-**Summary:** write arbitrary files
-
-Write out arbitrary content to files, optionally setting permissions. Content
-can be specified in plain text or binary. Data encoded with either base64 or
-binary gzip data can be specified and will be decoded before being written.
-
-.. note::
- if multiline data is provided, care should be taken to ensure that it
- follows yaml formatting standards. to specify binary data, use the yaml
- option ``!!binary``
-
-.. note::
- Do not write files under /tmp during boot because of a race with
- systemd-tmpfiles-clean that can cause temp files to get cleaned during
- the early boot process. Use /run/somedir instead to avoid race LP:1707222.
-
-**Internal name:** ``cc_write_files``
-
-**Module frequency:** per instance
-
-**Supported distros:** all
-
-**Config keys**::
-
- write_files:
- - encoding: b64
- content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4...
- owner: root:root
- path: /etc/sysconfig/selinux
- permissions: '0644'
- - content: |
- # My new /etc/sysconfig/samba file
-
- SMDBOPTIONS="-D"
- path: /etc/sysconfig/samba
- - content: !!binary |
- f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAA
- AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAA
- AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAAB
- ...
- path: /bin/arch
- permissions: '0555'
- - content: |
- 15 * * * * root ship_logs
- path: /etc/crontab
- append: true
-"""
+"""Write Files: write arbitrary files"""
import base64
import os
-import six
+from textwrap import dedent
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
@@ -72,6 +25,142 @@ UNKNOWN_ENC = 'text/plain'
LOG = logging.getLogger(__name__)
+distros = ['all']
+
+# The schema definition for each cloud-config module is a strict contract for
+# describing supported configuration parameters for each cloud-config section.
+# It allows cloud-config to validate and alert users to invalid or ignored
+# configuration options before actually attempting to deploy with said
+# configuration.
+
+supported_encoding_types = [
+ 'gz', 'gzip', 'gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64', 'b64',
+ 'base64']
+
+schema = {
+ 'id': 'cc_write_files',
+ 'name': 'Write Files',
+ 'title': 'write arbitrary files',
+ 'description': dedent("""\
+ Write out arbitrary content to files, optionally setting permissions.
+ Parent folders in the path are created if absent.
+ Content can be specified in plain text or binary. Data encoded with
+ either base64 or binary gzip data can be specified and will be decoded
+ before being written. For empty file creation, content can be omitted.
+
+ .. note::
+ if multiline data is provided, care should be taken to ensure that it
+ follows yaml formatting standards. to specify binary data, use the yaml
+ option ``!!binary``
+
+ .. note::
+ Do not write files under /tmp during boot because of a race with
+ systemd-tmpfiles-clean that can cause temp files to get cleaned during
+ the early boot process. Use /run/somedir instead to avoid race
+ LP:1707222."""),
+ 'distros': distros,
+ 'examples': [
+ dedent("""\
+ # Write out base64 encoded content to /etc/sysconfig/selinux
+ write_files:
+ - encoding: b64
+ content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4...
+ owner: root:root
+ path: /etc/sysconfig/selinux
+ permissions: '0644'
+ """),
+ dedent("""\
+ # Appending content to an existing file
+ write_files:
+ - content: |
+ 15 * * * * root ship_logs
+ path: /etc/crontab
+ append: true
+ """),
+ dedent("""\
+        # Provide gzipped binary content
+ write_files:
+ - encoding: gzip
+ content: !!binary |
+ H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
+ path: /usr/bin/hello
+ permissions: '0755'
+ """),
+ dedent("""\
+ # Create an empty file on the system
+ write_files:
+ - path: /root/CLOUD_INIT_WAS_HERE
+ """)],
+ 'frequency': frequency,
+ 'type': 'object',
+ 'properties': {
+ 'write_files': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'path': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Path of the file to which ``content`` is decoded
+ and written
+ """),
+ },
+ 'content': {
+ 'type': 'string',
+ 'default': '',
+ 'description': dedent("""\
+ Optional content to write to the provided ``path``.
+ When content is present and encoding is not '%s',
+ decode the content prior to writing. Default:
+ **''**
+ """ % UNKNOWN_ENC),
+ },
+ 'owner': {
+ 'type': 'string',
+ 'default': DEFAULT_OWNER,
+ 'description': dedent("""\
+ Optional owner:group to chown on the file. Default:
+ **{owner}**
+ """.format(owner=DEFAULT_OWNER)),
+ },
+ 'permissions': {
+ 'type': 'string',
+ 'default': oct(DEFAULT_PERMS).replace('o', ''),
+ 'description': dedent("""\
+ Optional file permissions to set on ``path``
+ represented as an octal string '0###'. Default:
+ **'{perms}'**
+ """.format(perms=oct(DEFAULT_PERMS).replace('o', ''))),
+ },
+ 'encoding': {
+ 'type': 'string',
+ 'default': UNKNOWN_ENC,
+ 'enum': supported_encoding_types,
+ 'description': dedent("""\
+ Optional encoding type of the content. Default is
+ **text/plain** and no content decoding is
+ performed. Supported encoding types are:
+ %s.""" % ", ".join(supported_encoding_types)),
+ },
+ 'append': {
+ 'type': 'boolean',
+ 'default': False,
+ 'description': dedent("""\
+                        Whether to append ``content`` to the existing file if
+ ``path`` exists. Default: **false**.
+ """),
+ },
+ },
+ 'required': ['path'],
+ 'additionalProperties': False
+ },
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema) # Supplement python help()
+
def handle(name, cfg, _cloud, log, _args):
files = cfg.get('write_files')
@@ -79,6 +168,7 @@ def handle(name, cfg, _cloud, log, _args):
log.debug(("Skipping module named %s,"
" no/empty 'write_files' key in configuration"), name)
return
+ validate_cloudconfig_schema(cfg, schema)
write_files(name, files)
@@ -126,7 +216,7 @@ def decode_perms(perm, default):
if perm is None:
return default
try:
- if isinstance(perm, six.integer_types + (float,)):
+ if isinstance(perm, (int, float)):
# Just 'downcast' it (if a float)
return int(perm)
else:
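
As an aside, a minimal sketch (using only names introduced above) of how this
schema contract is exercised against a parsed user config::

    from cloudinit.config.cc_write_files import schema
    from cloudinit.config.schema import validate_cloudconfig_schema

    # Mirrors what a user's #cloud-config YAML parses to (sample values)
    cfg = {
        'write_files': [{
            'path': '/etc/sysconfig/selinux',
            'content': 'SELINUX=enforcing\n',
            'owner': 'root:root',
            'permissions': '0644',
        }]
    }
    # Logs warnings on violations; raises SchemaValidationError if strict=True
    validate_cloudconfig_schema(cfg, schema)
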
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 3b354a7d..01fe683c 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -18,7 +18,7 @@ entry, the config entry will be skipped.
**Module frequency:** per always
-**Supported distros:** fedora, rhel
+**Supported distros:** centos, fedora, rhel
**Config keys**::
@@ -30,17 +30,13 @@ entry, the config entry will be skipped.
# any repository configuration options (see man yum.conf)
"""
+import io
import os
-
-try:
- from configparser import ConfigParser
-except ImportError:
- from ConfigParser import ConfigParser
-import six
+from configparser import ConfigParser
from cloudinit import util
-distros = ['fedora', 'rhel']
+distros = ['centos', 'fedora', 'rhel']
def _canonicalize_id(repo_id):
@@ -57,7 +53,7 @@ def _format_repo_value(val):
# Can handle 'lists' in certain cases
# See: https://linux.die.net/man/5/yum.conf
return "\n".join([_format_repo_value(v) for v in val])
- if not isinstance(val, six.string_types):
+ if not isinstance(val, str):
return str(val)
return val
@@ -72,7 +68,7 @@ def _format_repository_config(repo_id, repo_config):
# For now assume that people using this know
# the format of yum and don't verify keys/values further
to_be.set(repo_id, k, _format_repo_value(v))
- to_be_stream = six.StringIO()
+ to_be_stream = io.StringIO()
to_be.write(to_be_stream)
to_be_stream.seek(0)
lines = to_be_stream.readlines()
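
For reference, a minimal sketch of the py3-only stdlib pattern adopted here
(the repo id and option are illustrative)::

    import io
    from configparser import ConfigParser

    to_be = ConfigParser()
    to_be.add_section('myrepo')
    to_be.set('myrepo', 'baseurl', 'http://example.com/repo')
    to_be_stream = io.StringIO()  # replaces six.StringIO
    to_be.write(to_be_stream)
    to_be_stream.seek(0)
    print(to_be_stream.read())
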
diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py
index aba26952..05855b0c 100644
--- a/cloudinit/config/cc_zypper_add_repo.py
+++ b/cloudinit/config/cc_zypper_add_repo.py
@@ -7,7 +7,6 @@
import configobj
import os
-from six import string_types
from textwrap import dedent
from cloudinit.config.schema import get_schema_doc
@@ -110,7 +109,7 @@ def _format_repo_value(val):
return 1 if val else 0
if isinstance(val, (list, tuple)):
return "\n ".join([_format_repo_value(v) for v in val])
- if not isinstance(val, string_types):
+ if not isinstance(val, str):
return str(val)
return val
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 807c3eee..2d8c7577 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -1,8 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""schema.py: Set of module functions for processing cloud-config schema."""
-from __future__ import print_function
-
from cloudinit import importer
from cloudinit.util import find_modules, load_file
@@ -36,6 +34,8 @@ SCHEMA_DOC_TMPL = """
{examples}
"""
SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}'
+SCHEMA_LIST_ITEM_TMPL = (
+ '{prefix}Each item in **{prop_name}** list supports the following keys:')
SCHEMA_EXAMPLES_HEADER = '\n**Examples**::\n\n'
SCHEMA_EXAMPLES_SPACER_TEMPLATE = '\n # --- Example{0} ---'
@@ -58,6 +58,19 @@ class SchemaValidationError(ValueError):
super(SchemaValidationError, self).__init__(message)
+def is_schema_byte_string(checker, instance):
+ """TYPE_CHECKER override allowing bytes for string type
+
+ For jsonschema v. 3.0.0+
+ """
+ try:
+ from jsonschema import Draft4Validator
+ except ImportError:
+ return False
+ return (Draft4Validator.TYPE_CHECKER.is_type(instance, "string") or
+ isinstance(instance, (bytes,)))
+
+
def validate_cloudconfig_schema(config, schema, strict=False):
"""Validate provided config meets the schema definition.
@@ -73,11 +86,31 @@ def validate_cloudconfig_schema(config, schema, strict=False):
"""
try:
from jsonschema import Draft4Validator, FormatChecker
+ from jsonschema.validators import create, extend
except ImportError:
logging.debug(
'Ignoring schema validation. python-jsonschema is not present')
return
- validator = Draft4Validator(schema, format_checker=FormatChecker())
+
+    # Allow bytes to be presented as an acceptable value for string-typed
+    # jsonschema attributes in cloud-init's schema.
+    # This allows #cloud-config to provide valid yaml "content: !!binary | ..."
+ if hasattr(Draft4Validator, 'TYPE_CHECKER'): # jsonschema 3.0+
+ type_checker = Draft4Validator.TYPE_CHECKER.redefine(
+ 'string', is_schema_byte_string)
+ cloudinitValidator = extend(Draft4Validator, type_checker=type_checker)
+ else: # jsonschema 2.6 workaround
+ types = Draft4Validator.DEFAULT_TYPES
+ # Allow bytes as well as string (and disable a spurious
+ # unsupported-assignment-operation pylint warning which appears because
+ # this code path isn't written against the latest jsonschema).
+ types['string'] = (str, bytes) # pylint: disable=E1137
+ cloudinitValidator = create(
+ meta_schema=Draft4Validator.META_SCHEMA,
+ validators=Draft4Validator.VALIDATORS,
+ version="draft4",
+ default_types=types)
+ validator = cloudinitValidator(schema, format_checker=FormatChecker())
errors = ()
for error in sorted(validator.iter_errors(config), key=lambda e: e.path):
path = '.'.join([str(p) for p in error.path])
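
For reference, a standalone sketch of the jsonschema >= 3.0 path above (the
validator name here is illustrative, not part of the patch)::

    from jsonschema import Draft4Validator
    from jsonschema.validators import extend

    def is_str_or_bytes(checker, instance):
        # Accept bytes wherever a schema expects "string"
        return (Draft4Validator.TYPE_CHECKER.is_type(instance, 'string')
                or isinstance(instance, bytes))

    type_checker = Draft4Validator.TYPE_CHECKER.redefine(
        'string', is_str_or_bytes)
    ByteFriendlyValidator = extend(Draft4Validator, type_checker=type_checker)

    # b"..." (e.g. decoded yaml !!binary content) now validates as a string
    ByteFriendlyValidator({'type': 'string'}).validate(b'some bytes')
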
@@ -106,7 +139,6 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
schemapaths = _schemapath_for_cloudconfig(
cloudconfig, original_content)
errors_by_line = defaultdict(list)
- error_count = 1
error_footer = []
annotated_content = []
for path, msg in schema_errors:
@@ -120,18 +152,17 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
if col is not None:
msg = 'Line {line} column {col}: {msg}'.format(
line=line, col=col, msg=msg)
- error_footer.append('# E{0}: {1}'.format(error_count, msg))
- error_count += 1
lines = original_content.decode().split('\n')
- error_count = 1
- for line_number, line in enumerate(lines):
- errors = errors_by_line[line_number + 1]
+ error_index = 1
+ for line_number, line in enumerate(lines, 1):
+ errors = errors_by_line[line_number]
if errors:
- error_label = ','.join(
- ['E{0}'.format(count + error_count)
- for count in range(0, len(errors))])
- error_count += len(errors)
- annotated_content.append(line + '\t\t# ' + error_label)
+ error_label = []
+ for error in errors:
+ error_label.append('E{0}'.format(error_index))
+ error_footer.append('# E{0}: {1}'.format(error_index, error))
+ error_index += 1
+ annotated_content.append(line + '\t\t# ' + ','.join(error_label))
else:
annotated_content.append(line)
annotated_content.append(
@@ -213,20 +244,34 @@ def _schemapath_for_cloudconfig(config, original_content):
previous_depth = -1
path_prefix = ''
if line.startswith('- '):
+ # Process list items adding a list_index to the path prefix
+ previous_list_idx = '.%d' % (list_index - 1)
+ if path_prefix and path_prefix.endswith(previous_list_idx):
+ path_prefix = path_prefix[:-len(previous_list_idx)]
key = str(list_index)
- value = line[1:]
+ schema_line_numbers[key] = line_number
+ item_indent = len(re.match(RE_YAML_INDENT, line[1:]).groups()[0])
+ item_indent += 1 # For the leading '-' character
+ previous_depth = indent_depth
+ indent_depth += item_indent
+ line = line[item_indent:] # Strip leading list item + whitespace
list_index += 1
else:
+ # Process non-list lines setting value if present
list_index = 0
key, value = line.split(':', 1)
+ if path_prefix:
+ # Append any existing path_prefix for a fully-pathed key
+ key = path_prefix + '.' + key
while indent_depth <= previous_depth:
if scopes:
previous_depth, path_prefix = scopes.pop()
+ if list_index > 0 and indent_depth == previous_depth:
+ path_prefix = '.'.join(path_prefix.split('.')[:-1])
+ break
else:
previous_depth = -1
path_prefix = ''
- if path_prefix:
- key = path_prefix + '.' + key
scopes.append((indent_depth, key))
if value:
value = value.strip()
@@ -259,6 +304,28 @@ def _get_property_type(property_dict):
return property_type
+def _parse_description(description, prefix):
+ """Parse description from the schema in a format that we can better
+ display in our docs. This parser does three things:
+
+ - Guarantee that a paragraph will be in a single line
+ - Guarantee that each new paragraph will be aligned with
+ the first paragraph
+    - Properly align lists of items
+
+ @param description: The original description in the schema.
+ @param prefix: The number of spaces used to align the current description
+ """
+ list_paragraph = prefix * 3
+ description = re.sub(r"(\S)\n(\S)", r"\1 \2", description)
+ description = re.sub(
+ r"\n\n", r"\n\n{}".format(prefix), description)
+ description = re.sub(
+ r"\n( +)-", r"\n{}-".format(list_paragraph), description)
+
+ return description
+
+
def _get_property_doc(schema, prefix=' '):
"""Return restructured text describing the supported schema properties."""
new_prefix = prefix + ' '
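
To make the three behaviours concrete, a small hypothetical input run through
``_parse_description``::

    from cloudinit.config.schema import _parse_description

    raw = ("Write out arbitrary content\nto files.\n\n"
           "    - first item\n"
           "    - second item\n")
    # The wrapped paragraph is joined onto one line, the next paragraph is
    # aligned to the prefix, and the list items are re-indented
    print(_parse_description(raw, '    '))
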
@@ -266,11 +333,23 @@ def _get_property_doc(schema, prefix=' '):
for prop_key, prop_config in schema.get('properties', {}).items():
    # Define prop_name and description for SCHEMA_PROPERTY_TMPL
description = prop_config.get('description', '')
+
properties.append(SCHEMA_PROPERTY_TMPL.format(
prefix=prefix,
prop_name=prop_key,
type=_get_property_type(prop_config),
- description=description.replace('\n', '')))
+ description=_parse_description(description, prefix)))
+ items = prop_config.get('items')
+ if items:
+ if isinstance(items, list):
+ for item in items:
+ properties.append(
+ _get_property_doc(item, prefix=new_prefix))
+ elif isinstance(items, dict) and items.get('properties'):
+ properties.append(SCHEMA_LIST_ITEM_TMPL.format(
+ prefix=new_prefix, prop_name=prop_key))
+ new_prefix += ' '
+ properties.append(_get_property_doc(items, prefix=new_prefix))
if 'properties' in prop_config:
properties.append(
_get_property_doc(prop_config, prefix=new_prefix))
@@ -346,8 +425,9 @@ def get_parser(parser=None):
description='Validate cloud-config files or document schema')
parser.add_argument('-c', '--config-file',
help='Path of the cloud-config yaml file to validate')
- parser.add_argument('-d', '--doc', action="store_true", default=False,
- help='Print schema documentation')
+ parser.add_argument('-d', '--docs', nargs='+',
+ help=('Print schema module docs. Choices: all or'
+ ' space-delimited cc_names.'))
parser.add_argument('--annotate', action="store_true", default=False,
help='Annotate existing cloud-config file with errors')
return parser
@@ -355,9 +435,9 @@ def get_parser(parser=None):
def handle_schema_args(name, args):
"""Handle provided schema args and perform the appropriate actions."""
- exclusive_args = [args.config_file, args.doc]
+ exclusive_args = [args.config_file, args.docs]
if not any(exclusive_args) or all(exclusive_args):
- error('Expected either --config-file argument or --doc')
+ error('Expected either --config-file argument or --docs')
full_schema = get_schema()
if args.config_file:
try:
@@ -370,9 +450,16 @@ def handle_schema_args(name, args):
error(str(e))
else:
print("Valid cloud-config file {0}".format(args.config_file))
- if args.doc:
+ elif args.docs:
+ schema_ids = [subschema['id'] for subschema in full_schema['allOf']]
+ schema_ids += ['all']
+ invalid_docs = set(args.docs).difference(set(schema_ids))
+ if invalid_docs:
+ error('Invalid --docs value {0}. Must be one of: {1}'.format(
+ list(invalid_docs), ', '.join(schema_ids)))
for subschema in full_schema['allOf']:
- print(get_schema_doc(subschema))
+ if 'all' in args.docs or subschema['id'] in args.docs:
+ print(get_schema_doc(subschema))
def main():
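
Assuming the existing ``cloud-init devel schema`` CLI wiring, the new flag is
exercised like this (``--docs`` accepts ``all`` or specific cc_ module names)::

    $ cloud-init devel schema --docs cc_write_files
    $ cloud-init devel schema --docs all
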
diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/cloudinit/config/tests/test_disable_ec2_metadata.py
index 67646b03..823917c7 100644
--- a/cloudinit/config/tests/test_disable_ec2_metadata.py
+++ b/cloudinit/config/tests/test_disable_ec2_metadata.py
@@ -15,8 +15,6 @@ DISABLE_CFG = {'disable_ec2_metadata': 'true'}
class TestEC2MetadataRoute(CiTestCase):
- with_logs = True
-
@mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
@mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
def test_disable_ifconfig(self, m_subp, m_which):
diff --git a/cloudinit/config/tests/test_mounts.py b/cloudinit/config/tests/test_mounts.py
new file mode 100644
index 00000000..80b54d0f
--- /dev/null
+++ b/cloudinit/config/tests/test_mounts.py
@@ -0,0 +1,28 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+from unittest import mock
+
+import pytest
+
+from cloudinit.config.cc_mounts import create_swapfile
+
+
+M_PATH = 'cloudinit.config.cc_mounts.'
+
+
+class TestCreateSwapfile:
+
+ @pytest.mark.parametrize('fstype', ('xfs', 'btrfs', 'ext4', 'other'))
+ @mock.patch(M_PATH + 'util.get_mount_info')
+ @mock.patch(M_PATH + 'util.subp')
+ def test_happy_path(self, m_subp, m_get_mount_info, fstype, tmpdir):
+ swap_file = tmpdir.join("swap-file")
+ fname = str(swap_file)
+
+ # Some of the calls to util.subp should create the swap file; this
+ # roughly approximates that
+ m_subp.side_effect = lambda *args, **kwargs: swap_file.write('')
+
+ m_get_mount_info.return_value = (mock.ANY, fstype)
+
+ create_swapfile(fname, '')
+ assert mock.call(['mkswap', fname]) in m_subp.call_args_list
diff --git a/cloudinit/config/tests/test_resolv_conf.py b/cloudinit/config/tests/test_resolv_conf.py
new file mode 100644
index 00000000..6546a0b5
--- /dev/null
+++ b/cloudinit/config/tests/test_resolv_conf.py
@@ -0,0 +1,86 @@
+from unittest import mock
+
+import pytest
+
+from cloudinit.config.cc_resolv_conf import generate_resolv_conf
+
+
+EXPECTED_HEADER = """\
+# Your system has been configured with 'manage-resolv-conf' set to true.
+# As a result, cloud-init has written this file with configuration data
+# that it has been provided. Cloud-init, by default, will write this file
+# a single time (PER_ONCE).
+#\n\n"""
+
+
+class TestGenerateResolvConf:
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_default_target_fname_is_etc_resolvconf(self, m_render_to_file):
+ generate_resolv_conf("templates/resolv.conf.tmpl", mock.MagicMock())
+
+ assert [
+ mock.call(mock.ANY, "/etc/resolv.conf", mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
+ def test_target_fname_is_used_if_passed(self, m_render_to_file):
+ generate_resolv_conf(
+ "templates/resolv.conf.tmpl", mock.MagicMock(), "/use/this/path"
+ )
+
+ assert [
+ mock.call(mock.ANY, "/use/this/path", mock.ANY)
+ ] == m_render_to_file.call_args_list
+
+ # Patch in templater so we can assert on the actual generated content
+ @mock.patch("cloudinit.templater.util.write_file")
+ # Parameterise with the value to be passed to generate_resolv_conf as the
+ # params parameter, and the expected line after the header as
+ # expected_extra_line.
+ @pytest.mark.parametrize(
+ "params,expected_extra_line",
+ [
+ # No options
+ ({}, None),
+ # Just a true flag
+ ({"options": {"foo": True}}, "options foo"),
+ # Just a false flag
+ ({"options": {"foo": False}}, None),
+ # Just an option
+ ({"options": {"foo": "some_value"}}, "options foo:some_value"),
+ # A true flag and an option
+ (
+ {"options": {"foo": "some_value", "bar": True}},
+ "options bar foo:some_value",
+ ),
+ # Two options
+ (
+ {"options": {"foo": "some_value", "bar": "other_value"}},
+ "options bar:other_value foo:some_value",
+ ),
+ # Everything
+ (
+ {
+ "options": {
+ "foo": "some_value",
+ "bar": "other_value",
+ "baz": False,
+ "spam": True,
+ }
+ },
+ "options spam bar:other_value foo:some_value",
+ ),
+ ],
+ )
+ def test_flags_and_options(
+ self, m_write_file, params, expected_extra_line
+ ):
+ generate_resolv_conf("templates/resolv.conf.tmpl", params)
+
+ expected_content = EXPECTED_HEADER
+ if expected_extra_line is not None:
+ # If we have any extra lines, expect a trailing newline
+ expected_content += "\n".join([expected_extra_line, ""])
+ assert [
+ mock.call(mock.ANY, expected_content, mode=mock.ANY)
+ ] == m_write_file.call_args_list
diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py
index 85e2f1fe..2732bd60 100644
--- a/cloudinit/config/tests/test_set_passwords.py
+++ b/cloudinit/config/tests/test_set_passwords.py
@@ -1,6 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import mock
+from unittest import mock
from cloudinit.config import cc_set_passwords as setpass
from cloudinit.tests.helpers import CiTestCase
@@ -74,6 +74,10 @@ class TestSetPasswordsHandle(CiTestCase):
with_logs = True
+ def setUp(self):
+ super(TestSetPasswordsHandle, self).setUp()
+ self.add_patch('cloudinit.config.cc_set_passwords.sys.stderr', 'm_err')
+
def test_handle_on_empty_config(self, *args):
"""handle logs that no password has changed when config is empty."""
cloud = self.tmp_cloud(distro='ubuntu')
@@ -108,12 +112,12 @@ class TestSetPasswordsHandle(CiTestCase):
'\n'.join(valid_hashed_pwds) + '\n')],
m_subp.call_args_list)
- @mock.patch(MODPATH + "util.is_FreeBSD")
+ @mock.patch(MODPATH + "util.is_BSD")
@mock.patch(MODPATH + "util.subp")
- def test_freebsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
- self, m_subp, m_is_freebsd):
- """FreeBSD calls custom pw commands instead of chpasswd and passwd"""
- m_is_freebsd.return_value = True
+ def test_bsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
+ self, m_subp, m_is_bsd):
+ """BSD don't use chpasswd"""
+ m_is_bsd.return_value = True
cloud = self.tmp_cloud(distro='freebsd')
valid_pwds = ['ubuntu:passw0rd']
cfg = {'chpasswd': {'list': valid_pwds}}
@@ -125,12 +129,12 @@ class TestSetPasswordsHandle(CiTestCase):
mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])],
m_subp.call_args_list)
- @mock.patch(MODPATH + "util.is_FreeBSD")
+ @mock.patch(MODPATH + "util.is_BSD")
@mock.patch(MODPATH + "util.subp")
def test_handle_on_chpasswd_list_creates_random_passwords(self, m_subp,
- m_is_freebsd):
+ m_is_bsd):
"""handle parses command set random passwords."""
- m_is_freebsd.return_value = False
+ m_is_bsd.return_value = False
cloud = self.tmp_cloud(distro='ubuntu')
valid_random_pwds = [
'root:R',
diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py
index 3c472891..95270fa0 100644
--- a/cloudinit/config/tests/test_snap.py
+++ b/cloudinit/config/tests/test_snap.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import re
-from six import StringIO
+from io import StringIO
from cloudinit.config.cc_snap import (
ASSERTIONS_FILE, add_assertions, handle, maybe_install_squashfuse,
@@ -310,6 +310,52 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin):
{'snap': {'commands': {'01': 'also valid'}}}, schema)
self.assertEqual('', self.logs.getvalue())
+ @mock.patch('cloudinit.config.cc_snap.run_commands')
+ def test_schema_when_commands_values_are_invalid_type(self, _):
+ """Warnings when snap:commands values are invalid type (e.g. int)"""
+ validate_cloudconfig_schema(
+ {'snap': {'commands': [123]}}, schema)
+ validate_cloudconfig_schema(
+ {'snap': {'commands': {'01': 123}}}, schema)
+ self.assertEqual(
+ "WARNING: Invalid config:\n"
+ "snap.commands.0: 123 is not valid under any of the given"
+ " schemas\n"
+ "WARNING: Invalid config:\n"
+ "snap.commands.01: 123 is not valid under any of the given"
+ " schemas\n",
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.config.cc_snap.run_commands')
+ def test_schema_when_commands_list_values_are_invalid_type(self, _):
+ """Warnings when snap:commands list values are wrong type (e.g. int)"""
+ validate_cloudconfig_schema(
+ {'snap': {'commands': [["snap", "install", 123]]}}, schema)
+ validate_cloudconfig_schema(
+ {'snap': {'commands': {'01': ["snap", "install", 123]}}}, schema)
+        self.assertEqual(
+            "WARNING: Invalid config:\n"
+            "snap.commands.0: ['snap', 'install', 123] is not valid under any"
+            " of the given schemas\n"
+            "WARNING: Invalid config:\n"
+            "snap.commands.01: ['snap', 'install', 123] is not valid under"
+            " any of the given schemas\n",
+            self.logs.getvalue())
+
+ @mock.patch('cloudinit.config.cc_snap.run_commands')
+ def test_schema_when_assertions_values_are_invalid_type(self, _):
+ """Warnings when snap:assertions values are invalid type (e.g. int)"""
+ validate_cloudconfig_schema(
+ {'snap': {'assertions': [123]}}, schema)
+ validate_cloudconfig_schema(
+ {'snap': {'assertions': {'01': 123}}}, schema)
+ self.assertEqual(
+ "WARNING: Invalid config:\n"
+ "snap.assertions.0: 123 is not of type 'string'\n"
+ "WARNING: Invalid config:\n"
+ "snap.assertions.01: 123 is not of type 'string'\n",
+ self.logs.getvalue())
+
@mock.patch('cloudinit.config.cc_snap.add_assertions')
def test_warn_schema_assertions_is_not_list_or_dict(self, _):
"""Warn when snap:assertions config is not a list or dict."""
@@ -345,7 +391,7 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin):
def test_duplicates_are_fine_array_array(self):
"""Duplicated commands array/array entries are allowed."""
self.assertSchemaValid(
- {'commands': [["echo", "bye"], ["echo" "bye"]]},
+ {'commands': [["echo", "bye"], ["echo", "bye"]]},
"command entries can be duplicate.")
def test_duplicates_are_fine_array_string(self):
diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/cloudinit/config/tests/test_ubuntu_drivers.py
index 46952692..0aec1265 100644
--- a/cloudinit/config/tests/test_ubuntu_drivers.py
+++ b/cloudinit/config/tests/test_ubuntu_drivers.py
@@ -16,6 +16,13 @@ OLD_UBUNTU_DRIVERS_ERROR_STDERR = (
"(choose from 'list', 'autoinstall', 'devices', 'debug')\n")
+# The tests in this module call helper methods which are decorated with
+# mock.patch. pylint doesn't understand that mock.patch passes parameters to
+# the decorated function, so it incorrectly reports that we aren't passing
+# values for all parameters. Instead of annotating every single call, we
+# disable it for the entire module:
+# pylint: disable=no-value-for-parameter
+
class AnyTempScriptAndDebconfFile(object):
def __init__(self, tmp_dir, debconf_file):
diff --git a/cloudinit/config/tests/test_users_groups.py b/cloudinit/config/tests/test_users_groups.py
index f620b597..df89ddb3 100644
--- a/cloudinit/config/tests/test_users_groups.py
+++ b/cloudinit/config/tests/test_users_groups.py
@@ -39,7 +39,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_user.call_args_list,
[mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
shell='/bin/bash'),
@@ -65,7 +65,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='freebsd', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_fbsd_user.call_args_list,
[mock.call('freebsd', groups='wheel', lock_passwd=True,
shell='/bin/tcsh'),
@@ -86,7 +86,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_user.call_args_list,
[mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
shell='/bin/bash'),
@@ -107,7 +107,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_user.call_args_list,
[mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
shell='/bin/bash'),
@@ -146,7 +146,7 @@ class TestHandleUsersGroups(CiTestCase):
cloud = self.tmp_cloud(
distro='ubuntu', sys_cfg=sys_cfg, metadata=metadata)
cc_users_groups.handle('modulename', cfg, cloud, None, None)
- self.assertItemsEqual(
+ self.assertCountEqual(
m_user.call_args_list,
[mock.call('ubuntu', groups='lxd,sudo', lock_passwd=True,
shell='/bin/bash'),
diff --git a/cloudinit/conftest.py b/cloudinit/conftest.py
new file mode 100644
index 00000000..37cbbcda
--- /dev/null
+++ b/cloudinit/conftest.py
@@ -0,0 +1,72 @@
+from unittest import mock
+
+import pytest
+
+from cloudinit import util
+
+
+@pytest.yield_fixture(autouse=True)
+def disable_subp_usage(request):
+ """
+ Across all (pytest) tests, ensure that util.subp is not invoked.
+
+    Note that this can only catch invocations where the util module is
+    imported and ``util.subp(...)`` is called. Calls made via
+    ``from cloudinit.util import subp`` are not caught, because that name
+    is bound before the patching here (or the CiTestCase monkey-patching)
+    takes effect.
+
+ To allow a particular test method or class to use util.subp you can set the
+ parameter passed to this fixture to False using pytest.mark.parametrize::
+
+ @pytest.mark.parametrize("disable_subp_usage", [False], indirect=True)
+ def test_whoami(self):
+ util.subp(["whoami"])
+
+    To instead allow util.subp usage for a specific command, you can set the
+    parameter passed to this fixture to that command::
+
+ @pytest.mark.parametrize("disable_subp_usage", ["bash"], indirect=True)
+ def test_bash(self):
+ util.subp(["bash"])
+
+    To specify multiple commands, set the parameter to a list (note the
+    double-layered list: we specify a single parameter that is itself a
+    list)::
+
+        @pytest.mark.parametrize(
+            "disable_subp_usage", [["bash", "whoami"]], indirect=True)
+ def test_several_things(self):
+ util.subp(["bash"])
+ util.subp(["whoami"])
+
+ This fixture (roughly) mirrors the functionality of
+ CiTestCase.allowed_subp. N.B. While autouse fixtures do affect non-pytest
+ tests, CiTestCase's allowed_subp does take precedence (and we have
+ TestDisableSubpUsageInTestSubclass to confirm that).
+ """
+ should_disable = getattr(request, "param", True)
+ if should_disable:
+ if not isinstance(should_disable, (list, str)):
+ def side_effect(args, *other_args, **kwargs):
+ raise AssertionError("Unexpectedly used util.subp")
+ else:
+ # Look this up before our patch is in place, so we have access to
+ # the real implementation in side_effect
+ subp = util.subp
+
+ if isinstance(should_disable, str):
+ should_disable = [should_disable]
+
+ def side_effect(args, *other_args, **kwargs):
+ cmd = args[0]
+ if cmd not in should_disable:
+ raise AssertionError(
+ "Unexpectedly used util.subp to call {} (allowed:"
+ " {})".format(cmd, ",".join(should_disable))
+ )
+ return subp(args, *other_args, **kwargs)
+
+ with mock.patch('cloudinit.util.subp', autospec=True) as m_subp:
+ m_subp.side_effect = side_effect
+ yield
+ else:
+ yield
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index cdce26f2..e99529df 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -9,13 +9,13 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import six
-from six import StringIO
-
import abc
import os
import re
import stat
+import string
+import urllib.parse
+from io import StringIO
from cloudinit import importer
from cloudinit import log as logging
@@ -52,9 +52,11 @@ _EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$')
# Default NTP Client Configurations
PREFERRED_NTP_CLIENTS = ['chrony', 'systemd-timesyncd', 'ntp', 'ntpdate']
+# Letters/Digits/Hyphen characters, for use in domain name validation
+LDH_ASCII_CHARS = string.ascii_letters + string.digits + "-"
+
-@six.add_metaclass(abc.ABCMeta)
-class Distro(object):
+class Distro(metaclass=abc.ABCMeta):
usr_lib_exec = "/usr/lib"
hosts_fn = "/etc/hosts"
@@ -429,7 +431,7 @@ class Distro(object):
# support kwargs having groups=[list] or groups="g1,g2"
groups = kwargs.get('groups')
if groups:
- if isinstance(groups, six.string_types):
+ if isinstance(groups, str):
groups = groups.split(",")
# remove any white spaces in group names, most likely
@@ -544,7 +546,7 @@ class Distro(object):
if 'ssh_authorized_keys' in kwargs:
# Try to handle this in a smart manner.
keys = kwargs['ssh_authorized_keys']
- if isinstance(keys, six.string_types):
+ if isinstance(keys, str):
keys = [keys]
elif isinstance(keys, dict):
keys = list(keys.values())
@@ -668,7 +670,7 @@ class Distro(object):
if isinstance(rules, (list, tuple)):
for rule in rules:
lines.append("%s %s" % (user, rule))
- elif isinstance(rules, six.string_types):
+ elif isinstance(rules, str):
lines.append("%s %s" % (user, rules))
else:
msg = "Can not create sudoers rule addition with type %r"
@@ -723,6 +725,111 @@ class Distro(object):
LOG.info("Added user '%s' to group '%s'", member, name)
+def _apply_hostname_transformations_to_url(url: str, transformations: list):
+ """
+ Apply transformations to a URL's hostname, return transformed URL.
+
+ This is a separate function because unwrapping and rewrapping only the
+ hostname portion of a URL is complex.
+
+ :param url:
+ The URL to operate on.
+ :param transformations:
+ A list of ``(str) -> Optional[str]`` functions, which will be applied
+ in order to the hostname portion of the URL. If any function
+ (regardless of ordering) returns None, ``url`` will be returned without
+ any modification.
+
+ :return:
+ A string whose value is ``url`` with the hostname ``transformations``
+ applied, or ``None`` if ``url`` is unparseable.
+ """
+ try:
+ parts = urllib.parse.urlsplit(url)
+ except ValueError:
+ # If we can't even parse the URL, we shouldn't use it for anything
+ return None
+ new_hostname = parts.hostname
+ if new_hostname is None:
+ # The URL given doesn't have a hostname component, so (a) we can't
+ # transform it, and (b) it won't work as a mirror; return None.
+ return None
+
+ for transformation in transformations:
+ new_hostname = transformation(new_hostname)
+ if new_hostname is None:
+ # If a transformation returns None, that indicates we should abort
+ # processing and return `url` unmodified
+ return url
+
+ new_netloc = new_hostname
+ if parts.port is not None:
+ new_netloc = "{}:{}".format(new_netloc, parts.port)
+ return urllib.parse.urlunsplit(parts._replace(netloc=new_netloc))
+
+
+def _sanitize_mirror_url(url: str):
+ """
+ Given a mirror URL, replace or remove any invalid URI characters.
+
+ This performs the following actions on the URL's hostname:
+ * Checks if it is an IP address, returning the URL immediately if it is
+ * Converts it to its IDN form (see below for details)
+ * Replaces any non-Letters/Digits/Hyphen (LDH) characters in it with
+ hyphens
+    * Removes any leading/trailing hyphens from each domain name label
+
+ Before we replace any invalid domain name characters, we first need to
+ ensure that any valid non-ASCII characters in the hostname will not be
+ replaced, by ensuring the hostname is in its Internationalized domain name
+ (IDN) representation (see RFC 5890). This conversion has to be applied to
+ the whole hostname (rather than just the substitution variables), because
+ the Punycode algorithm used by IDNA transcodes each part of the hostname as
+ a whole string (rather than encoding individual characters). It cannot be
+ applied to the whole URL, because (a) the Punycode algorithm expects to
+ operate on domain names so doesn't output a valid URL, and (b) non-ASCII
+ characters in non-hostname parts of the URL aren't encoded via Punycode.
+
+ To put this in RFC 5890's terminology: before we remove or replace any
+ characters from our domain name (which we do to ensure that each label is a
+ valid LDH Label), we first ensure each label is in its A-label form.
+
+ (Note that Python's builtin idna encoding is actually IDNA2003, not
+ IDNA2008. This changes the specifics of how some characters are encoded to
+ ASCII, but doesn't affect the logic here.)
+
+ :param url:
+ The URL to operate on.
+
+ :return:
+ A sanitized version of the URL, which will have been IDNA encoded if
+ necessary, or ``None`` if the generated string is not a parseable URL.
+ """
+ # Acceptable characters are LDH characters, plus "." to separate each label
+ acceptable_chars = LDH_ASCII_CHARS + "."
+ transformations = [
+ # This is an IP address, not a hostname, so no need to apply the
+ # transformations
+ lambda hostname: None if net.is_ip_address(hostname) else hostname,
+
+ # Encode with IDNA to get the correct characters (as `bytes`), then
+ # decode with ASCII so we return a `str`
+ lambda hostname: hostname.encode('idna').decode('ascii'),
+
+ # Replace any unacceptable characters with "-"
+ lambda hostname: ''.join(
+ c if c in acceptable_chars else "-" for c in hostname
+ ),
+
+ # Drop leading/trailing hyphens from each part of the hostname
+ lambda hostname: '.'.join(
+ part.strip('-') for part in hostname.split('.')
+ ),
+ ]
+
+ return _apply_hostname_transformations_to_url(url, transformations)
+
+
def _get_package_mirror_info(mirror_info, data_source=None,
mirror_filter=util.search_for_mirror):
    # given an arch-specific 'mirror_info' entry (from package_mirrors)
@@ -751,9 +858,13 @@ def _get_package_mirror_info(mirror_info, data_source=None,
mirrors = []
for tmpl in searchlist:
try:
- mirrors.append(tmpl % subst)
+ mirror = tmpl % subst
except KeyError:
- pass
+ continue
+
+ mirror = _sanitize_mirror_url(mirror)
+ if mirror is not None:
+ mirrors.append(mirror)
found = mirror_filter(mirrors)
if found:
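
A hypothetical demonstration of the sanitization pipeline (the hostnames below
are made up)::

    from cloudinit.distros import _sanitize_mirror_url

    # Non-LDH characters in the hostname are replaced with hyphens
    print(_sanitize_mirror_url('http://fk_fake_1.mirror.example.com/ubuntu'))
    # -> http://fk-fake-1.mirror.example.com/ubuntu

    # IP addresses short-circuit the transformations and pass through
    print(_sanitize_mirror_url('http://192.168.1.1:8080/ubuntu'))
    # -> http://192.168.1.1:8080/ubuntu
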
diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py
new file mode 100644
index 00000000..37cf93bf
--- /dev/null
+++ b/cloudinit/distros/bsd.py
@@ -0,0 +1,126 @@
+import platform
+
+from cloudinit import distros
+from cloudinit.distros import bsd_utils
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit import net
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class BSD(distros.Distro):
+ hostname_conf_fn = '/etc/rc.conf'
+ rc_conf_fn = "/etc/rc.conf"
+
+ # Set in BSD distro subclasses
+ group_add_cmd_prefix = []
+ pkg_cmd_install_prefix = []
+ pkg_cmd_remove_prefix = []
+ # There is no update/upgrade on OpenBSD
+ pkg_cmd_update_prefix = None
+ pkg_cmd_upgrade_prefix = None
+
+ def __init__(self, name, cfg, paths):
+ super().__init__(name, cfg, paths)
+ # This will be used to restrict certain
+        # calls from repeatedly happening (when they
+ # should only happen say once per instance...)
+ self._runner = helpers.Runners(paths)
+ cfg['ssh_svcname'] = 'sshd'
+ self.osfamily = platform.system().lower()
+
+ def _read_system_hostname(self):
+ sys_hostname = self._read_hostname(self.hostname_conf_fn)
+ return (self.hostname_conf_fn, sys_hostname)
+
+ def _read_hostname(self, filename, default=None):
+ return bsd_utils.get_rc_config_value('hostname')
+
+ def _get_add_member_to_group_cmd(self, member_name, group_name):
+ raise NotImplementedError('Return list cmd to add member to group')
+
+ def _write_hostname(self, hostname, filename):
+ bsd_utils.set_rc_config_value('hostname', hostname, fn='/etc/rc.conf')
+
+ def create_group(self, name, members=None):
+ if util.is_group(name):
+ LOG.warning("Skipping creation of existing group '%s'", name)
+ else:
+ group_add_cmd = self.group_add_cmd_prefix + [name]
+ try:
+ util.subp(group_add_cmd)
+ LOG.info("Created new group %s", name)
+ except Exception:
+ util.logexc(LOG, "Failed to create group %s", name)
+
+ if not members:
+ members = []
+ for member in members:
+ if not util.is_user(member):
+ LOG.warning("Unable to add group member '%s' to group '%s'"
+ "; user does not exist.", member, name)
+ continue
+ try:
+ util.subp(self._get_add_member_to_group_cmd(member, name))
+ LOG.info("Added user '%s' to group '%s'", member, name)
+ except Exception:
+ util.logexc(LOG, "Failed to add user '%s' to group '%s'",
+ member, name)
+
+ def generate_fallback_config(self):
+ nconf = {'config': [], 'version': 1}
+ for mac, name in net.get_interfaces_by_mac().items():
+ nconf['config'].append(
+ {'type': 'physical', 'name': name,
+ 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]})
+ return nconf
+
+ def install_packages(self, pkglist):
+ self.update_package_sources()
+ self.package_command('install', pkgs=pkglist)
+
+ def _get_pkg_cmd_environ(self):
+ """Return environment vars used in *BSD package_command operations"""
+ raise NotImplementedError('BSD subclasses return a dict of env vars')
+
+ def package_command(self, command, args=None, pkgs=None):
+ if pkgs is None:
+ pkgs = []
+
+        # Copy the class-level prefix lists so the appends/extends below
+        # don't mutate shared state across calls
+        if command == 'install':
+            cmd = list(self.pkg_cmd_install_prefix)
+        elif command == 'remove':
+            cmd = list(self.pkg_cmd_remove_prefix)
+        elif command == 'update':
+            if not self.pkg_cmd_update_prefix:
+                return
+            cmd = list(self.pkg_cmd_update_prefix)
+        elif command == 'upgrade':
+            if not self.pkg_cmd_upgrade_prefix:
+                return
+            cmd = list(self.pkg_cmd_upgrade_prefix)
+
+ if args and isinstance(args, str):
+ cmd.append(args)
+ elif args and isinstance(args, list):
+ cmd.extend(args)
+
+ pkglist = util.expand_package_list('%s-%s', pkgs)
+ cmd.extend(pkglist)
+
+ # Allow the output of this to flow outwards (ie not be captured)
+ util.subp(cmd, env=self._get_pkg_cmd_environ(), capture=False)
+
+ def _write_network_config(self, netconfig):
+ return self._supported_write_network_config(netconfig)
+
+ def set_timezone(self, tz):
+ distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
+
+ def apply_locale(self, locale, out_fn=None):
+ LOG.debug('Cannot set the locale.')
+
+ def apply_network_config_names(self, netconfig):
+ LOG.debug('Cannot rename network interface.')
diff --git a/cloudinit/distros/bsd_utils.py b/cloudinit/distros/bsd_utils.py
new file mode 100644
index 00000000..079d0d53
--- /dev/null
+++ b/cloudinit/distros/bsd_utils.py
@@ -0,0 +1,50 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import shlex
+
+from cloudinit import util
+
+# On NetBSD, /etc/rc.conf comes with an if block:
+#   if [ -r /etc/defaults/rc.conf ]; then
+# As a consequence, the file is not a regular key/value list
+# anymore and we cannot use cloudinit.distros.parsers.sys_conf.
+# This module comes with a more naive parser, but it is able to
+# preserve these if blocks.
+
+
+def _unquote(value):
+ if value[0] == value[-1] and value[0] in ['"', "'"]:
+ return value[1:-1]
+ return value
+
+
+def get_rc_config_value(key, fn='/etc/rc.conf'):
+ key_prefix = '{}='.format(key)
+ for line in util.load_file(fn).splitlines():
+ if line.startswith(key_prefix):
+ value = line.replace(key_prefix, '')
+ return _unquote(value)
+
+
+def set_rc_config_value(key, value, fn='/etc/rc.conf'):
+ lines = []
+ done = False
+ value = shlex.quote(value)
+ original_content = util.load_file(fn)
+ for line in original_content.splitlines():
+ if '=' in line:
+ k, v = line.split('=', 1)
+ if k == key:
+ v = value
+ done = True
+ lines.append('='.join([k, v]))
+ else:
+ lines.append(line)
+ if not done:
+ lines.append('='.join([key, value]))
+ new_content = '\n'.join(lines) + '\n'
+ if new_content != original_content:
+ util.write_file(fn, new_content)
+
+
+# vi: ts=4 expandtab
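
A short sketch of these helpers in use (the path is illustrative; real callers
operate on /etc/rc.conf)::

    from cloudinit import util
    from cloudinit.distros import bsd_utils

    util.write_file('/tmp/rc.conf.test', 'hostname=old\n')
    bsd_utils.set_rc_config_value('hostname', 'myhost', fn='/tmp/rc.conf.test')
    print(bsd_utils.get_rc_config_value('hostname', fn='/tmp/rc.conf.test'))
    # -> myhost
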
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index cf082c73..128bb523 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -205,8 +205,7 @@ class Distro(distros.Distro):
["update"], freq=PER_INSTANCE)
def get_primary_arch(self):
- (arch, _err) = util.subp(['dpkg', '--print-architecture'])
- return str(arch).strip()
+ return util.get_dpkg_architecture()
def _get_wrapper_prefix(cmd, mode):
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index 40e435e7..b3a4ad67 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -5,39 +5,27 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
-import six
-from six import StringIO
-
import re
+from io import StringIO
-from cloudinit import distros
-from cloudinit import helpers
+import cloudinit.distros.bsd
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import ssh_util
from cloudinit import util
-from cloudinit.distros import rhel_util
from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
-class Distro(distros.Distro):
+class Distro(cloudinit.distros.bsd.BSD):
usr_lib_exec = '/usr/local/lib'
- rc_conf_fn = "/etc/rc.conf"
login_conf_fn = '/etc/login.conf'
login_conf_fn_bak = '/etc/login.conf.orig'
ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users'
- hostname_conf_fn = '/etc/rc.conf'
-
- def __init__(self, name, cfg, paths):
- distros.Distro.__init__(self, name, cfg, paths)
- # This will be used to restrict certain
- # calls from repeatly happening (when they
- # should only happen say once per instance...)
- self._runner = helpers.Runners(paths)
- self.osfamily = 'freebsd'
- cfg['ssh_svcname'] = 'sshd'
+ group_add_cmd_prefix = ['pw', 'group', 'add']
+ pkg_cmd_install_prefix = ["pkg", "install"]
+ pkg_cmd_remove_prefix = ["pkg", "remove"]
+ pkg_cmd_update_prefix = ["pkg", "update"]
+ pkg_cmd_upgrade_prefix = ["pkg", "upgrade"]
def _select_hostname(self, hostname, fqdn):
# Should be FQDN if available. See rc.conf(5) in FreeBSD
@@ -45,45 +33,8 @@ class Distro(distros.Distro):
return fqdn
return hostname
- def _read_system_hostname(self):
- sys_hostname = self._read_hostname(self.hostname_conf_fn)
- return (self.hostname_conf_fn, sys_hostname)
-
- def _read_hostname(self, filename, default=None):
- (_exists, contents) = rhel_util.read_sysconfig_file(filename)
- if contents.get('hostname'):
- return contents['hostname']
- else:
- return default
-
- def _write_hostname(self, hostname, filename):
- rhel_util.update_sysconfig_file(filename, {'hostname': hostname})
-
- def create_group(self, name, members):
- group_add_cmd = ['pw', 'group', 'add', name]
- if util.is_group(name):
- LOG.warning("Skipping creation of existing group '%s'", name)
- else:
- try:
- util.subp(group_add_cmd)
- LOG.info("Created new group %s", name)
- except Exception:
- util.logexc(LOG, "Failed to create group %s", name)
- raise
- if not members:
- members = []
-
- for member in members:
- if not util.is_user(member):
- LOG.warning("Unable to add group member '%s' to group '%s'"
- "; user does not exist.", member, name)
- continue
- try:
- util.subp(['pw', 'usermod', '-n', name, '-G', member])
- LOG.info("Added user '%s' to group '%s'", member, name)
- except Exception:
- util.logexc(LOG, "Failed to add user '%s' to group '%s'",
- member, name)
+ def _get_add_member_to_group_cmd(self, member_name, group_name):
+ return ['pw', 'usermod', '-n', member_name, '-G', group_name]
def add_user(self, name, **kwargs):
if util.is_user(name):
@@ -108,8 +59,7 @@ class Distro(distros.Distro):
}
for key, val in kwargs.items():
- if (key in pw_useradd_opts and val and
- isinstance(val, six.string_types)):
+ if key in pw_useradd_opts and val and isinstance(val, str):
pw_useradd_cmd.extend([pw_useradd_opts[key], val])
elif key in pw_useradd_flags and val:
@@ -165,40 +115,8 @@ class Distro(distros.Distro):
util.logexc(LOG, "Failed to lock user %s", name)
raise
- def create_user(self, name, **kwargs):
- self.add_user(name, **kwargs)
-
- # Set password if plain-text password provided and non-empty
- if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
- self.set_passwd(name, kwargs['plain_text_passwd'])
-
- # Default locking down the account. 'lock_passwd' defaults to True.
- # lock account unless lock_password is False.
- if kwargs.get('lock_passwd', True):
- self.lock_passwd(name)
-
- # Configure sudo access
- if 'sudo' in kwargs and kwargs['sudo'] is not False:
- self.write_sudo_rules(name, kwargs['sudo'])
-
- # Import SSH keys
- if 'ssh_authorized_keys' in kwargs:
- keys = set(kwargs['ssh_authorized_keys']) or []
- ssh_util.setup_user_keys(keys, name, options=None)
-
- def generate_fallback_config(self):
- nconf = {'config': [], 'version': 1}
- for mac, name in net.get_interfaces_by_mac().items():
- nconf['config'].append(
- {'type': 'physical', 'name': name,
- 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]})
- return nconf
-
- def _write_network_config(self, netconfig):
- return self._supported_write_network_config(netconfig)
-
def apply_locale(self, locale, out_fn=None):
- # Adjust the locals value to the new value
+ # Adjust the locales value to the new value
newconf = StringIO()
for line in util.load_file(self.login_conf_fn).splitlines():
newconf.write(re.sub(r'^default:',
@@ -228,39 +146,17 @@ class Distro(distros.Distro):
# /etc/rc.conf a line with the following format:
# ifconfig_OLDNAME_name=NEWNAME
# FreeBSD network script will rename the interface automatically.
- return
-
- def install_packages(self, pkglist):
- self.update_package_sources()
- self.package_command('install', pkgs=pkglist)
-
- def package_command(self, command, args=None, pkgs=None):
- if pkgs is None:
- pkgs = []
+ pass
+ def _get_pkg_cmd_environ(self):
+ """Return environment vars used in *BSD package_command operations"""
e = os.environ.copy()
e['ASSUME_ALWAYS_YES'] = 'YES'
-
- cmd = ['pkg']
- if args and isinstance(args, str):
- cmd.append(args)
- elif args and isinstance(args, list):
- cmd.extend(args)
-
- if command:
- cmd.append(command)
-
- pkglist = util.expand_package_list('%s-%s', pkgs)
- cmd.extend(pkglist)
-
- # Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, env=e, capture=False)
-
- def set_timezone(self, tz):
- distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
+ return e
def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["update"], freq=PER_INSTANCE)
+ self._runner.run(
+ "update-sources", self.package_command,
+ ["update"], freq=PER_INSTANCE)
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/netbsd.py b/cloudinit/distros/netbsd.py
new file mode 100644
index 00000000..ecc8239a
--- /dev/null
+++ b/cloudinit/distros/netbsd.py
@@ -0,0 +1,156 @@
+# Copyright (C) 2019-2020 Gonéri Le Bouder
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import crypt
+import os
+import platform
+
+import cloudinit.distros.bsd
+from cloudinit import log as logging
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class NetBSD(cloudinit.distros.bsd.BSD):
+ """
+ Distro subclass for NetBSD.
+
+ (N.B. OpenBSD inherits from this class.)
+ """
+
+ ci_sudoers_fn = '/usr/pkg/etc/sudoers.d/90-cloud-init-users'
+ group_add_cmd_prefix = ["groupadd"]
+
+ def __init__(self, name, cfg, paths):
+ super().__init__(name, cfg, paths)
+ if os.path.exists("/usr/pkg/bin/pkgin"):
+ self.pkg_cmd_install_prefix = ['pkgin', '-y', 'install']
+ self.pkg_cmd_remove_prefix = ['pkgin', '-y', 'remove']
+ self.pkg_cmd_update_prefix = ['pkgin', '-y', 'update']
+ self.pkg_cmd_upgrade_prefix = ['pkgin', '-y', 'full-upgrade']
+ else:
+ self.pkg_cmd_install_prefix = ['pkg_add', '-U']
+ self.pkg_cmd_remove_prefix = ['pkg_delete']
+
+ def _get_add_member_to_group_cmd(self, member_name, group_name):
+ return ['usermod', '-G', group_name, member_name]
+
+ def add_user(self, name, **kwargs):
+ if util.is_user(name):
+ LOG.info("User %s already exists, skipping.", name)
+ return False
+
+ adduser_cmd = ['useradd']
+ log_adduser_cmd = ['useradd']
+
+ adduser_opts = {
+ "homedir": '-d',
+ "gecos": '-c',
+ "primary_group": '-g',
+ "groups": '-G',
+ "shell": '-s',
+ }
+ adduser_flags = {
+ "no_user_group": '--no-user-group',
+ "system": '--system',
+ "no_log_init": '--no-log-init',
+ }
+
+ for key, val in kwargs.items():
+ if key in adduser_opts and val and isinstance(val, str):
+ adduser_cmd.extend([adduser_opts[key], val])
+
+ elif key in adduser_flags and val:
+ adduser_cmd.append(adduser_flags[key])
+ log_adduser_cmd.append(adduser_flags[key])
+
+        # Create the home directory unless the caller asked for a system
+        # account or explicitly opted out
+        if 'no_create_home' not in kwargs and 'system' not in kwargs:
+            adduser_cmd += ['-m']
+            log_adduser_cmd += ['-m']
+
+ adduser_cmd += [name]
+ log_adduser_cmd += [name]
+
+ # Run the command
+ LOG.info("Adding user %s", name)
+ try:
+ util.subp(adduser_cmd, logstring=log_adduser_cmd)
+ except Exception:
+ util.logexc(LOG, "Failed to create user %s", name)
+ raise
+        # Set the password if it is provided; for security reasons, the
+        # provided value is assumed to be already hashed
+ passwd_val = kwargs.get('passwd', None)
+ if passwd_val is not None:
+ self.set_passwd(name, passwd_val, hashed=True)
+
+ def set_passwd(self, user, passwd, hashed=False):
+ if hashed:
+ hashed_pw = passwd
+ elif not hasattr(crypt, 'METHOD_BLOWFISH'):
+ # crypt.METHOD_BLOWFISH comes with Python 3.7 which is available
+ # on NetBSD 7 and 8.
+ LOG.error((
+ 'Cannot set non-encrypted password for user %s. '
+ 'Python >= 3.7 is required.'), user)
+ return
+ else:
+ method = crypt.METHOD_BLOWFISH # pylint: disable=E1101
+ hashed_pw = crypt.crypt(
+ passwd,
+ crypt.mksalt(method))
+
+ try:
+ util.subp(['usermod', '-p', hashed_pw, user])
+ except Exception:
+ util.logexc(LOG, "Failed to set password for %s", user)
+ raise
+ self.unlock_passwd(user)
+
+ def force_passwd_change(self, user):
+ try:
+ util.subp(['usermod', '-F', user])
+ except Exception:
+ util.logexc(LOG, "Failed to set pw expiration for %s", user)
+ raise
+
+ def lock_passwd(self, name):
+ try:
+ util.subp(['usermod', '-C', 'yes', name])
+ except Exception:
+ util.logexc(LOG, "Failed to lock user %s", name)
+ raise
+
+ def unlock_passwd(self, name):
+ try:
+ util.subp(['usermod', '-C', 'no', name])
+ except Exception:
+ util.logexc(LOG, "Failed to unlock user %s", name)
+ raise
+
+ def apply_locale(self, locale, out_fn=None):
+ LOG.debug('Cannot set the locale.')
+
+ def apply_network_config_names(self, netconfig):
+ LOG.debug('NetBSD cannot rename network interface.')
+
+ def _get_pkg_cmd_environ(self):
+ """Return env vars used in NetBSD package_command operations"""
+ os_release = platform.release()
+ os_arch = platform.machine()
+ e = os.environ.copy()
+ e['PKG_PATH'] = (
+ 'http://cdn.netbsd.org/pub/pkgsrc/'
+ 'packages/NetBSD/%s/%s/All') % (os_arch, os_release)
+ return e
+
+ def update_package_sources(self):
+ pass
+
+
+class Distro(NetBSD):
+ pass
+
+# vi: ts=4 expandtab
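
For context, a sketch of the Blowfish hashing path used by ``set_passwd``
(assumes Python >= 3.7 and a platform crypt(3) with Blowfish support, e.g.
NetBSD)::

    import crypt

    if hasattr(crypt, 'METHOD_BLOWFISH'):
        hashed = crypt.crypt('s3cret', crypt.mksalt(crypt.METHOD_BLOWFISH))
        print(hashed)  # e.g. '$2b$...'
    else:
        print('Blowfish not supported by this platform')
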
diff --git a/cloudinit/distros/openbsd.py b/cloudinit/distros/openbsd.py
new file mode 100644
index 00000000..ca094156
--- /dev/null
+++ b/cloudinit/distros/openbsd.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2019-2020 Gonéri Le Bouder
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import platform
+
+import cloudinit.distros.netbsd
+from cloudinit import log as logging
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(cloudinit.distros.netbsd.NetBSD):
+ hostname_conf_fn = '/etc/myname'
+
+ def _read_hostname(self, filename, default=None):
+ return util.load_file(self.hostname_conf_fn)
+
+ def _write_hostname(self, hostname, filename):
+ content = hostname + '\n'
+ util.write_file(self.hostname_conf_fn, content)
+
+ def _get_add_member_to_group_cmd(self, member_name, group_name):
+ return ['usermod', '-G', group_name, member_name]
+
+ def lock_passwd(self, name):
+ try:
+ util.subp(['usermod', '-p', '*', name])
+ except Exception:
+ util.logexc(LOG, "Failed to lock user %s", name)
+ raise
+
+ def unlock_passwd(self, name):
+ pass
+
+ def _get_pkg_cmd_environ(self):
+ """Return env vars used in OpenBSD package_command operations"""
+ os_release = platform.release()
+ os_arch = platform.machine()
+ e = os.environ.copy()
+ e['PKG_PATH'] = (
+ 'ftp://ftp.openbsd.org/pub/OpenBSD/{os_release}/'
+ 'packages/{os_arch}/').format(
+ os_arch=os_arch, os_release=os_release)
+ return e
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
index e41e2f7b..68028d20 100644
--- a/cloudinit/distros/opensuse.py
+++ b/cloudinit/distros/opensuse.py
@@ -37,6 +37,7 @@ class Distro(distros.Distro):
renderer_configs = {
'sysconfig': {
'control': 'etc/sysconfig/network/config',
+ 'flavor': 'suse',
'iface_templates': '%(base)s/network/ifcfg-%(name)s',
'netrules_path': (
'etc/udev/rules.d/85-persistent-net-cloud-init.rules'),
@@ -143,6 +144,9 @@ class Distro(distros.Distro):
return default
return hostname
+ def _get_localhost_ip(self):
+ return "127.0.1.1"
+
def _read_hostname_conf(self, filename):
conf = HostnameConf(util.load_file(filename))
conf.parse()
diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py
index dd434ac6..e74c083c 100644
--- a/cloudinit/distros/parsers/hostname.py
+++ b/cloudinit/distros/parsers/hostname.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from six import StringIO
+from io import StringIO
from cloudinit.distros.parsers import chop_comment
diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py
index 64444581..54e4e934 100644
--- a/cloudinit/distros/parsers/hosts.py
+++ b/cloudinit/distros/parsers/hosts.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from six import StringIO
+from io import StringIO
from cloudinit.distros.parsers import chop_comment
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index a62055ae..299d54b5 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from six import StringIO
+from io import StringIO
from cloudinit.distros.parsers import chop_comment
from cloudinit import log as logging
diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py
index 44df17de..dee4c551 100644
--- a/cloudinit/distros/parsers/sys_conf.py
+++ b/cloudinit/distros/parsers/sys_conf.py
@@ -4,11 +4,9 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import six
-from six import StringIO
-
import pipes
import re
+from io import StringIO
# This library is used to parse/write
# out the various sysconfig files edited (best attempt effort)
@@ -65,7 +63,7 @@ class SysConf(configobj.ConfigObj):
return out_contents.getvalue()
def _quote(self, value, multiline=False):
- if not isinstance(value, six.string_types):
+ if not isinstance(value, str):
raise ValueError('Value "%s" is not a string' % (value))
if len(value) == 0:
return ''
diff --git a/cloudinit/distros/tests/__init__.py b/cloudinit/distros/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/cloudinit/distros/tests/__init__.py
diff --git a/cloudinit/distros/tests/test_init.py b/cloudinit/distros/tests/test_init.py
new file mode 100644
index 00000000..40939133
--- /dev/null
+++ b/cloudinit/distros/tests/test_init.py
@@ -0,0 +1,135 @@
+# Copyright (C) 2020 Canonical Ltd.
+#
+# Author: Daniel Watkins <oddbloke@ubuntu.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Tests for cloudinit/distros/__init__.py"""
+
+from unittest import mock
+
+import pytest
+
+from cloudinit.distros import _get_package_mirror_info, LDH_ASCII_CHARS
+
+
+# Define a set of characters we would expect to be replaced
+INVALID_URL_CHARS = [
+ chr(x) for x in range(127) if chr(x) not in LDH_ASCII_CHARS
+]
+for separator in [":", ".", "/", "#", "?", "@", "[", "]"]:
+ # Remove from the set characters that either separate hostname parts (":",
+ # "."), terminate hostnames ("/", "#", "?", "@"), or cause Python to be
+ # unable to parse URLs ("[", "]").
+ INVALID_URL_CHARS.remove(separator)
+
+
+class TestGetPackageMirrorInfo:
+ """
+ Tests for cloudinit.distros._get_package_mirror_info.
+
+ These supplement the tests in tests/unittests/test_distros/test_generic.py
+ which are more focused on testing a single production-like configuration.
+ These tests are more focused on specific aspects of the unit under test.
+ """
+
+ @pytest.mark.parametrize('mirror_info,expected', [
+ # Empty info gives empty return
+ ({}, {}),
+ # failsafe values used if present
+ ({'failsafe': {'primary': 'http://value', 'security': 'http://other'}},
+ {'primary': 'http://value', 'security': 'http://other'}),
+ # search values used if present
+ ({'search': {'primary': ['http://value'],
+ 'security': ['http://other']}},
+ {'primary': ['http://value'], 'security': ['http://other']}),
+ # failsafe values used if search value not present
+ ({'search': {'primary': ['http://value']},
+ 'failsafe': {'security': 'http://other'}},
+ {'primary': ['http://value'], 'security': 'http://other'})
+ ])
+ def test_get_package_mirror_info_failsafe(self, mirror_info, expected):
+ """
+ Test the interaction between search and failsafe inputs
+
+ (This doesn't test the case where the mirror_filter removes all search
+ options; test_failsafe_used_if_all_search_results_filtered_out covers
+ that.)
+ """
+ assert expected == _get_package_mirror_info(mirror_info,
+ mirror_filter=lambda x: x)
+
+ def test_failsafe_used_if_all_search_results_filtered_out(self):
+ """Test the failsafe option used if all search options eliminated."""
+ mirror_info = {
+ 'search': {'primary': ['http://value']},
+ 'failsafe': {'primary': 'http://other'}
+ }
+ assert {'primary': 'http://other'} == _get_package_mirror_info(
+ mirror_info, mirror_filter=lambda x: False)
+
+ @pytest.mark.parametrize('availability_zone,region,patterns,expected', (
+ # Test ec2_region alone
+ ('fk-fake-1f', None, ['http://EC2-%(ec2_region)s/ubuntu'],
+ ['http://ec2-fk-fake-1/ubuntu']),
+ # Test availability_zone alone
+ ('fk-fake-1f', None, ['http://AZ-%(availability_zone)s/ubuntu'],
+ ['http://az-fk-fake-1f/ubuntu']),
+ # Test region alone
+ (None, 'fk-fake-1', ['http://RG-%(region)s/ubuntu'],
+ ['http://rg-fk-fake-1/ubuntu']),
+ # Test that ec2_region is not available for non-matching AZs
+ ('fake-fake-1f', None,
+ ['http://EC2-%(ec2_region)s/ubuntu',
+ 'http://AZ-%(availability_zone)s/ubuntu'],
+ ['http://az-fake-fake-1f/ubuntu']),
+ # Test that template order is maintained
+ (None, 'fake-region',
+ ['http://RG-%(region)s-2/ubuntu', 'http://RG-%(region)s-1/ubuntu'],
+ ['http://rg-fake-region-2/ubuntu', 'http://rg-fake-region-1/ubuntu']),
+ # Test that non-ASCII hostnames are IDNA encoded;
+ # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q"
+ (None, 'ТεЅТ̣', ['http://www.IDNA-%(region)s.com/ubuntu'],
+ ['http://www.xn--idna--4kd53hh6aba3q.com/ubuntu']),
+ # Test that non-ASCII hostnames with a port are IDNA encoded;
+ # "IDNA-ТεЅТ̣".encode('idna') == b"xn--idna--4kd53hh6aba3q"
+ (None, 'ТεЅТ̣', ['http://www.IDNA-%(region)s.com:8080/ubuntu'],
+ ['http://www.xn--idna--4kd53hh6aba3q.com:8080/ubuntu']),
+ # Test that non-ASCII non-hostname parts of URLs are unchanged
+ (None, 'ТεЅТ̣', ['http://www.example.com/%(region)s/ubuntu'],
+ ['http://www.example.com/ТεЅТ̣/ubuntu']),
+ # Test that IPv4 addresses are unchanged
+ (None, 'fk-fake-1', ['http://192.168.1.1:8080/%(region)s/ubuntu'],
+ ['http://192.168.1.1:8080/fk-fake-1/ubuntu']),
+ # Test that IPv6 addresses are unchanged
+ (None, 'fk-fake-1',
+ ['http://[2001:67c:1360:8001::23]/%(region)s/ubuntu'],
+ ['http://[2001:67c:1360:8001::23]/fk-fake-1/ubuntu']),
+ # Test that unparseable URLs are filtered out of the mirror list
+ (None, 'inv[lid',
+ ['http://%(region)s.in.hostname/should/be/filtered',
+ 'http://but.not.in.the.path/%(region)s'],
+ ['http://but.not.in.the.path/inv[lid']),
+ (None, '-some-region-',
+ ['http://-lead-ing.%(region)s.trail-ing-.example.com/ubuntu'],
+ ['http://lead-ing.some-region.trail-ing.example.com/ubuntu']),
+ ) + tuple(
+ # Dynamically generate a test case for each non-LDH
+ # (Letters/Digits/Hyphen) ASCII character, testing that it is
+ # substituted with a hyphen
+ (None, 'fk{0}fake{0}1'.format(invalid_char),
+ ['http://%(region)s/ubuntu'], ['http://fk-fake-1/ubuntu'])
+ for invalid_char in INVALID_URL_CHARS
+ ))
+ def test_substitution(self, availability_zone, region, patterns, expected):
+ """Test substitution works as expected."""
+ m_data_source = mock.Mock(
+ availability_zone=availability_zone, region=region
+ )
+ mirror_info = {'search': {'primary': patterns}}
+
+ ret = _get_package_mirror_info(
+ mirror_info,
+ data_source=m_data_source,
+ mirror_filter=lambda x: x
+ )
+ assert {'primary': expected} == ret
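
For reference, the IDNA expectations in the parametrized cases above come straight from Python's built-in codec; a minimal standalone sketch (not part of the patch):

    # Python's 'idna' codec lower-cases and punycode-encodes the label:
    label = "IDNA-ТεЅТ̣"
    print(label.encode('idna'))  # b'xn--idna--4kd53hh6aba3q'
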
diff --git a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py
index 9378dd78..08446a95 100755
--- a/cloudinit/distros/ug_util.py
+++ b/cloudinit/distros/ug_util.py
@@ -9,8 +9,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import six
-
from cloudinit import log as logging
from cloudinit import type_utils
from cloudinit import util
@@ -29,7 +27,7 @@ LOG = logging.getLogger(__name__)
# is the standard form used in the rest
# of cloud-init
def _normalize_groups(grp_cfg):
- if isinstance(grp_cfg, six.string_types):
+ if isinstance(grp_cfg, str):
grp_cfg = grp_cfg.strip().split(",")
if isinstance(grp_cfg, list):
c_grp_cfg = {}
@@ -39,7 +37,7 @@ def _normalize_groups(grp_cfg):
if k not in c_grp_cfg:
if isinstance(v, list):
c_grp_cfg[k] = list(v)
- elif isinstance(v, six.string_types):
+ elif isinstance(v, str):
c_grp_cfg[k] = [v]
else:
raise TypeError("Bad group member type %s" %
@@ -47,12 +45,12 @@ def _normalize_groups(grp_cfg):
else:
if isinstance(v, list):
c_grp_cfg[k].extend(v)
- elif isinstance(v, six.string_types):
+ elif isinstance(v, str):
c_grp_cfg[k].append(v)
else:
raise TypeError("Bad group member type %s" %
type_utils.obj_name(v))
- elif isinstance(i, six.string_types):
+ elif isinstance(i, str):
if i not in c_grp_cfg:
c_grp_cfg[i] = []
else:
@@ -89,7 +87,7 @@ def _normalize_users(u_cfg, def_user_cfg=None):
if isinstance(u_cfg, dict):
ad_ucfg = []
for (k, v) in u_cfg.items():
- if isinstance(v, (bool, int, float) + six.string_types):
+ if isinstance(v, (bool, int, float, str)):
if util.is_true(v):
ad_ucfg.append(str(k))
elif isinstance(v, dict):
@@ -99,12 +97,12 @@ def _normalize_users(u_cfg, def_user_cfg=None):
raise TypeError(("Unmappable user value type %s"
" for key %s") % (type_utils.obj_name(v), k))
u_cfg = ad_ucfg
- elif isinstance(u_cfg, six.string_types):
+ elif isinstance(u_cfg, str):
u_cfg = util.uniq_merge_sorted(u_cfg)
users = {}
for user_config in u_cfg:
- if isinstance(user_config, (list,) + six.string_types):
+ if isinstance(user_config, (list, str)):
for u in util.uniq_merge(user_config):
if u and u not in users:
users[u] = {}
@@ -209,7 +207,7 @@ def normalize_users_groups(cfg, distro):
old_user = cfg['user']
# Translate it into the format that is more useful
# going forward
- if isinstance(old_user, six.string_types):
+ if isinstance(old_user, str):
old_user = {
'name': old_user,
}
@@ -238,7 +236,7 @@ def normalize_users_groups(cfg, distro):
default_user_config = util.mergemanydict([old_user, distro_user_config])
base_users = cfg.get('users', [])
- if not isinstance(base_users, (list, dict) + six.string_types):
+ if not isinstance(base_users, (list, dict, str)):
LOG.warning(("Format for 'users' key must be a comma separated string"
" or a dictionary or a list and not %s"),
type_utils.obj_name(base_users))
@@ -252,7 +250,7 @@ def normalize_users_groups(cfg, distro):
base_users.append({'name': 'default'})
elif isinstance(base_users, dict):
base_users['default'] = dict(base_users).get('default', True)
- elif isinstance(base_users, six.string_types):
+ elif isinstance(base_users, str):
# Just append it on to be re-parsed later
base_users += ",default"
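
For reference, _normalize_groups accepts a comma-separated string, a list, or a dict; a hedged sketch of the equivalences (illustrative group names, assuming the function returns the normalized dict):

    from cloudinit.distros.ug_util import _normalize_groups

    # Each input form normalizes to {group_name: [members]}:
    _normalize_groups("adm,users")                # {'adm': [], 'users': []}
    _normalize_groups(["adm", {"users": "foo"}])  # {'adm': [], 'users': ['foo']}
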
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index 57708c14..34acfe84 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -142,7 +142,8 @@ def skip_retry_on_codes(status_codes, _request_args, cause):
def get_instance_userdata(api_version='latest',
metadata_address='http://169.254.169.254',
ssl_details=None, timeout=5, retries=5,
- headers_cb=None, exception_cb=None):
+ headers_cb=None, headers_redact=None,
+ exception_cb=None):
ud_url = url_helper.combine_url(metadata_address, api_version)
ud_url = url_helper.combine_url(ud_url, 'user-data')
user_data = ''
@@ -155,7 +156,8 @@ def get_instance_userdata(api_version='latest',
SKIP_USERDATA_CODES)
response = url_helper.read_file_or_url(
ud_url, ssl_details=ssl_details, timeout=timeout,
- retries=retries, exception_cb=exception_cb, headers_cb=headers_cb)
+ retries=retries, exception_cb=exception_cb, headers_cb=headers_cb,
+ headers_redact=headers_redact)
user_data = response.contents
except url_helper.UrlError as e:
if e.code not in SKIP_USERDATA_CODES:
@@ -169,11 +171,13 @@ def _get_instance_metadata(tree, api_version='latest',
metadata_address='http://169.254.169.254',
ssl_details=None, timeout=5, retries=5,
leaf_decoder=None, headers_cb=None,
+ headers_redact=None,
exception_cb=None):
md_url = url_helper.combine_url(metadata_address, api_version, tree)
caller = functools.partial(
url_helper.read_file_or_url, ssl_details=ssl_details,
timeout=timeout, retries=retries, headers_cb=headers_cb,
+ headers_redact=headers_redact,
exception_cb=exception_cb)
def mcaller(url):
@@ -197,6 +201,7 @@ def get_instance_metadata(api_version='latest',
metadata_address='http://169.254.169.254',
ssl_details=None, timeout=5, retries=5,
leaf_decoder=None, headers_cb=None,
+ headers_redact=None,
exception_cb=None):
# Note, 'meta-data' explicitly has trailing /.
# this is required for CloudStack (LP: #1356855)
@@ -204,6 +209,7 @@ def get_instance_metadata(api_version='latest',
metadata_address=metadata_address,
ssl_details=ssl_details, timeout=timeout,
retries=retries, leaf_decoder=leaf_decoder,
+ headers_redact=headers_redact,
headers_cb=headers_cb,
exception_cb=exception_cb)
@@ -212,12 +218,14 @@ def get_instance_identity(api_version='latest',
metadata_address='http://169.254.169.254',
ssl_details=None, timeout=5, retries=5,
leaf_decoder=None, headers_cb=None,
+ headers_redact=None,
exception_cb=None):
return _get_instance_metadata(tree='dynamic/instance-identity',
api_version=api_version,
metadata_address=metadata_address,
ssl_details=ssl_details, timeout=timeout,
retries=retries, leaf_decoder=leaf_decoder,
+ headers_redact=headers_redact,
headers_cb=headers_cb,
exception_cb=exception_cb)
# vi: ts=4 expandtab
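
The new headers_redact parameter is purely a pass-through to url_helper.read_file_or_url; a hedged usage sketch (the token header name is an example, not something this patch hard-codes):

    from cloudinit import ec2_utils

    # Keep a sensitive request header out of logged URLs and headers:
    userdata = ec2_utils.get_instance_userdata(
        headers_cb=lambda url: {'X-aws-ec2-metadata-token': 'secret'},
        headers_redact=['X-aws-ec2-metadata-token'])
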
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 0db75af9..a409ff8a 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -10,14 +10,12 @@
import abc
import os
-import six
-
-from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES)
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import type_utils
from cloudinit import util
+from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES)
LOG = logging.getLogger(__name__)
@@ -60,8 +58,7 @@ INCLUSION_SRCH = sorted(list(INCLUSION_TYPES_MAP.keys()),
key=(lambda e: 0 - len(e)))
-@six.add_metaclass(abc.ABCMeta)
-class Handler(object):
+class Handler(metaclass=abc.ABCMeta):
def __init__(self, frequency, version=2):
self.handler_version = version
@@ -159,7 +156,7 @@ def _extract_first_or_bytes(blob, size):
# Extract the first line or upto X symbols for text objects
# Extract first X bytes for binary objects
try:
- if isinstance(blob, six.string_types):
+ if isinstance(blob, str):
start = blob.split("\n", 1)[0]
else:
# We want to avoid decoding the whole blob (it might be huge)
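
The decorator and keyword spellings of the metaclass are equivalent; the keyword form simply requires Python 3, which this branch now assumes. A minimal sketch:

    import abc

    # Python 3 replacement for @six.add_metaclass(abc.ABCMeta):
    class Handler(metaclass=abc.ABCMeta):
        @abc.abstractmethod
        def handle_part(self, *args, **kwargs):
            """Subclasses must implement their part handling here."""
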
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index dcd2645e..9752ad28 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -12,10 +12,8 @@ from time import time
import contextlib
import os
-
-from six import StringIO
-from six.moves.configparser import (
- NoSectionError, NoOptionError, RawConfigParser)
+from configparser import NoSectionError, NoOptionError, RawConfigParser
+from io import StringIO
from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
CFG_ENV_NAME)
@@ -453,8 +451,4 @@ class DefaultingConfigParser(RawConfigParser):
contents = '\n'.join([header, contents, ''])
return contents
-
-def identity(object):
- return object
-
# vi: ts=4 expandtab
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 5ae312ba..2e5df042 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -8,17 +8,13 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+import collections
+import io
import logging
import logging.config
import logging.handlers
-
-import collections
import os
import sys
-
-import six
-from six import StringIO
-
import time
# Logging levels for easy access
@@ -74,13 +70,13 @@ def setupLogging(cfg=None):
log_cfgs = []
log_cfg = cfg.get('logcfg')
- if log_cfg and isinstance(log_cfg, six.string_types):
+ if log_cfg and isinstance(log_cfg, str):
# If there is a 'logcfg' entry in the config,
# respect it, it is the old keyname
log_cfgs.append(str(log_cfg))
elif "log_cfgs" in cfg:
for a_cfg in cfg['log_cfgs']:
- if isinstance(a_cfg, six.string_types):
+ if isinstance(a_cfg, str):
log_cfgs.append(a_cfg)
elif isinstance(a_cfg, (collections.Iterable)):
cfg_str = [str(c) for c in a_cfg]
@@ -100,7 +96,7 @@ def setupLogging(cfg=None):
# is acting as a file)
pass
else:
- log_cfg = StringIO(log_cfg)
+ log_cfg = io.StringIO(log_cfg)
# Attempt to load its config
logging.config.fileConfig(log_cfg)
# The first one to work wins!
@@ -126,17 +122,12 @@ def getLogger(name='cloudinit'):
return logging.getLogger(name)
-# Fixes this annoyance...
-# No handlers could be found for logger XXX annoying output...
-try:
- from logging import NullHandler
-except ImportError:
- class NullHandler(logging.Handler):
- def emit(self, record):
- pass
-
-
def _resetLogger(log):
+ """Remove all current handlers, unset log level and add a NullHandler.
+
+ (Adding the NullHandler avoids "No handlers could be found for logger XXX"
+ messages.)
+ """
if not log:
return
handlers = list(log.handlers)
@@ -145,7 +136,7 @@ def _resetLogger(log):
h.close()
log.removeHandler(h)
log.setLevel(NOTSET)
- log.addHandler(NullHandler())
+ log.addHandler(logging.NullHandler())
def resetLogging():
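
logging.NullHandler has been in the standard library since Python 2.7/3.1, so the vendored fallback above is no longer needed and _resetLogger can rely on it directly:

    import logging

    log = logging.getLogger('cloudinit.example')
    # Silences "No handlers could be found for logger ..." warnings:
    log.addHandler(logging.NullHandler())
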
diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py
index 7fbc25ff..668e3cd6 100644
--- a/cloudinit/mergers/__init__.py
+++ b/cloudinit/mergers/__init__.py
@@ -6,8 +6,6 @@
import re
-import six
-
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import type_utils
@@ -85,7 +83,7 @@ def dict_extract_mergers(config):
raw_mergers = config.pop('merge_type', None)
if raw_mergers is None:
return parsed_mergers
- if isinstance(raw_mergers, six.string_types):
+ if isinstance(raw_mergers, str):
return string_extract_mergers(raw_mergers)
for m in raw_mergers:
if isinstance(m, (dict)):
diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py
index 6c5fddc2..93472f13 100644
--- a/cloudinit/mergers/m_dict.py
+++ b/cloudinit/mergers/m_dict.py
@@ -4,8 +4,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import six
-
DEF_MERGE_TYPE = 'no_replace'
MERGE_TYPES = ('replace', DEF_MERGE_TYPE,)
@@ -47,7 +45,7 @@ class Merger(object):
return new_v
if isinstance(new_v, (list, tuple)) and self._recurse_array:
return self._merger.merge(old_v, new_v)
- if isinstance(new_v, six.string_types) and self._recurse_str:
+ if isinstance(new_v, str) and self._recurse_str:
return self._merger.merge(old_v, new_v)
if isinstance(new_v, (dict)) and self._recurse_dict:
return self._merger.merge(old_v, new_v)
diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py
index daa0469a..19f32771 100644
--- a/cloudinit/mergers/m_list.py
+++ b/cloudinit/mergers/m_list.py
@@ -4,8 +4,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import six
-
DEF_MERGE_TYPE = 'replace'
MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE, 'no_replace')
@@ -63,7 +61,7 @@ class Merger(object):
return old_v
if isinstance(new_v, (list, tuple)) and self._recurse_array:
return self._merger.merge(old_v, new_v)
- if isinstance(new_v, six.string_types) and self._recurse_str:
+ if isinstance(new_v, str) and self._recurse_str:
return self._merger.merge(old_v, new_v)
if isinstance(new_v, (dict)) and self._recurse_dict:
return self._merger.merge(old_v, new_v)
diff --git a/cloudinit/mergers/m_str.py b/cloudinit/mergers/m_str.py
index 629df58e..539e3e29 100644
--- a/cloudinit/mergers/m_str.py
+++ b/cloudinit/mergers/m_str.py
@@ -4,8 +4,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import six
-
class Merger(object):
def __init__(self, _merger, opts):
@@ -23,13 +21,10 @@ class Merger(object):
# perform the following action, if appending we will
# merge them together, otherwise we will just return value.
def _on_str(self, value, merge_with):
- if not isinstance(value, six.string_types):
+ if not isinstance(value, str):
return merge_with
if not self._append:
return merge_with
- if isinstance(value, six.text_type):
- return value + six.text_type(merge_with)
- else:
- return value + six.binary_type(merge_with)
+ return value + merge_with
# vi: ts=4 expandtab
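
With only one string type on Python 3, the append merge collapses to plain concatenation; the simplified _on_str behaves roughly like this sketch:

    # Merge semantics after this change (append taken from the opts):
    def _on_str(value, merge_with, append=True):
        if not isinstance(value, str) or not append:
            return merge_with
        return value + merge_with
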
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 1d5eb535..cb8c1601 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -6,13 +6,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
import errno
+import ipaddress
import logging
import os
import re
from functools import partial
-from cloudinit.net.network_state import mask_to_net_prefix
from cloudinit import util
+from cloudinit.net.network_state import mask_to_net_prefix
from cloudinit.url_helper import UrlError, readurl
LOG = logging.getLogger(__name__)
@@ -334,10 +335,20 @@ def find_fallback_nic(blacklist_drivers=None):
"""Return the name of the 'fallback' network device."""
if util.is_FreeBSD():
return find_fallback_nic_on_freebsd(blacklist_drivers)
+ elif util.is_NetBSD() or util.is_OpenBSD():
+ return find_fallback_nic_on_netbsd_or_openbsd(blacklist_drivers)
else:
return find_fallback_nic_on_linux(blacklist_drivers)
+def find_fallback_nic_on_netbsd_or_openbsd(blacklist_drivers=None):
+ values = list(sorted(
+ get_interfaces_by_mac().values(),
+ key=natural_sort_key))
+ if values:
+ return values[0]
+
+
def find_fallback_nic_on_freebsd(blacklist_drivers=None):
"""Return the name of the 'fallback' network device on FreeBSD.
@@ -799,6 +810,10 @@ def get_ib_interface_hwaddr(ifname, ethernet_format):
def get_interfaces_by_mac():
if util.is_FreeBSD():
return get_interfaces_by_mac_on_freebsd()
+ elif util.is_NetBSD():
+ return get_interfaces_by_mac_on_netbsd()
+ elif util.is_OpenBSD():
+ return get_interfaces_by_mac_on_openbsd()
else:
return get_interfaces_by_mac_on_linux()
@@ -830,6 +845,36 @@ def get_interfaces_by_mac_on_freebsd():
return results
+def get_interfaces_by_mac_on_netbsd():
+ ret = {}
+ re_field_match = (
+ r"(?P<ifname>\w+).*address:\s"
+ r"(?P<mac>([\da-f]{2}[:-]){5}([\da-f]{2})).*")
+ (out, _) = util.subp(['ifconfig', '-a'])
+ if_lines = re.sub(r'\n\s+', ' ', out).splitlines()
+ for line in if_lines:
+ m = re.match(re_field_match, line)
+ if m:
+ fields = m.groupdict()
+ ret[fields['mac']] = fields['ifname']
+ return ret
+
+
+def get_interfaces_by_mac_on_openbsd():
+ ret = {}
+ re_field_match = (
+ r"(?P<ifname>\w+).*lladdr\s"
+ r"(?P<mac>([\da-f]{2}[:-]){5}([\da-f]{2})).*")
+ (out, _) = util.subp(['ifconfig', '-a'])
+ if_lines = re.sub(r'\n\s+', ' ', out).splitlines()
+ for line in if_lines:
+ m = re.match(re_field_match, line)
+ if m:
+ fields = m.groupdict()
+ ret[fields['mac']] = fields['ifname']
+ return ret
+
+
def get_interfaces_by_mac_on_linux():
"""Build a dictionary of tuples {mac: name}.
@@ -917,6 +962,38 @@ def has_url_connectivity(url):
return True
+def is_ip_address(s: str) -> bool:
+ """Returns a bool indicating if ``s`` is an IP address.
+
+ :param s:
+ The string to test.
+
+ :return:
+ A bool indicating whether the string is an IP address.
+ """
+ try:
+ ipaddress.ip_address(s)
+ except ValueError:
+ return False
+ return True
+
+
+def is_ipv4_address(s: str) -> bool:
+ """Returns a bool indicating if ``s`` is an IPv4 address.
+
+ :param s:
+ The string to test.
+
+ :return:
+ A bool indicating whether the string is an IPv4 address.
+ """
+ try:
+ ipaddress.IPv4Address(s)
+ except ValueError:
+ return False
+ return True
+
+
class EphemeralIPv4Network(object):
"""Context manager which sets up temporary static network configuration.
diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py
new file mode 100644
index 00000000..fb714d4c
--- /dev/null
+++ b/cloudinit/net/bsd.py
@@ -0,0 +1,165 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import re
+
+from cloudinit import log as logging
+from cloudinit import net
+from cloudinit import util
+from cloudinit.distros.parsers.resolv_conf import ResolvConf
+from cloudinit.distros import bsd_utils
+
+from . import renderer
+
+LOG = logging.getLogger(__name__)
+
+
+class BSDRenderer(renderer.Renderer):
+ resolv_conf_fn = 'etc/resolv.conf'
+ rc_conf_fn = 'etc/rc.conf'
+
+ def get_rc_config_value(self, key):
+ fn = util.target_path(self.target, self.rc_conf_fn)
+ return bsd_utils.get_rc_config_value(key, fn=fn)
+
+ def set_rc_config_value(self, key, value):
+ fn = util.target_path(self.target, self.rc_conf_fn)
+ bsd_utils.set_rc_config_value(key, value, fn=fn)
+
+ def __init__(self, config=None):
+ if not config:
+ config = {}
+ self.target = None
+ self.interface_configurations = {}
+ self._postcmds = config.get('postcmds', True)
+
+ def _ifconfig_entries(self, settings, target=None):
+ ifname_by_mac = net.get_interfaces_by_mac()
+ for interface in settings.iter_interfaces():
+ device_name = interface.get("name")
+ device_mac = interface.get("mac_address")
+ if device_name and re.match(r'^lo\d+$', device_name):
+ continue
+ if device_mac not in ifname_by_mac:
+ LOG.info('Cannot find any device with MAC %s', device_mac)
+ elif device_mac and device_name:
+ cur_name = ifname_by_mac[device_mac]
+ if cur_name != device_name:
+ LOG.info('netif service will rename interface %s to %s',
+ cur_name, device_name)
+ try:
+ self.rename_interface(cur_name, device_name)
+ except NotImplementedError:
+ LOG.error((
+ 'Interface renaming is '
+ 'not supported on this OS'))
+ device_name = cur_name
+
+ else:
+ device_name = ifname_by_mac[device_mac]
+
+ LOG.info('Configuring interface %s', device_name)
+
+ self.interface_configurations[device_name] = 'DHCP'
+
+ for subnet in interface.get("subnets", []):
+ if subnet.get('type') == 'static':
+ if not subnet.get('netmask'):
+ LOG.debug(
+ 'Skipping IP %s, because there is no netmask',
+ subnet.get('address'))
+ continue
+ LOG.debug('Configuring dev %s with %s / %s', device_name,
+ subnet.get('address'), subnet.get('netmask'))
+
+ self.interface_configurations[device_name] = {
+ 'address': subnet.get('address'),
+ 'netmask': subnet.get('netmask'),
+ }
+
+ def _route_entries(self, settings, target=None):
+ routes = list(settings.iter_routes())
+ for interface in settings.iter_interfaces():
+ subnets = interface.get("subnets", [])
+ for subnet in subnets:
+ if subnet.get('type') != 'static':
+ continue
+ gateway = subnet.get('gateway')
+ if gateway and len(gateway.split('.')) == 4:
+ routes.append({
+ 'network': '0.0.0.0',
+ 'netmask': '0.0.0.0',
+ 'gateway': gateway})
+ routes += subnet.get('routes', [])
+ for route in routes:
+ network = route.get('network')
+ if not network:
+ LOG.debug('Skipping a bad route entry')
+ continue
+ netmask = route.get('netmask')
+ gateway = route.get('gateway')
+ self.set_route(network, netmask, gateway)
+
+ def _resolve_conf(self, settings, target=None):
+ nameservers = settings.dns_nameservers
+ searchdomains = settings.dns_searchdomains
+ for interface in settings.iter_interfaces():
+ for subnet in interface.get("subnets", []):
+ if 'dns_nameservers' in subnet:
+ nameservers.extend(subnet['dns_nameservers'])
+ if 'dns_search' in subnet:
+ searchdomains.extend(subnet['dns_search'])
+ # Try to read the /etc/resolv.conf or just start from scratch if that
+ # fails.
+ try:
+ resolvconf = ResolvConf(util.load_file(util.target_path(
+ target, self.resolv_conf_fn)))
+ resolvconf.parse()
+ except IOError:
+ util.logexc(LOG, "Failed to parse %s, using a new empty file",
+ util.target_path(target, self.resolv_conf_fn))
+ resolvconf = ResolvConf('')
+ resolvconf.parse()
+
+ # Add some nameservers
+ for server in nameservers:
+ try:
+ resolvconf.add_nameserver(server)
+ except ValueError:
+ util.logexc(LOG, "Failed to add nameserver %s", server)
+
+ # And add any searchdomains.
+ for domain in searchdomains:
+ try:
+ resolvconf.add_search_domain(domain)
+ except ValueError:
+ util.logexc(LOG, "Failed to add search domain %s", domain)
+ util.write_file(
+ util.target_path(target, self.resolv_conf_fn),
+ str(resolvconf), 0o644)
+
+ def render_network_state(self, network_state, templates=None, target=None):
+ self._ifconfig_entries(settings=network_state)
+ self._route_entries(settings=network_state)
+ self._resolve_conf(settings=network_state)
+
+ self.write_config()
+ self.start_services(run=self._postcmds)
+
+ def dhcp_interfaces(self):
+ ic = self.interface_configurations.items
+ return [k for k, v in ic() if v == 'DHCP']
+
+ def start_services(self, run=False):
+ raise NotImplementedError()
+
+ def write_config(self, target=None):
+ raise NotImplementedError()
+
+ def set_gateway(self, gateway):
+ raise NotImplementedError()
+
+ def rename_interface(self, cur_name, device_name):
+ raise NotImplementedError()
+
+ def set_route(self, network, netmask, gateway):
+ raise NotImplementedError()
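
BSDRenderer is a template-method base: render_network_state gathers interface, route and resolver state, then defers to the OS hooks at the bottom. A minimal hypothetical subclass overrides only what its platform supports:

    import cloudinit.net.bsd

    class Renderer(cloudinit.net.bsd.BSDRenderer):
        def write_config(self):
            pass  # persist self.interface_configurations somewhere

        def start_services(self, run=False):
            pass  # (re)start the platform's network service

        def set_route(self, network, netmask, gateway):
            pass  # hooks left un-overridden keep raising NotImplementedError
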
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
index 55166ea8..0e83685d 100755
--- a/cloudinit/net/cmdline.py
+++ b/cloudinit/net/cmdline.py
@@ -10,10 +10,9 @@ import base64
import glob
import gzip
import io
+import logging
import os
-import six
-
from cloudinit import util
from . import get_devicelist
@@ -21,20 +20,19 @@ from . import read_sys_net_safe
_OPEN_ISCSI_INTERFACE_FILE = "/run/initramfs/open-iscsi.interface"
+KERNEL_CMDLINE_NETWORK_CONFIG_DISABLED = "disabled"
+
-@six.add_metaclass(abc.ABCMeta)
-class InitramfsNetworkConfigSource(object):
+class InitramfsNetworkConfigSource(metaclass=abc.ABCMeta):
"""ABC for net config sources that read config written by initramfses"""
@abc.abstractmethod
- def is_applicable(self):
- # type: () -> bool
+ def is_applicable(self) -> bool:
"""Is this initramfs config source applicable to the current system?"""
pass
@abc.abstractmethod
- def render_config(self):
- # type: () -> dict
+ def render_config(self) -> dict:
"""Render a v1 network config from the initramfs configuration"""
pass
@@ -65,8 +63,7 @@ class KlibcNetworkConfigSource(InitramfsNetworkConfigSource):
if mac_addr:
self._mac_addrs[k] = mac_addr
- def is_applicable(self):
- # type: () -> bool
+ def is_applicable(self) -> bool:
"""
Return whether this system has klibc initramfs network config or not
@@ -84,8 +81,7 @@ class KlibcNetworkConfigSource(InitramfsNetworkConfigSource):
return True
return False
- def render_config(self):
- # type: () -> dict
+ def render_config(self) -> dict:
return config_from_klibc_net_cfg(
files=self._files, mac_addrs=self._mac_addrs,
)
@@ -104,9 +100,12 @@ def _klibc_to_config_entry(content, mac_addrs=None):
provided here. There is no good documentation on this unfortunately.
DEVICE=<name> is expected/required and PROTO should indicate if
- this is 'static' or 'dhcp' or 'dhcp6' (LP: #1621507).
+ this is 'none' (static) or 'dhcp' or 'dhcp6' (LP: #1621507).
note that IPV6PROTO is also written by newer code to address the
possibility of both ipv4 and ipv6 getting addresses.
+
+ Full syntax is documented at:
+ https://git.kernel.org/pub/scm/libs/klibc/klibc.git/plain/usr/kinit/ipconfig/README.ipconfig
"""
if mac_addrs is None:
@@ -125,9 +124,9 @@ def _klibc_to_config_entry(content, mac_addrs=None):
if data.get('filename'):
proto = 'dhcp'
else:
- proto = 'static'
+ proto = 'none'
- if proto not in ('static', 'dhcp', 'dhcp6'):
+ if proto not in ('none', 'dhcp', 'dhcp6'):
raise ValueError("Unexpected value for PROTO: %s" % proto)
iface = {
@@ -147,6 +146,9 @@ def _klibc_to_config_entry(content, mac_addrs=None):
# PROTO for ipv4, IPV6PROTO for ipv6
cur_proto = data.get(pre + 'PROTO', proto)
+ # ipconfig's 'none' means static; map it to cloud-init's 'static' type
+ if cur_proto == 'none':
+ cur_proto = 'static'
subnet = {'type': cur_proto, 'control': 'manual'}
# only populate address for static types. While the rendered config
@@ -230,34 +232,35 @@ def read_initramfs_config():
return None
-def _decomp_gzip(blob, strict=True):
- # decompress blob. raise exception if not compressed unless strict=False.
+def _decomp_gzip(blob):
+ # decompress blob or return original blob
with io.BytesIO(blob) as iobuf:
gzfp = None
try:
gzfp = gzip.GzipFile(mode="rb", fileobj=iobuf)
return gzfp.read()
except IOError:
- if strict:
- raise
return blob
finally:
if gzfp:
gzfp.close()
-def _b64dgz(b64str, gzipped="try"):
- # decode a base64 string. If gzipped is true, transparently uncompresss
- # if gzipped is 'try', then try gunzip, returning the original on fail.
- try:
- blob = base64.b64decode(b64str)
- except TypeError:
- raise ValueError("Invalid base64 text: %s" % b64str)
+def _b64dgz(data):
+ """Decode a string base64 encoding, if gzipped, uncompress as well
- if not gzipped:
- return blob
+ :return: decompressed unencoded string of the data or empty string on
+ unencoded data.
+ """
+ try:
+ blob = base64.b64decode(data)
+ except (TypeError, ValueError):
+ logging.error(
+ "Expected base64 encoded kernel commandline parameter"
+ " network-config. Ignoring network-config=%s.", data)
+ return ''
- return _decomp_gzip(blob, strict=gzipped != "try")
+ return _decomp_gzip(blob)
def read_kernel_cmdline_config(cmdline=None):
@@ -270,6 +273,8 @@ def read_kernel_cmdline_config(cmdline=None):
if tok.startswith("network-config="):
data64 = tok.split("=", 1)[1]
if data64:
+ if data64 == KERNEL_CMDLINE_NETWORK_CONFIG_DISABLED:
+ return {"config": "disabled"}
return util.load_yaml(_b64dgz(data64))
return None
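
With the new sentinel, network-config=disabled on the kernel command line short-circuits before any base64/gzip decoding is attempted:

    from cloudinit.net.cmdline import read_kernel_cmdline_config

    cfg = read_kernel_cmdline_config(
        cmdline='root=/dev/vda1 network-config=disabled')
    assert cfg == {'config': 'disabled'}
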
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index c033cc8e..19d0199c 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -10,6 +10,7 @@ import os
import re
import signal
import time
+from io import StringIO
from cloudinit.net import (
EphemeralIPv4Network, find_fallback_nic, get_devicelist,
@@ -17,7 +18,6 @@ from cloudinit.net import (
from cloudinit.net.network_state import mask_and_ipv4_to_bcast_addr as bcip
from cloudinit import temp_utils
from cloudinit import util
-from six import StringIO
LOG = logging.getLogger(__name__)
diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py
index d6f61da3..60f05bb2 100644
--- a/cloudinit/net/freebsd.py
+++ b/cloudinit/net/freebsd.py
@@ -1,156 +1,29 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import re
-
from cloudinit import log as logging
-from cloudinit import net
+import cloudinit.net.bsd
from cloudinit import util
-from cloudinit.distros import rhel_util
-from cloudinit.distros.parsers.resolv_conf import ResolvConf
-
-from . import renderer
LOG = logging.getLogger(__name__)
-class Renderer(renderer.Renderer):
- resolv_conf_fn = 'etc/resolv.conf'
- rc_conf_fn = 'etc/rc.conf'
+class Renderer(cloudinit.net.bsd.BSDRenderer):
def __init__(self, config=None):
- if not config:
- config = {}
- self.dhcp_interfaces = []
- self._postcmds = config.get('postcmds', True)
-
- def _update_rc_conf(self, settings, target=None):
- fn = util.target_path(target, self.rc_conf_fn)
- rhel_util.update_sysconfig_file(fn, settings)
-
- def _write_ifconfig_entries(self, settings, target=None):
- ifname_by_mac = net.get_interfaces_by_mac()
- for interface in settings.iter_interfaces():
- device_name = interface.get("name")
- device_mac = interface.get("mac_address")
- if device_name and re.match(r'^lo\d+$', device_name):
- continue
- if device_mac not in ifname_by_mac:
- LOG.info('Cannot find any device with MAC %s', device_mac)
- elif device_mac and device_name:
- cur_name = ifname_by_mac[device_mac]
- if cur_name != device_name:
- LOG.info('netif service will rename interface %s to %s',
- cur_name, device_name)
- self._update_rc_conf(
- {'ifconfig_%s_name' % cur_name: device_name},
- target=target)
- else:
- device_name = ifname_by_mac[device_mac]
-
- LOG.info('Configuring interface %s', device_name)
- ifconfig = 'DHCP' # default
-
- for subnet in interface.get("subnets", []):
- if ifconfig != 'DHCP':
- LOG.info('The FreeBSD provider only set the first subnet.')
- break
- if subnet.get('type') == 'static':
- if not subnet.get('netmask'):
- LOG.debug(
- 'Skipping IP %s, because there is no netmask',
- subnet.get('address'))
- continue
- LOG.debug('Configuring dev %s with %s / %s', device_name,
- subnet.get('address'), subnet.get('netmask'))
- # Configure an ipv4 address.
- ifconfig = (
- subnet.get('address') + ' netmask ' +
- subnet.get('netmask'))
-
- if ifconfig == 'DHCP':
- self.dhcp_interfaces.append(device_name)
- self._update_rc_conf(
- {'ifconfig_' + device_name: ifconfig},
- target=target)
-
- def _write_route_entries(self, settings, target=None):
- routes = list(settings.iter_routes())
- for interface in settings.iter_interfaces():
- subnets = interface.get("subnets", [])
- for subnet in subnets:
- if subnet.get('type') != 'static':
- continue
- gateway = subnet.get('gateway')
- if gateway and len(gateway.split('.')) == 4:
- routes.append({
- 'network': '0.0.0.0',
- 'netmask': '0.0.0.0',
- 'gateway': gateway})
- routes += subnet.get('routes', [])
- route_cpt = 0
- for route in routes:
- network = route.get('network')
- if not network:
- LOG.debug('Skipping a bad route entry')
- continue
- netmask = route.get('netmask')
- gateway = route.get('gateway')
- route_cmd = "-route %s/%s %s" % (network, netmask, gateway)
- if network == '0.0.0.0':
- self._update_rc_conf(
- {'defaultrouter': gateway}, target=target)
+ self._route_cpt = 0
+ super(Renderer, self).__init__(config)
+
+ def rename_interface(self, cur_name, device_name):
+ self.set_rc_config_value('ifconfig_%s_name' % cur_name, device_name)
+
+ def write_config(self):
+ for device_name, v in self.interface_configurations.items():
+ if isinstance(v, dict):
+ self.set_rc_config_value(
+ 'ifconfig_' + device_name,
+ v.get('address') + ' netmask ' + v.get('netmask'))
else:
- self._update_rc_conf(
- {'route_net%d' % route_cpt: route_cmd}, target=target)
- route_cpt += 1
-
- def _write_resolve_conf(self, settings, target=None):
- nameservers = settings.dns_nameservers
- searchdomains = settings.dns_searchdomains
- for interface in settings.iter_interfaces():
- for subnet in interface.get("subnets", []):
- if 'dns_nameservers' in subnet:
- nameservers.extend(subnet['dns_nameservers'])
- if 'dns_search' in subnet:
- searchdomains.extend(subnet['dns_search'])
- # Try to read the /etc/resolv.conf or just start from scratch if that
- # fails.
- try:
- resolvconf = ResolvConf(util.load_file(util.target_path(
- target, self.resolv_conf_fn)))
- resolvconf.parse()
- except IOError:
- util.logexc(LOG, "Failed to parse %s, use new empty file",
- util.target_path(target, self.resolv_conf_fn))
- resolvconf = ResolvConf('')
- resolvconf.parse()
-
- # Add some nameservers
- for server in nameservers:
- try:
- resolvconf.add_nameserver(server)
- except ValueError:
- util.logexc(LOG, "Failed to add nameserver %s", server)
-
- # And add any searchdomains.
- for domain in searchdomains:
- try:
- resolvconf.add_search_domain(domain)
- except ValueError:
- util.logexc(LOG, "Failed to add search domain %s", domain)
- util.write_file(
- util.target_path(target, self.resolv_conf_fn),
- str(resolvconf), 0o644)
-
- def _write_network(self, settings, target=None):
- self._write_ifconfig_entries(settings, target=target)
- self._write_route_entries(settings, target=target)
- self._write_resolve_conf(settings, target=target)
-
- self.start_services(run=self._postcmds)
-
- def render_network_state(self, network_state, templates=None, target=None):
- self._write_network(network_state, target=target)
+ self.set_rc_config_value('ifconfig_' + device_name, 'DHCP')
def start_services(self, run=False):
if not run:
@@ -165,11 +38,21 @@ class Renderer(renderer.Renderer):
# - dhclient: it cannot stop the dhclient started by the netif service.
# In both case, the situation is ok, and we can proceed.
util.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1])
- for dhcp_interface in self.dhcp_interfaces:
+
+ for dhcp_interface in self.dhcp_interfaces():
util.subp(['service', 'dhclient', 'restart', dhcp_interface],
rcs=[0, 1],
capture=True)
+ def set_route(self, network, netmask, gateway):
+ if network == '0.0.0.0':
+ self.set_rc_config_value('defaultrouter', gateway)
+ else:
+ route_name = 'route_net%d' % self._route_cpt
+ route_cmd = "-route %s/%s %s" % (network, netmask, gateway)
+ self.set_rc_config_value(route_name, route_cmd)
+ self._route_cpt += 1
+
def available(target=None):
return util.is_FreeBSD()
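
Concretely, the FreeBSD hooks reduce to set_rc_config_value() calls; for a hypothetical static vtnet0 with a default route they would produce rc.conf entries along these lines (illustrative only, this edits etc/rc.conf if actually run):

    from cloudinit.net.freebsd import Renderer

    renderer = Renderer()
    renderer.set_rc_config_value(
        'ifconfig_vtnet0', '192.0.2.10 netmask 255.255.255.0')
    renderer.set_rc_config_value('defaultrouter', '192.0.2.1')
    # non-default routes become numbered route_net<N> entries:
    renderer.set_rc_config_value(
        'route_net0', '-route 198.51.100.0/255.255.255.0 192.0.2.254')
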
diff --git a/cloudinit/net/netbsd.py b/cloudinit/net/netbsd.py
new file mode 100644
index 00000000..9cc8ef31
--- /dev/null
+++ b/cloudinit/net/netbsd.py
@@ -0,0 +1,42 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import log as logging
+from cloudinit import util
+import cloudinit.net.bsd
+
+LOG = logging.getLogger(__name__)
+
+
+class Renderer(cloudinit.net.bsd.BSDRenderer):
+
+ def __init__(self, config=None):
+ super(Renderer, self).__init__(config)
+
+ def write_config(self):
+ if self.dhcp_interfaces():
+ self.set_rc_config_value('dhcpcd', 'YES')
+ self.set_rc_config_value(
+ 'dhcpcd_flags',
+ ' '.join(self.dhcp_interfaces()))
+ for device_name, v in self.interface_configurations.items():
+ if isinstance(v, dict):
+ self.set_rc_config_value(
+ 'ifconfig_' + device_name,
+ v.get('address') + ' netmask ' + v.get('netmask'))
+
+ def start_services(self, run=False):
+ if not run:
+ LOG.debug("netbsd generate postcmd disabled")
+ return
+
+ util.subp(['service', 'network', 'restart'], capture=True)
+ if self.dhcp_interfaces():
+ util.subp(['service', 'dhcpcd', 'restart'], capture=True)
+
+ def set_route(self, network, netmask, gateway):
+ if network == '0.0.0.0':
+ self.set_rc_config_value('defaultroute', gateway)
+
+
+def available(target=None):
+ return util.is_NetBSD()
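
NetBSD runs a single dhcpcd service for all DHCP interfaces, so write_config() records them in one dhcpcd_flags value instead of per-interface entries; for two hypothetical interfaces the rc.conf additions would be roughly:

    from cloudinit.net.netbsd import Renderer

    renderer = Renderer()
    # Illustrative values; on a real system this edits etc/rc.conf:
    renderer.set_rc_config_value('dhcpcd', 'YES')
    renderer.set_rc_config_value('dhcpcd_flags', 'vioif0 vioif1')
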
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 9b126100..35c279f9 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -10,8 +10,6 @@ import logging
import socket
import struct
-import six
-
from cloudinit import safeyaml
from cloudinit import util
@@ -186,7 +184,7 @@ class NetworkState(object):
def iter_interfaces(self, filter_func=None):
ifaces = self._network_state.get('interfaces', {})
- for iface in six.itervalues(ifaces):
+ for iface in ifaces.values():
if filter_func is None:
yield iface
else:
@@ -220,8 +218,7 @@ class NetworkState(object):
)
-@six.add_metaclass(CommandHandlerMeta)
-class NetworkStateInterpreter(object):
+class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
initial_network_state = {
'interfaces': {},
@@ -315,7 +312,7 @@ class NetworkStateInterpreter(object):
def parse_config_v2(self, skip_broken=True):
for command_type, command in self._config.items():
- if command_type == 'version':
+ if command_type in ['version', 'renderer']:
continue
try:
handler = self.command_handlers[command_type]
@@ -699,7 +696,7 @@ class NetworkStateInterpreter(object):
def handle_wifis(self, command):
LOG.warning('Wifi configuration is only available to distros with'
- 'netplan rendering support.')
+ ' netplan rendering support.')
def _v2_common(self, cfg):
LOG.debug('v2_common: handling config:\n%s', cfg)
@@ -970,7 +967,7 @@ def ipv4_mask_to_net_prefix(mask):
"""
if isinstance(mask, int):
return mask
- if isinstance(mask, six.string_types):
+ if isinstance(mask, str):
try:
return int(mask)
except ValueError:
@@ -997,7 +994,7 @@ def ipv6_mask_to_net_prefix(mask):
if isinstance(mask, int):
return mask
- if isinstance(mask, six.string_types):
+ if isinstance(mask, str):
try:
return int(mask)
except ValueError:
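
The parse_config_v2 hunk above now skips a top-level 'renderer' key the same way it skips 'version', so netplan-style v2 input such as the following no longer falls through to the command-handler lookup:

    # Hypothetical v2 config; 'renderer' is ignored rather than treated
    # as a command:
    v2_config = {
        'version': 2,
        'renderer': 'networkd',
        'ethernets': {'eth0': {'dhcp4': True}},
    }
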
diff --git a/cloudinit/net/openbsd.py b/cloudinit/net/openbsd.py
new file mode 100644
index 00000000..b9897e90
--- /dev/null
+++ b/cloudinit/net/openbsd.py
@@ -0,0 +1,44 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import log as logging
+from cloudinit import util
+import cloudinit.net.bsd
+
+LOG = logging.getLogger(__name__)
+
+
+class Renderer(cloudinit.net.bsd.BSDRenderer):
+
+ def write_config(self):
+ for device_name, v in self.interface_configurations.items():
+ if_file = 'etc/hostname.{}'.format(device_name)
+ fn = util.target_path(self.target, if_file)
+ if device_name in self.dhcp_interfaces():
+ content = 'dhcp\n'
+ elif isinstance(v, dict):
+ try:
+ content = "inet {address} {netmask}\n".format(
+ address=v['address'],
+ netmask=v['netmask'])
+ except KeyError:
+ LOG.error(
+ "Invalid static configuration for %s",
+ device_name)
+ util.write_file(fn, content)
+
+ def start_services(self, run=False):
+ if not self._postcmds:
+ LOG.debug("openbsd generate postcmd disabled")
+ return
+ util.subp(['sh', '/etc/netstart'], capture=True)
+
+ def set_route(self, network, netmask, gateway):
+ if network == '0.0.0.0':
+ if_file = 'etc/mygate'
+ fn = util.target_path(self.target, if_file)
+ content = gateway + '\n'
+ util.write_file(fn, content)
+
+
+def available(target=None):
+ return util.is_OpenBSD()
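
OpenBSD keeps per-interface files rather than rc.conf entries; write_config() above emits one /etc/hostname.<ifname> per device, with the default gateway going to /etc/mygate. Hypothetical contents:

    #   /etc/hostname.vio0 (DHCP interface):  dhcp
    #   /etc/hostname.vio0 (static subnet):   inet 192.0.2.10 255.255.255.0
    #   /etc/mygate:                          192.0.2.1
    content = 'inet {address} {netmask}\n'.format(
        address='192.0.2.10', netmask='255.255.255.0')
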
diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
index 5f32e90f..2a61a7a8 100644
--- a/cloudinit/net/renderer.py
+++ b/cloudinit/net/renderer.py
@@ -6,7 +6,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import abc
-import six
+import io
from .network_state import parse_net_config_data
from .udev import generate_udev_rule
@@ -34,7 +34,7 @@ class Renderer(object):
"""Given state, emit udev rules to map mac to ifname."""
# TODO(harlowja): this seems shared between eni renderer and
# this, so move it to a shared location.
- content = six.StringIO()
+ content = io.StringIO()
for iface in network_state.iter_interfaces(filter_by_physical):
# for physical interfaces write out a persist net udev rule
if 'name' in iface and iface.get('mac_address'):
diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py
index b98dbbe3..e2de4d55 100644
--- a/cloudinit/net/renderers.py
+++ b/cloudinit/net/renderers.py
@@ -2,18 +2,23 @@
from . import eni
from . import freebsd
+from . import netbsd
from . import netplan
from . import RendererNotFoundError
+from . import openbsd
from . import sysconfig
NAME_TO_RENDERER = {
"eni": eni,
"freebsd": freebsd,
+ "netbsd": netbsd,
"netplan": netplan,
+ "openbsd": openbsd,
"sysconfig": sysconfig,
}
-DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd"]
+DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd",
+ "netbsd", "openbsd"]
def search(priority=None, target=None, first=False):
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 3e06af01..0a387377 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -1,16 +1,16 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import copy
+import io
import os
import re
-import six
+from configobj import ConfigObj
-from cloudinit.distros.parsers import networkmanager_conf
-from cloudinit.distros.parsers import resolv_conf
from cloudinit import log as logging
from cloudinit import util
-
-from configobj import ConfigObj
+from cloudinit.distros.parsers import networkmanager_conf
+from cloudinit.distros.parsers import resolv_conf
from . import renderer
from .network_state import (
@@ -86,6 +86,9 @@ class ConfigMap(object):
def __getitem__(self, key):
return self._conf[key]
+ def get(self, key):
+ return self._conf.get(key)
+
def __contains__(self, key):
return key in self._conf
@@ -96,7 +99,7 @@ class ConfigMap(object):
return len(self._conf)
def to_string(self):
- buf = six.StringIO()
+ buf = io.StringIO()
buf.write(_make_header())
if self._conf:
buf.write("\n")
@@ -104,11 +107,14 @@ class ConfigMap(object):
value = self._conf[key]
if isinstance(value, bool):
value = self._bool_map[value]
- if not isinstance(value, six.string_types):
+ if not isinstance(value, str):
value = str(value)
buf.write("%s=%s\n" % (key, _quote_value(value)))
return buf.getvalue()
+ def update(self, updates):
+ self._conf.update(updates)
+
class Route(ConfigMap):
"""Represents a route configuration."""
@@ -150,7 +156,7 @@ class Route(ConfigMap):
# only accept ipv4 and ipv6
if proto not in ['ipv4', 'ipv6']:
raise ValueError("Unknown protocol '%s'" % (str(proto)))
- buf = six.StringIO()
+ buf = io.StringIO()
buf.write(_make_header())
if self._conf:
buf.write("\n")
@@ -269,13 +275,29 @@ class Renderer(renderer.Renderer):
# s1-networkscripts-interfaces.html (or other docs for
# details about this)
- iface_defaults = tuple([
- ('ONBOOT', True),
- ('USERCTL', False),
- ('NM_CONTROLLED', False),
- ('BOOTPROTO', 'none'),
- ('STARTMODE', 'auto'),
- ])
+ iface_defaults = {
+ 'rhel': {'ONBOOT': True, 'USERCTL': False, 'NM_CONTROLLED': False,
+ 'BOOTPROTO': 'none'},
+ 'suse': {'BOOTPROTO': 'static', 'STARTMODE': 'auto'},
+ }
+
+ cfg_key_maps = {
+ 'rhel': {
+ 'accept-ra': 'IPV6_FORCE_ACCEPT_RA',
+ 'bridge_stp': 'STP',
+ 'bridge_ageing': 'AGEING',
+ 'bridge_bridgeprio': 'PRIO',
+ 'mac_address': 'HWADDR',
+ 'mtu': 'MTU',
+ },
+ 'suse': {
+ 'bridge_stp': 'BRIDGE_STP',
+ 'bridge_ageing': 'BRIDGE_AGEINGTIME',
+ 'bridge_bridgeprio': 'BRIDGE_PRIORITY',
+ 'mac_address': 'LLADDR',
+ 'mtu': 'MTU',
+ },
+ }
# If these keys exist, then their values will be used to form
# a BONDING_OPTS grouping; otherwise no grouping will be set.
@@ -297,12 +319,6 @@ class Renderer(renderer.Renderer):
('bond_primary_reselect', "primary_reselect=%s"),
])
- bridge_opts_keys = tuple([
- ('bridge_stp', 'STP'),
- ('bridge_ageing', 'AGEING'),
- ('bridge_bridgeprio', 'PRIO'),
- ])
-
templates = {}
def __init__(self, config=None):
@@ -320,65 +336,101 @@ class Renderer(renderer.Renderer):
'iface_templates': config.get('iface_templates'),
'route_templates': config.get('route_templates'),
}
+ self.flavor = config.get('flavor', 'rhel')
@classmethod
- def _render_iface_shared(cls, iface, iface_cfg):
- for k, v in cls.iface_defaults:
- iface_cfg[k] = v
+ def _render_iface_shared(cls, iface, iface_cfg, flavor):
+ flavor_defaults = copy.deepcopy(cls.iface_defaults.get(flavor, {}))
+ iface_cfg.update(flavor_defaults)
- for (old_key, new_key) in [('mac_address', 'HWADDR'), ('mtu', 'MTU')]:
+ for old_key in ('mac_address', 'mtu', 'accept-ra'):
old_value = iface.get(old_key)
if old_value is not None:
# only set HWADDR on physical interfaces
if (old_key == 'mac_address' and
iface['type'] not in ['physical', 'infiniband']):
continue
- iface_cfg[new_key] = old_value
-
- if iface['accept-ra'] is not None:
- iface_cfg['IPV6_FORCE_ACCEPT_RA'] = iface['accept-ra']
+ new_key = cls.cfg_key_maps[flavor].get(old_key)
+ if new_key:
+ iface_cfg[new_key] = old_value
@classmethod
- def _render_subnets(cls, iface_cfg, subnets, has_default_route):
+ def _render_subnets(cls, iface_cfg, subnets, has_default_route, flavor):
# setting base values
- iface_cfg['BOOTPROTO'] = 'none'
+ if flavor == 'suse':
+ iface_cfg['BOOTPROTO'] = 'static'
+ if 'BRIDGE' in iface_cfg:
+ iface_cfg['BOOTPROTO'] = 'dhcp'
+ iface_cfg.drop('BRIDGE')
+ else:
+ iface_cfg['BOOTPROTO'] = 'none'
# modifying base values according to subnets
for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
mtu_key = 'MTU'
subnet_type = subnet.get('type')
if subnet_type == 'dhcp6' or subnet_type == 'ipv6_dhcpv6-stateful':
- # TODO need to set BOOTPROTO to dhcp6 on SUSE
- iface_cfg['IPV6INIT'] = True
- # Configure network settings using DHCPv6
- iface_cfg['DHCPV6C'] = True
+ if flavor == 'suse':
+ # User wants dhcp for both protocols
+ if iface_cfg['BOOTPROTO'] == 'dhcp4':
+ iface_cfg['BOOTPROTO'] = 'dhcp'
+ else:
+ # Only IPv6 is DHCP, IPv4 may be static
+ iface_cfg['BOOTPROTO'] = 'dhcp6'
+ iface_cfg['DHCLIENT6_MODE'] = 'managed'
+ else:
+ iface_cfg['IPV6INIT'] = True
+ # Configure network settings using DHCPv6
+ iface_cfg['DHCPV6C'] = True
elif subnet_type == 'ipv6_dhcpv6-stateless':
- iface_cfg['IPV6INIT'] = True
- # Configure network settings using SLAAC from RAs and optional
- # info from dhcp server using DHCPv6
- iface_cfg['IPV6_AUTOCONF'] = True
- iface_cfg['DHCPV6C'] = True
- # Use Information-request to get only stateless configuration
- # parameters (i.e., without address).
- iface_cfg['DHCPV6C_OPTIONS'] = '-S'
+ if flavor == 'suse':
+ # User wants dhcp for both protocols
+ if iface_cfg['BOOTPROTO'] == 'dhcp4':
+ iface_cfg['BOOTPROTO'] = 'dhcp'
+ else:
+ # Only IPv6 is DHCP, IPv4 may be static
+ iface_cfg['BOOTPROTO'] = 'dhcp6'
+ iface_cfg['DHCLIENT6_MODE'] = 'info'
+ else:
+ iface_cfg['IPV6INIT'] = True
+ # Configure network settings using SLAAC from RAs and
+ # optional info from dhcp server using DHCPv6
+ iface_cfg['IPV6_AUTOCONF'] = True
+ iface_cfg['DHCPV6C'] = True
+ # Use Information-request to get only stateless
+ # configuration parameters (i.e., without address).
+ iface_cfg['DHCPV6C_OPTIONS'] = '-S'
elif subnet_type == 'ipv6_slaac':
- iface_cfg['IPV6INIT'] = True
- # Configure network settings using SLAAC from RAs
- iface_cfg['IPV6_AUTOCONF'] = True
+ if flavor == 'suse':
+ # User wants dhcp for both protocols
+ if iface_cfg['BOOTPROTO'] == 'dhcp4':
+ iface_cfg['BOOTPROTO'] = 'dhcp'
+ else:
+ # Only IPv6 is DHCP, IPv4 may be static
+ iface_cfg['BOOTPROTO'] = 'dhcp6'
+ iface_cfg['DHCLIENT6_MODE'] = 'info'
+ else:
+ iface_cfg['IPV6INIT'] = True
+ # Configure network settings using SLAAC from RAs
+ iface_cfg['IPV6_AUTOCONF'] = True
elif subnet_type in ['dhcp4', 'dhcp']:
+ bootproto_in = iface_cfg['BOOTPROTO']
iface_cfg['BOOTPROTO'] = 'dhcp'
+ if flavor == 'suse' and subnet_type == 'dhcp4':
+ # If dhcp6 is already specified the user wants dhcp
+ # for both protocols
+ if bootproto_in != 'dhcp6':
+ # Only IPv4 is DHCP, IPv6 may be static
+ iface_cfg['BOOTPROTO'] = 'dhcp4'
elif subnet_type in ['static', 'static6']:
+ # RH info
# grep BOOTPROTO sysconfig.txt -A2 | head -3
# BOOTPROTO=none|bootp|dhcp
# 'bootp' or 'dhcp' cause a DHCP client
# to run on the device. Any other
# value causes any static configuration
# in the file to be applied.
- # ==> the following should not be set to 'static'
- # but should remain 'none'
- # if iface_cfg['BOOTPROTO'] == 'none':
- # iface_cfg['BOOTPROTO'] = 'static'
- if subnet_is_ipv6(subnet):
+ if subnet_is_ipv6(subnet) and flavor != 'suse':
mtu_key = 'IPV6_MTU'
iface_cfg['IPV6INIT'] = True
if 'mtu' in subnet:
@@ -389,18 +441,31 @@ class Renderer(renderer.Renderer):
'Network config: ignoring %s device-level mtu:%s'
' because ipv4 subnet-level mtu:%s provided.',
iface_cfg.name, iface_cfg[mtu_key], subnet['mtu'])
- iface_cfg[mtu_key] = subnet['mtu']
+ if subnet_is_ipv6(subnet):
+ if flavor == 'suse':
+ # TODO(rjschwei) write mtu setting to
+ # /etc/sysctl.d/
+ pass
+ else:
+ iface_cfg[mtu_key] = subnet['mtu']
+ else:
+ iface_cfg[mtu_key] = subnet['mtu']
elif subnet_type == 'manual':
- # If the subnet has an MTU setting, then ONBOOT=True
- # to apply the setting
- iface_cfg['ONBOOT'] = mtu_key in iface_cfg
+ if flavor == 'suse':
+ LOG.debug('Unknown subnet type setting "%s"', subnet_type)
+ else:
+ # If the subnet has an MTU setting, then ONBOOT=True
+ # to apply the setting
+ iface_cfg['ONBOOT'] = mtu_key in iface_cfg
else:
raise ValueError("Unknown subnet type '%s' found"
" for interface '%s'" % (subnet_type,
iface_cfg.name))
if subnet.get('control') == 'manual':
- iface_cfg['ONBOOT'] = False
- iface_cfg['STARTMODE'] = 'manual'
+ if flavor == 'suse':
+ iface_cfg['STARTMODE'] = 'manual'
+ else:
+ iface_cfg['ONBOOT'] = False
# set IPv4 and IPv6 static addresses
ipv4_index = -1
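
The flavor switch threads through every _render_* helper; a caller selects it once at construction time, and 'rhel' preserves the old behaviour:

    from cloudinit.net.sysconfig import Renderer

    rhel_renderer = Renderer(config={'flavor': 'rhel'})  # the default flavor
    suse_renderer = Renderer(config={'flavor': 'suse'})  # LLADDR, STARTMODE, ...
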
@@ -409,13 +474,14 @@ class Renderer(renderer.Renderer):
subnet_type = subnet.get('type')
# metric may apply to both dhcp and static config
if 'metric' in subnet:
- iface_cfg['METRIC'] = subnet['metric']
- # TODO(hjensas): Including dhcp6 here is likely incorrect. DHCPv6
- # does not ever provide a default gateway, the default gateway
- # come from RA's. (https://github.com/openSUSE/wicked/issues/570)
- if subnet_type in ['dhcp', 'dhcp4', 'dhcp6']:
- if has_default_route and iface_cfg['BOOTPROTO'] != 'none':
- iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False
+ if flavor != 'suse':
+ iface_cfg['METRIC'] = subnet['metric']
+ if subnet_type in ['dhcp', 'dhcp4']:
+ # On SUSE distros 'DHCLIENT_SET_DEFAULT_ROUTE' is a global
+ # setting in /etc/sysconfig/network/dhcp
+ if flavor != 'suse':
+ if has_default_route and iface_cfg['BOOTPROTO'] != 'none':
+ iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False
continue
elif subnet_type in IPV6_DYNAMIC_TYPES:
continue
@@ -424,14 +490,21 @@ class Renderer(renderer.Renderer):
ipv6_index = ipv6_index + 1
ipv6_cidr = "%s/%s" % (subnet['address'], subnet['prefix'])
if ipv6_index == 0:
- iface_cfg['IPV6ADDR'] = ipv6_cidr
- iface_cfg['IPADDR6'] = ipv6_cidr
+ if flavor == 'suse':
+ iface_cfg['IPADDR6'] = ipv6_cidr
+ else:
+ iface_cfg['IPV6ADDR'] = ipv6_cidr
elif ipv6_index == 1:
- iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr
- iface_cfg['IPADDR6_0'] = ipv6_cidr
+ if flavor == 'suse':
+ iface_cfg['IPADDR6_1'] = ipv6_cidr
+ else:
+ iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr
else:
- iface_cfg['IPV6ADDR_SECONDARIES'] += " " + ipv6_cidr
- iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr
+ if flavor == 'suse':
+ iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr
+ else:
+ iface_cfg['IPV6ADDR_SECONDARIES'] += \
+ " " + ipv6_cidr
else:
ipv4_index = ipv4_index + 1
suff = "" if ipv4_index == 0 else str(ipv4_index)
@@ -439,17 +512,17 @@ class Renderer(renderer.Renderer):
iface_cfg['NETMASK' + suff] = \
net_prefix_to_ipv4_mask(subnet['prefix'])
- if 'gateway' in subnet:
+ if 'gateway' in subnet and flavor != 'suse':
iface_cfg['DEFROUTE'] = True
if is_ipv6_addr(subnet['gateway']):
iface_cfg['IPV6_DEFAULTGW'] = subnet['gateway']
else:
iface_cfg['GATEWAY'] = subnet['gateway']
- if 'dns_search' in subnet:
+ if 'dns_search' in subnet and flavor != 'suse':
iface_cfg['DOMAIN'] = ' '.join(subnet['dns_search'])
- if 'dns_nameservers' in subnet:
+ if 'dns_nameservers' in subnet and flavor != 'suse':
if len(subnet['dns_nameservers']) > 3:
# per resolv.conf(5) MAXNS sets this to 3.
LOG.debug("%s has %d entries in dns_nameservers. "
@@ -459,7 +532,12 @@ class Renderer(renderer.Renderer):
iface_cfg['DNS' + str(i)] = k
@classmethod
- def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets):
+ def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets, flavor):
+ # TODO(rjschwei): route configuration on SUSE distro happens via
+ # ifroute-* files, see lp#1812117. SUSE currently carries a local
+ # patch in their package.
+ if flavor == 'suse':
+ return
for _, subnet in enumerate(subnets, start=len(iface_cfg.children)):
subnet_type = subnet.get('type')
for route in subnet.get('routes', []):
@@ -487,14 +565,7 @@ class Renderer(renderer.Renderer):
# TODO(harlowja): add validation that no other iface has
# also provided the default route?
iface_cfg['DEFROUTE'] = True
- # TODO(hjensas): Including dhcp6 here is likely incorrect.
- # DHCPv6 does not ever provide a default gateway, the
- # default gateway come from RA's.
- # (https://github.com/openSUSE/wicked/issues/570)
- if iface_cfg['BOOTPROTO'] in ('dhcp', 'dhcp4', 'dhcp6'):
- # NOTE(hjensas): DHCLIENT_SET_DEFAULT_ROUTE is SuSE
- # only. RHEL, CentOS, Fedora does not implement this
- # option.
+ if iface_cfg['BOOTPROTO'] in ('dhcp', 'dhcp4'):
iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = True
if 'gateway' in route:
if is_ipv6:
@@ -538,7 +609,9 @@ class Renderer(renderer.Renderer):
iface_cfg['BONDING_OPTS'] = " ".join(bond_opts)
@classmethod
- def _render_physical_interfaces(cls, network_state, iface_contents):
+ def _render_physical_interfaces(
+ cls, network_state, iface_contents, flavor
+ ):
physical_filter = renderer.filter_by_physical
for iface in network_state.iter_interfaces(physical_filter):
iface_name = iface['name']
@@ -547,12 +620,15 @@ class Renderer(renderer.Renderer):
route_cfg = iface_cfg.routes
cls._render_subnets(
- iface_cfg, iface_subnets, network_state.has_default_route
+ iface_cfg, iface_subnets, network_state.has_default_route,
+ flavor
+ )
+ cls._render_subnet_routes(
+ iface_cfg, route_cfg, iface_subnets, flavor
)
- cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
@classmethod
- def _render_bond_interfaces(cls, network_state, iface_contents):
+ def _render_bond_interfaces(cls, network_state, iface_contents, flavor):
bond_filter = renderer.filter_by_type('bond')
slave_filter = renderer.filter_by_attr('bond-master')
for iface in network_state.iter_interfaces(bond_filter):
@@ -566,17 +642,24 @@ class Renderer(renderer.Renderer):
master_cfgs.extend(iface_cfg.children)
for master_cfg in master_cfgs:
master_cfg['BONDING_MASTER'] = True
- master_cfg.kind = 'bond'
+ if flavor != 'suse':
+ master_cfg.kind = 'bond'
if iface.get('mac_address'):
- iface_cfg['MACADDR'] = iface.get('mac_address')
+ if flavor == 'suse':
+ iface_cfg['LLADDR'] = iface.get('mac_address')
+ else:
+ iface_cfg['MACADDR'] = iface.get('mac_address')
iface_subnets = iface.get("subnets", [])
route_cfg = iface_cfg.routes
cls._render_subnets(
- iface_cfg, iface_subnets, network_state.has_default_route
+ iface_cfg, iface_subnets, network_state.has_default_route,
+ flavor
+ )
+ cls._render_subnet_routes(
+ iface_cfg, route_cfg, iface_subnets, flavor
)
- cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
# iter_interfaces on network-state is not sorted; to produce
# consistent numbering we need to sort.
@@ -586,28 +669,44 @@ class Renderer(renderer.Renderer):
if slave_iface['bond-master'] == iface_name])
for index, bond_slave in enumerate(bond_slaves):
- slavestr = 'BONDING_SLAVE%s' % index
+ if flavor == 'suse':
+ slavestr = 'BONDING_SLAVE_%s' % index
+ else:
+ slavestr = 'BONDING_SLAVE%s' % index
iface_cfg[slavestr] = bond_slave
slave_cfg = iface_contents[bond_slave]
- slave_cfg['MASTER'] = iface_name
- slave_cfg['SLAVE'] = True
+ if flavor == 'suse':
+ slave_cfg['BOOTPROTO'] = 'none'
+ slave_cfg['STARTMODE'] = 'hotplug'
+ else:
+ slave_cfg['MASTER'] = iface_name
+ slave_cfg['SLAVE'] = True
@classmethod
- def _render_vlan_interfaces(cls, network_state, iface_contents):
+ def _render_vlan_interfaces(cls, network_state, iface_contents, flavor):
vlan_filter = renderer.filter_by_type('vlan')
for iface in network_state.iter_interfaces(vlan_filter):
iface_name = iface['name']
iface_cfg = iface_contents[iface_name]
- iface_cfg['VLAN'] = True
- iface_cfg['PHYSDEV'] = iface_name[:iface_name.rfind('.')]
+ if flavor == 'suse':
+ vlan_id = iface.get('vlan_id')
+ if vlan_id:
+ iface_cfg['VLAN_ID'] = vlan_id
+ iface_cfg['ETHERDEVICE'] = iface_name[:iface_name.rfind('.')]
+ else:
+ iface_cfg['VLAN'] = True
+ iface_cfg['PHYSDEV'] = iface_name[:iface_name.rfind('.')]
iface_subnets = iface.get("subnets", [])
route_cfg = iface_cfg.routes
cls._render_subnets(
- iface_cfg, iface_subnets, network_state.has_default_route
+ iface_cfg, iface_subnets, network_state.has_default_route,
+ flavor
+ )
+ cls._render_subnet_routes(
+ iface_cfg, route_cfg, iface_subnets, flavor
)
- cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
@staticmethod
def _render_dns(network_state, existing_dns_path=None):
@@ -644,19 +743,39 @@ class Renderer(renderer.Renderer):
return out
@classmethod
- def _render_bridge_interfaces(cls, network_state, iface_contents):
+ def _render_bridge_interfaces(cls, network_state, iface_contents, flavor):
+ bridge_key_map = {
+ old_k: new_k for old_k, new_k in cls.cfg_key_maps[flavor].items()
+ if old_k.startswith('bridge')}
bridge_filter = renderer.filter_by_type('bridge')
+
for iface in network_state.iter_interfaces(bridge_filter):
iface_name = iface['name']
iface_cfg = iface_contents[iface_name]
- iface_cfg.kind = 'bridge'
- for old_key, new_key in cls.bridge_opts_keys:
+ if flavor != 'suse':
+ iface_cfg.kind = 'bridge'
+ for old_key, new_key in bridge_key_map.items():
if old_key in iface:
iface_cfg[new_key] = iface[old_key]
- if iface.get('mac_address'):
- iface_cfg['MACADDR'] = iface.get('mac_address')
+ if flavor == 'suse':
+ if 'BRIDGE_STP' in iface_cfg:
+ if iface_cfg.get('BRIDGE_STP'):
+ iface_cfg['BRIDGE_STP'] = 'on'
+ else:
+ iface_cfg['BRIDGE_STP'] = 'off'
+ if iface.get('mac_address'):
+ key = 'MACADDR'
+ if flavor == 'suse':
+ key = 'LLADDRESS'
+ iface_cfg[key] = iface.get('mac_address')
+
+ if flavor == 'suse':
+ if iface.get('bridge_ports', []):
+ iface_cfg['BRIDGE_PORTS'] = " ".join(
+ iface.get('bridge_ports')
+ )
# Is this the right key to get all the connected interfaces?
for bridged_iface_name in iface.get('bridge_ports', []):
# Ensure all bridged interfaces are correctly tagged
@@ -665,17 +784,23 @@ class Renderer(renderer.Renderer):
bridged_cfgs = [bridged_cfg]
bridged_cfgs.extend(bridged_cfg.children)
for bridge_cfg in bridged_cfgs:
- bridge_cfg['BRIDGE'] = iface_name
+ bridge_value = iface_name
+ if flavor == 'suse':
+ bridge_value = 'yes'
+ bridge_cfg['BRIDGE'] = bridge_value
iface_subnets = iface.get("subnets", [])
route_cfg = iface_cfg.routes
cls._render_subnets(
- iface_cfg, iface_subnets, network_state.has_default_route
+ iface_cfg, iface_subnets, network_state.has_default_route,
+ flavor
+ )
+ cls._render_subnet_routes(
+ iface_cfg, route_cfg, iface_subnets, flavor
)
- cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
@classmethod
- def _render_ib_interfaces(cls, network_state, iface_contents):
+ def _render_ib_interfaces(cls, network_state, iface_contents, flavor):
ib_filter = renderer.filter_by_type('infiniband')
for iface in network_state.iter_interfaces(ib_filter):
iface_name = iface['name']
@@ -684,12 +809,15 @@ class Renderer(renderer.Renderer):
iface_subnets = iface.get("subnets", [])
route_cfg = iface_cfg.routes
cls._render_subnets(
- iface_cfg, iface_subnets, network_state.has_default_route
+ iface_cfg, iface_subnets, network_state.has_default_route,
+ flavor
+ )
+ cls._render_subnet_routes(
+ iface_cfg, route_cfg, iface_subnets, flavor
)
- cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets)
@classmethod
- def _render_sysconfig(cls, base_sysconf_dir, network_state,
+ def _render_sysconfig(cls, base_sysconf_dir, network_state, flavor,
templates=None):
'''Given state, return /etc/sysconfig files + contents'''
if not templates:
@@ -700,13 +828,17 @@ class Renderer(renderer.Renderer):
continue
iface_name = iface['name']
iface_cfg = NetInterface(iface_name, base_sysconf_dir, templates)
- cls._render_iface_shared(iface, iface_cfg)
+ if flavor == 'suse':
+ iface_cfg.drop('DEVICE')
+ # If type detection fails it is considered a bug in SUSE
+ iface_cfg.drop('TYPE')
+ cls._render_iface_shared(iface, iface_cfg, flavor)
iface_contents[iface_name] = iface_cfg
- cls._render_physical_interfaces(network_state, iface_contents)
- cls._render_bond_interfaces(network_state, iface_contents)
- cls._render_vlan_interfaces(network_state, iface_contents)
- cls._render_bridge_interfaces(network_state, iface_contents)
- cls._render_ib_interfaces(network_state, iface_contents)
+ cls._render_physical_interfaces(network_state, iface_contents, flavor)
+ cls._render_bond_interfaces(network_state, iface_contents, flavor)
+ cls._render_vlan_interfaces(network_state, iface_contents, flavor)
+ cls._render_bridge_interfaces(network_state, iface_contents, flavor)
+ cls._render_ib_interfaces(network_state, iface_contents, flavor)
contents = {}
for iface_name, iface_cfg in iface_contents.items():
if iface_cfg or iface_cfg.children:
@@ -728,7 +860,7 @@ class Renderer(renderer.Renderer):
file_mode = 0o644
base_sysconf_dir = util.target_path(target, self.sysconf_dir)
for path, data in self._render_sysconfig(base_sysconf_dir,
- network_state,
+ network_state, self.flavor,
templates=templates).items():
util.write_file(path, data, file_mode)
if self.dns_path:
diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py
index c3fa1e04..bc7bef45 100644
--- a/cloudinit/net/tests/test_dhcp.py
+++ b/cloudinit/net/tests/test_dhcp.py
@@ -62,7 +62,7 @@ class TestParseDHCPLeasesFile(CiTestCase):
{'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}]
write_file(lease_file, content)
- self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file))
+ self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
class TestDHCPRFC3442(CiTestCase):
@@ -88,7 +88,7 @@ class TestDHCPRFC3442(CiTestCase):
'renew': '4 2017/07/27 18:02:30',
'expire': '5 2017/07/28 07:08:15'}]
write_file(lease_file, content)
- self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file))
+ self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
def test_parse_lease_finds_classless_static_routes(self):
"""
@@ -114,7 +114,7 @@ class TestDHCPRFC3442(CiTestCase):
'renew': '4 2017/07/27 18:02:30',
'expire': '5 2017/07/28 07:08:15'}]
write_file(lease_file, content)
- self.assertItemsEqual(expected, parse_dhcp_lease_file(lease_file))
+ self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
@@ -324,7 +324,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
""")
write_file(self.tmp_path('dhcp.leases', tmpdir), lease_content)
- self.assertItemsEqual(
+ self.assertCountEqual(
[{'interface': 'eth9', 'fixed-address': '192.168.2.74',
'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
dhcp_discovery(dhclient_script, 'eth9', tmpdir))
@@ -389,7 +389,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
write_file(pid_file, "%d\n" % my_pid)
m_getppid.return_value = 1 # Indicate that dhclient has daemonized
- self.assertItemsEqual(
+ self.assertCountEqual(
[{'interface': 'eth9', 'fixed-address': '192.168.2.74',
'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
dhcp_discovery(dhclient_script, 'eth9', tmpdir))
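
These test hunks are a straight Python 3 rename: assertItemsEqual existed only in Python 2's unittest, and assertCountEqual (available since Python 3.2) is its equivalent, comparing sequences element-for-element regardless of order. A self-contained illustration:

    import unittest

    class TestOrderInsensitive(unittest.TestCase):
        def test_count_equal(self):
            # same elements with the same multiplicity, in any order
            self.assertCountEqual([1, 2, 2, 3], [3, 2, 1, 2])

    if __name__ == '__main__':
        unittest.main()
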
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
index 6db93e26..835ed807 100644
--- a/cloudinit/net/tests/test_init.py
+++ b/cloudinit/net/tests/test_init.py
@@ -2,16 +2,19 @@
import copy
import errno
-import httpretty
-import mock
+import ipaddress
import os
-import requests
import textwrap
+from unittest import mock
+
+import httpretty
+import pytest
+import requests
import cloudinit.net as net
-from cloudinit.util import ensure_file, write_file, ProcessExecutionError
-from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase
from cloudinit import safeyaml as yaml
+from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase
+from cloudinit.util import ProcessExecutionError, ensure_file, write_file
class TestSysDevPath(CiTestCase):
@@ -341,8 +344,6 @@ class TestGenerateFallbackConfig(CiTestCase):
class TestNetFindFallBackNic(CiTestCase):
- with_logs = True
-
def setUp(self):
super(TestNetFindFallBackNic, self).setUp()
sys_mock = mock.patch('cloudinit.net.get_sys_class_path')
@@ -396,7 +397,7 @@ class TestGetDeviceList(CiTestCase):
"""get_devicelist returns a directory listing for SYS_CLASS_NET."""
write_file(os.path.join(self.sysdir, 'eth0', 'operstate'), 'up')
write_file(os.path.join(self.sysdir, 'eth1', 'operstate'), 'up')
- self.assertItemsEqual(['eth0', 'eth1'], net.get_devicelist())
+ self.assertCountEqual(['eth0', 'eth1'], net.get_devicelist())
class TestGetInterfaceMAC(CiTestCase):
@@ -995,8 +996,6 @@ class TestExtractPhysdevs(CiTestCase):
class TestWaitForPhysdevs(CiTestCase):
- with_logs = True
-
def setUp(self):
super(TestWaitForPhysdevs, self).setUp()
self.add_patch('cloudinit.net.get_interfaces_by_mac',
@@ -1071,8 +1070,6 @@ class TestWaitForPhysdevs(CiTestCase):
class TestNetFailOver(CiTestCase):
- with_logs = True
-
def setUp(self):
super(TestNetFailOver, self).setUp()
self.add_patch('cloudinit.net.util', 'm_util')
@@ -1297,4 +1294,48 @@ class TestNetFailOver(CiTestCase):
m_standby.return_value = False
self.assertFalse(net.is_netfailover(devname, driver))
+
+class TestIsIpAddress:
+ """Tests for net.is_ip_address.
+
+ Instead of testing against many values, we rely on the ipaddress
+ stdlib module to handle all values correctly, and simply test that
+ is_ip_address defers to it.
+ """
+
+ @pytest.mark.parametrize('ip_address_side_effect,expected_return', (
+ (ValueError, False),
+ (lambda _: ipaddress.IPv4Address('192.168.0.1'), True),
+ (lambda _: ipaddress.IPv6Address('2001:db8::'), True),
+ ))
+ def test_is_ip_address(self, ip_address_side_effect, expected_return):
+ with mock.patch('cloudinit.net.ipaddress.ip_address',
+ side_effect=ip_address_side_effect) as m_ip_address:
+ ret = net.is_ip_address(mock.sentinel.ip_address_in)
+ assert expected_return == ret
+ expected_call = mock.call(mock.sentinel.ip_address_in)
+ assert [expected_call] == m_ip_address.call_args_list
+
+
+class TestIsIpv4Address:
+ """Tests for net.is_ipv4_address.
+
+ Instead of testing against many values, we rely on the ipaddress
+ stdlib module to handle all values correctly, and simply test that
+ is_ipv4_address defers to it.
+ """
+
+ @pytest.mark.parametrize('ipv4address_mock,expected_return', (
+ (mock.Mock(side_effect=ValueError), False),
+ (mock.Mock(return_value=ipaddress.IPv4Address('192.168.0.1')), True),
+ ))
+ def test_is_ipv4_address(self, ipv4address_mock, expected_return):
+ with mock.patch('cloudinit.net.ipaddress.IPv4Address',
+ ipv4address_mock) as m_ipv4address:
+ ret = net.is_ipv4_address(mock.sentinel.ip_address_in)
+ assert expected_return == ret
+ expected_call = mock.call(mock.sentinel.ip_address_in)
+ assert [expected_call] == m_ipv4address.call_args_list
+
+
# vi: ts=4 expandtab
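
The two new test classes use mock.sentinel to prove pass-through delegation: a sentinel is a unique opaque object, so if the mocked ipaddress call receives it, the argument was forwarded untouched rather than coincidentally equal. A minimal standalone sketch of the same pattern (names here are illustrative):

    from unittest import mock

    def classify(value, parser):
        # stand-in for is_ip_address: defer entirely to the parser
        try:
            parser(value)
            return True
        except ValueError:
            return False

    parser = mock.Mock(side_effect=ValueError)
    assert classify(mock.sentinel.anything, parser) is False
    parser.assert_called_once_with(mock.sentinel.anything)
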
diff --git a/cloudinit/net/tests/test_network_state.py b/cloudinit/net/tests/test_network_state.py
index fcb4a995..07d726e2 100644
--- a/cloudinit/net/tests/test_network_state.py
+++ b/cloudinit/net/tests/test_network_state.py
@@ -1,6 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-import mock
+from unittest import mock
+
from cloudinit.net import network_state
from cloudinit.tests.helpers import CiTestCase
@@ -44,4 +45,14 @@ class TestNetworkStateParseConfig(CiTestCase):
self.assertNotEqual(None, result)
+class TestNetworkStateParseConfigV2(CiTestCase):
+
+ def test_version_2_ignores_renderer_key(self):
+ ncfg = {'version': 2, 'renderer': 'networkd', 'ethernets': {}}
+ nsi = network_state.NetworkStateInterpreter(version=ncfg['version'],
+ config=ncfg)
+ nsi.parse_config(skip_broken=False)
+ self.assertEqual(ncfg, nsi.as_dict()['config'])
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 6ba21f4d..1001f149 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -91,6 +91,53 @@ def _netdev_info_iproute(ipaddr_out):
return devs
+def _netdev_info_ifconfig_netbsd(ifconfig_data):
+ # fields that need to be returned in devs for each dev
+ devs = {}
+ for line in ifconfig_data.splitlines():
+ if len(line) == 0:
+ continue
+ if line[0] not in ("\t", " "):
+ curdev = line.split()[0]
+ # current ifconfig pops a ':' on the end of the device
+ if curdev.endswith(':'):
+ curdev = curdev[:-1]
+ if curdev not in devs:
+ devs[curdev] = deepcopy(DEFAULT_NETDEV_INFO)
+ toks = line.lower().strip().split()
+ if len(toks) > 1:
+ if re.search(r"flags=[x\d]+<up.*>", toks[1]):
+ devs[curdev]['up'] = True
+
+ for i in range(len(toks)):
+ if toks[i] == "inet": # Create new ipv4 addr entry
+ network, net_bits = toks[i + 1].split('/')
+ devs[curdev]['ipv4'].append(
+ {'ip': network, 'mask': net_prefix_to_ipv4_mask(net_bits)})
+ elif toks[i] == "broadcast":
+ devs[curdev]['ipv4'][-1]['bcast'] = toks[i + 1]
+ elif toks[i] == "address:":
+ devs[curdev]['hwaddr'] = toks[i + 1]
+ elif toks[i] == "inet6":
+ if toks[i + 1] == "addr:":
+ devs[curdev]['ipv6'].append({'ip': toks[i + 2]})
+ else:
+ devs[curdev]['ipv6'].append({'ip': toks[i + 1]})
+ elif toks[i] == "prefixlen": # Add prefix to current ipv6 value
+ addr6 = devs[curdev]['ipv6'][-1]['ip'] + "/" + toks[i + 1]
+ devs[curdev]['ipv6'][-1]['ip'] = addr6
+ elif toks[i].startswith("scope:"):
+ devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:")
+ elif toks[i] == "scopeid":
+ res = re.match(r'.*<(\S+)>', toks[i + 1])
+ if res:
+ devs[curdev]['ipv6'][-1]['scope6'] = res.group(1)
+ else:
+ devs[curdev]['ipv6'][-1]['scope6'] = toks[i + 1]
+
+ return devs
+
+
def _netdev_info_ifconfig(ifconfig_data):
# fields that need to be returned in devs for each dev
devs = {}
@@ -149,7 +196,10 @@ def _netdev_info_ifconfig(ifconfig_data):
def netdev_info(empty=""):
devs = {}
- if util.which('ip'):
+ if util.is_NetBSD():
+ (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1])
+ devs = _netdev_info_ifconfig_netbsd(ifcfg_out)
+ elif util.which('ip'):
# Try iproute first of all
(ipaddr_out, _err) = util.subp(["ip", "addr", "show"])
devs = _netdev_info_iproute(ipaddr_out)
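
The NetBSD parser keys off the token layout of ifconfig -a output: a device line starts in column zero, while attribute lines are indented. A hedged usage sketch; the sample output below is illustrative, not captured from a real NetBSD host:

    from cloudinit.netinfo import _netdev_info_ifconfig_netbsd

    SAMPLE = (
        "vioif0: flags=0x8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST>"
        " mtu 1500\n"
        "        address: 00:a0:98:16:ee:ae\n"
        "        inet 192.168.1.80/24 broadcast 192.168.1.255\n"
        "        inet6 fe80::2a0:98ff:fe16:eeae%vioif0/64 scopeid 0x1\n"
    )

    devs = _netdev_info_ifconfig_netbsd(SAMPLE)
    print(devs['vioif0']['up'])       # True
    print(devs['vioif0']['hwaddr'])   # 00:a0:98:16:ee:ae
    print(devs['vioif0']['ipv4'][0])  # {'ip': '192.168.1.80',
                                      #  'mask': '255.255.255.0',
                                      #  'bcast': '192.168.1.255'}
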
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 6605e795..946df7e0 100755
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -1,25 +1,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
import abc
-import uuid
import fcntl
import json
-import six
import os
+import queue
import struct
import threading
import time
+import uuid
+from datetime import datetime
from cloudinit import log as logging
from cloudinit.registry import DictRegistry
from cloudinit import (url_helper, util)
-from datetime import datetime
-from six.moves.queue import Empty as QueueEmptyError
-
-if six.PY2:
- from multiprocessing.queues import JoinableQueue as JQueue
-else:
- from queue import Queue as JQueue
LOG = logging.getLogger(__name__)
@@ -28,8 +22,7 @@ class ReportException(Exception):
pass
-@six.add_metaclass(abc.ABCMeta)
-class ReportingHandler(object):
+class ReportingHandler(metaclass=abc.ABCMeta):
"""Base class for report handlers.
Implement :meth:`~publish_event` for controlling what
@@ -141,7 +134,7 @@ class HyperVKvpReportingHandler(ReportingHandler):
self._kvp_file_path)
self._event_types = event_types
- self.q = JQueue()
+ self.q = queue.Queue()
self.incarnation_no = self._get_incarnation_no()
self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX,
self.incarnation_no)
@@ -303,7 +296,7 @@ class HyperVKvpReportingHandler(ReportingHandler):
# get all the rest of the events in the queue
event = self.q.get(block=False)
items_from_queue += 1
- except QueueEmptyError:
+ except queue.Empty:
event = None
try:
self._append_kvp_item(encoded_data)
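
With six gone, both the queue class and its empty-queue exception come from the stdlib queue module; the Py2 JoinableQueue / Py3 Queue split above collapses to queue.Queue, and the drain loop catches queue.Empty. The non-blocking drain pattern in isolation:

    import queue

    q = queue.Queue()
    for item in ('a', 'b', 'c'):
        q.put(item)

    drained = []
    while True:
        try:
            # get(block=False) raises queue.Empty once nothing is left
            drained.append(q.get(block=False))
        except queue.Empty:
            break
    print(drained)  # ['a', 'b', 'c']
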
diff --git a/cloudinit/serial.py b/cloudinit/serial.py
index f9ef7acc..67486e09 100644
--- a/cloudinit/serial.py
+++ b/cloudinit/serial.py
@@ -1,7 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from __future__ import absolute_import
-
try:
from serial import Serial
except ImportError:
diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py
index 12fdfe6c..9272d22d 100644
--- a/cloudinit/signal_handler.py
+++ b/cloudinit/signal_handler.py
@@ -9,8 +9,7 @@
import inspect
import signal
import sys
-
-from six import StringIO
+from io import StringIO
from cloudinit import log as logging
from cloudinit import util
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index f77923c2..ae31934b 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -71,11 +71,11 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
if not found:
dslist = self.sys_cfg.get('datasource_list')
for dev in find_candidate_devs(dslist=dslist):
- try:
- if util.is_FreeBSD() and dev.startswith("/dev/cd"):
+ mtype = None
+ if util.is_BSD():
+ if dev.startswith("/dev/cd"):
mtype = "cd9660"
- else:
- mtype = None
+ try:
results = util.mount_cb(dev, read_config_drive,
mtype=mtype)
found = dev
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index b9f346a6..355b4e2f 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -29,8 +29,10 @@ STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
STRICT_ID_DEFAULT = "warn"
API_TOKEN_ROUTE = 'latest/api/token'
-API_TOKEN_DISABLED = '_ec2_disable_api_token'
AWS_TOKEN_TTL_SECONDS = '21600'
+AWS_TOKEN_PUT_HEADER = 'X-aws-ec2-metadata-token'
+AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + '-ttl-seconds'
+AWS_TOKEN_REDACT = [AWS_TOKEN_PUT_HEADER, AWS_TOKEN_REQ_HEADER]
class CloudNames(object):
@@ -60,7 +62,7 @@ class DataSourceEc2(sources.DataSource):
# Priority ordered list of additional metadata versions which will be tried
# for extended metadata content. IPv6 support comes in 2016-09-02
- extended_metadata_versions = ['2016-09-02']
+ extended_metadata_versions = ['2018-09-24', '2016-09-02']
# Setup read_url parameters per get_url_params.
url_max_wait = 120
@@ -158,7 +160,8 @@ class DataSourceEc2(sources.DataSource):
for api_ver in self.extended_metadata_versions:
url = url_tmpl.format(self.metadata_address, api_ver)
try:
- resp = uhelp.readurl(url=url, headers=headers)
+ resp = uhelp.readurl(url=url, headers=headers,
+ headers_redact=AWS_TOKEN_REDACT)
except uhelp.UrlError as e:
LOG.debug('url %s raised exception %s', url, e)
else:
@@ -180,6 +183,7 @@ class DataSourceEc2(sources.DataSource):
self.identity = ec2.get_instance_identity(
api_version, self.metadata_address,
headers_cb=self._get_headers,
+ headers_redact=AWS_TOKEN_REDACT,
exception_cb=self._refresh_stale_aws_token_cb).get(
'document', {})
return self.identity.get(
@@ -188,6 +192,12 @@ class DataSourceEc2(sources.DataSource):
return self.metadata['instance-id']
def _maybe_fetch_api_token(self, mdurls, timeout=None, max_wait=None):
+ """ Get an API token for EC2 Instance Metadata Service.
+
+ On EC2. IMDS will always answer an API token, unless
+ the instance owner has disabled the IMDS HTTP endpoint or
+ the network topology conflicts with the configured hop-limit.
+ """
if self.cloud_name != CloudNames.AWS:
return
@@ -200,17 +210,33 @@ class DataSourceEc2(sources.DataSource):
urls.append(cur)
url2base[cur] = url
- # use the self._status_cb to check for Read errors, which means
- # we can't reach the API token URL, so we should disable IMDSv2
+ # use the self._imds_exception_cb to check for Read errors
LOG.debug('Fetching Ec2 IMDSv2 API Token')
- url, response = uhelp.wait_for_url(
- urls=urls, max_wait=1, timeout=1, status_cb=self._status_cb,
- headers_cb=self._get_headers, request_method=request_method)
+
+ response = None
+ url = None
+ url_params = self.get_url_params()
+ try:
+ url, response = uhelp.wait_for_url(
+ urls=urls, max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds, status_cb=LOG.warning,
+ headers_cb=self._get_headers,
+ exception_cb=self._imds_exception_cb,
+ request_method=request_method,
+ headers_redact=AWS_TOKEN_REDACT)
+ except uhelp.UrlError:
+ # We use the raised exception to interrupt the retry loop.
+ # Nothing else to do here.
+ pass
if url and response:
self._api_token = response
return url2base[url]
+ # If we get here, then wait_for_url timed out waiting for IMDS
+ # or the IMDS HTTP endpoint is disabled.
+ return None
+
def wait_for_metadata_service(self):
mcfg = self.ds_cfg
@@ -234,9 +260,11 @@ class DataSourceEc2(sources.DataSource):
# try the api token path first
metadata_address = self._maybe_fetch_api_token(mdurls)
- if not metadata_address:
- if self._api_token == API_TOKEN_DISABLED:
- LOG.warning('Retrying with IMDSv1')
+ # When running on EC2, we always access IMDS with an API token.
+ # If we could not get an API token, then we assume the IMDS
+ # endpoint was disabled and we move on without a data source.
+ # Fall back to IMDSv1 when not running on EC2.
+ if not metadata_address and self.cloud_name != CloudNames.AWS:
# if we can't get a token, use instance-id path
urls = []
url2base = {}
@@ -252,7 +280,8 @@ class DataSourceEc2(sources.DataSource):
url, _ = uhelp.wait_for_url(
urls=urls, max_wait=url_params.max_wait_seconds,
timeout=url_params.timeout_seconds, status_cb=LOG.warning,
- headers_cb=self._get_headers, request_method=request_method)
+ headers_redact=AWS_TOKEN_REDACT, headers_cb=self._get_headers,
+ request_method=request_method)
if url:
metadata_address = url2base[url]
@@ -260,6 +289,8 @@ class DataSourceEc2(sources.DataSource):
if metadata_address:
self.metadata_address = metadata_address
LOG.debug("Using metadata source: '%s'", self.metadata_address)
+ elif self.cloud_name == CloudNames.AWS:
+ LOG.warning("IMDS's HTTP endpoint is probably disabled")
else:
LOG.critical("Giving up on md from %s after %s seconds",
urls, int(time.time() - start_time))
@@ -374,13 +405,16 @@ class DataSourceEc2(sources.DataSource):
logfunc=LOG.debug, msg='Re-crawl of metadata service',
func=self.get_data)
- # Limit network configuration to only the primary/fallback nic
iface = self.fallback_interface
- macs_to_nics = {net.get_interface_mac(iface): iface}
net_md = self.metadata.get('network')
if isinstance(net_md, dict):
+ # SRU_BLOCKER: xenial, bionic and eoan should default
+ # apply_full_imds_network_config to False to retain original
+ # behavior on those releases.
result = convert_ec2_metadata_network_config(
- net_md, macs_to_nics=macs_to_nics, fallback_nic=iface)
+ net_md, fallback_nic=iface,
+ full_network_config=util.get_cfg_option_bool(
+ self.ds_cfg, 'apply_full_imds_network_config', True))
# RELEASE_BLOCKER: xenial should drop the below if statement,
# because the issue being addressed doesn't exist pre-netplan.
@@ -420,6 +454,7 @@ class DataSourceEc2(sources.DataSource):
if not self.wait_for_metadata_service():
return {}
api_version = self.get_metadata_api_version()
+ redact = AWS_TOKEN_REDACT
crawled_metadata = {}
if self.cloud_name == CloudNames.AWS:
exc_cb = self._refresh_stale_aws_token_cb
@@ -429,14 +464,17 @@ class DataSourceEc2(sources.DataSource):
try:
crawled_metadata['user-data'] = ec2.get_instance_userdata(
api_version, self.metadata_address,
- headers_cb=self._get_headers, exception_cb=exc_cb_ud)
+ headers_cb=self._get_headers, headers_redact=redact,
+ exception_cb=exc_cb_ud)
crawled_metadata['meta-data'] = ec2.get_instance_metadata(
api_version, self.metadata_address,
- headers_cb=self._get_headers, exception_cb=exc_cb)
+ headers_cb=self._get_headers, headers_redact=redact,
+ exception_cb=exc_cb)
if self.cloud_name == CloudNames.AWS:
identity = ec2.get_instance_identity(
api_version, self.metadata_address,
- headers_cb=self._get_headers, exception_cb=exc_cb)
+ headers_cb=self._get_headers, headers_redact=redact,
+ exception_cb=exc_cb)
crawled_metadata['dynamic'] = {'instance-identity': identity}
except Exception:
util.logexc(
@@ -455,11 +493,12 @@ class DataSourceEc2(sources.DataSource):
if self.cloud_name != CloudNames.AWS:
return None
LOG.debug("Refreshing Ec2 metadata API token")
- request_header = {'X-aws-ec2-metadata-token-ttl-seconds': seconds}
+ request_header = {AWS_TOKEN_REQ_HEADER: seconds}
token_url = '{}/{}'.format(self.metadata_address, API_TOKEN_ROUTE)
try:
- response = uhelp.readurl(
- token_url, headers=request_header, request_method="PUT")
+ response = uhelp.readurl(token_url, headers=request_header,
+ headers_redact=AWS_TOKEN_REDACT,
+ request_method="PUT")
except uhelp.UrlError as e:
LOG.warning(
'Unable to get API token: %s raised exception %s',
@@ -484,11 +523,29 @@ class DataSourceEc2(sources.DataSource):
self._api_token = None
return True # always retry
- def _status_cb(self, msg, exc=None):
- LOG.warning(msg)
- if 'Read timed out' in msg:
- LOG.warning('Cannot use Ec2 IMDSv2 API tokens, using IMDSv1')
- self._api_token = API_TOKEN_DISABLED
+ def _imds_exception_cb(self, msg, exception=None):
+ """Fail quickly on proper AWS if IMDSv2 rejects API token request
+
+ Guidance from Amazon is that if IMDSv2 had disabled token requests
+ by returning a 403, or cloud-init malformed requests resulting in
+ other 40X errors, we want the datasource detection to fail quickly
+ without retries as those symptoms will likely not be resolved by
+ retries.
+
+ Exceptions such as requests.ConnectionError due to IMDS being
+ temporarily unroutable or unavailable will still retry due to the
+ callsite wait_for_url.
+ """
+ if isinstance(exception, uhelp.UrlError):
+ # requests.ConnectionError will have exception.code == None
+ if exception.code and exception.code >= 400:
+ if exception.code == 403:
+ LOG.warning('Ec2 IMDS endpoint returned a 403 error. '
+ 'HTTP endpoint is disabled. Aborting.')
+ else:
+ LOG.warning('Fatal error while requesting '
+ 'Ec2 IMDSv2 API tokens')
+ raise exception
def _get_headers(self, url=''):
"""Return a dict of headers for accessing a url.
@@ -496,12 +553,10 @@ class DataSourceEc2(sources.DataSource):
If _api_token is unset on AWS, attempt to refresh the token via a PUT
and then return the updated token header.
"""
- if self.cloud_name != CloudNames.AWS or (self._api_token ==
- API_TOKEN_DISABLED):
+ if self.cloud_name != CloudNames.AWS:
return {}
# Request a 6 hour token if URL is API_TOKEN_ROUTE
- request_token_header = {
- 'X-aws-ec2-metadata-token-ttl-seconds': AWS_TOKEN_TTL_SECONDS}
+ request_token_header = {AWS_TOKEN_REQ_HEADER: AWS_TOKEN_TTL_SECONDS}
if API_TOKEN_ROUTE in url:
return request_token_header
if not self._api_token:
@@ -511,7 +566,7 @@ class DataSourceEc2(sources.DataSource):
self._api_token = self._refresh_api_token()
if not self._api_token:
return {}
- return {'X-aws-ec2-metadata-token': self._api_token}
+ return {AWS_TOKEN_PUT_HEADER: self._api_token}
class DataSourceEc2Local(DataSourceEc2):
@@ -667,9 +722,10 @@ def _collect_platform_data():
return data
-def convert_ec2_metadata_network_config(network_md, macs_to_nics=None,
- fallback_nic=None):
- """Convert ec2 metadata to network config version 1 data dict.
+def convert_ec2_metadata_network_config(
+ network_md, macs_to_nics=None, fallback_nic=None,
+ full_network_config=True):
+ """Convert ec2 metadata to network config version 2 data dict.
@param: network_md: 'network' portion of EC2 metadata.
generally formed as {"interfaces": {"macs": {}} where
@@ -679,28 +735,105 @@ def convert_ec2_metadata_network_config(network_md, macs_to_nics=None,
not provided, get_interfaces_by_mac is called to get it from the OS.
@param: fallback_nic: Optionally provide the primary nic interface name.
This nic will be guaranteed to minimally have a dhcp4 configuration.
+ @param: full_network_config: Boolean set True to configure all networking
+ presented by IMDS. This includes rendering secondary IPv4 and IPv6
+ addresses on all NICs and rendering network config on secondary NICs.
+ If False, only the primary nic will be configured and only with dhcp
+ (IPv4/IPv6).
- @return A dict of network config version 1 based on the metadata and macs.
+ @return A dict of network config version 2 based on the metadata and macs.
"""
- netcfg = {'version': 1, 'config': []}
+ netcfg = {'version': 2, 'ethernets': {}}
if not macs_to_nics:
macs_to_nics = net.get_interfaces_by_mac()
macs_metadata = network_md['interfaces']['macs']
- for mac, nic_name in macs_to_nics.items():
+
+ if not full_network_config:
+ for mac, nic_name in macs_to_nics.items():
+ if nic_name == fallback_nic:
+ break
+ dev_config = {'dhcp4': True,
+ 'dhcp6': False,
+ 'match': {'macaddress': mac.lower()},
+ 'set-name': nic_name}
+ nic_metadata = macs_metadata.get(mac)
+ if nic_metadata.get('ipv6s'): # Any IPv6 addresses configured
+ dev_config['dhcp6'] = True
+ netcfg['ethernets'][nic_name] = dev_config
+ return netcfg
+ # Apply network config for all nics and any secondary IPv4/v6 addresses
+ for mac, nic_name in sorted(macs_to_nics.items()):
nic_metadata = macs_metadata.get(mac)
if not nic_metadata:
continue # Not a physical nic represented in metadata
- nic_cfg = {'type': 'physical', 'name': nic_name, 'subnets': []}
- nic_cfg['mac_address'] = mac
- if (nic_name == fallback_nic or nic_metadata.get('public-ipv4s') or
- nic_metadata.get('local-ipv4s')):
- nic_cfg['subnets'].append({'type': 'dhcp4'})
- if nic_metadata.get('ipv6s'):
- nic_cfg['subnets'].append({'type': 'dhcp6'})
- netcfg['config'].append(nic_cfg)
+ # device-number is zero-indexed; we want it 1-indexed for the
+ # multiplication on the following line
+ nic_idx = int(nic_metadata['device-number']) + 1
+ dhcp_override = {'route-metric': nic_idx * 100}
+ dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override,
+ 'dhcp6': False,
+ 'match': {'macaddress': mac.lower()},
+ 'set-name': nic_name}
+ if nic_metadata.get('ipv6s'): # Any IPv6 addresses configured
+ dev_config['dhcp6'] = True
+ dev_config['dhcp6-overrides'] = dhcp_override
+ dev_config['addresses'] = get_secondary_addresses(nic_metadata, mac)
+ if not dev_config['addresses']:
+ dev_config.pop('addresses') # Since we found none configured
+ netcfg['ethernets'][nic_name] = dev_config
+ # Remove route-metric dhcp overrides if only one nic configured
+ if len(netcfg['ethernets']) == 1:
+ for nic_name in netcfg['ethernets'].keys():
+ netcfg['ethernets'][nic_name].pop('dhcp4-overrides')
+ netcfg['ethernets'][nic_name].pop('dhcp6-overrides', None)
return netcfg
+def get_secondary_addresses(nic_metadata, mac):
+ """Parse interface-specific nic metadata and return any secondary IPs
+
+ :return: List of secondary IPv4 or IPv6 addresses to configure on the
+ interface
+ """
+ ipv4s = nic_metadata.get('local-ipv4s')
+ ipv6s = nic_metadata.get('ipv6s')
+ addresses = []
+ # In metadata versions < 2018-09-24, local-ipv4s or ipv6s is a str with one IP
+ if isinstance(ipv4s, list) and len(ipv4s) > 1:
+ addresses.extend(
+ _get_secondary_addresses(
+ nic_metadata, 'subnet-ipv4-cidr-block', mac, ipv4s, '24'))
+ if isinstance(ipv6s, list) and len(ipv6s) > 1:
+ addresses.extend(
+ _get_secondary_addresses(
+ nic_metadata, 'subnet-ipv6-cidr-block', mac, ipv6s, '128'))
+ return sorted(addresses)
+
+
+def _get_secondary_addresses(nic_metadata, cidr_key, mac, ips, default_prefix):
+ """Return list of IP addresses as CIDRs for secondary IPs
+
+ The CIDR prefix will be default_prefix if cidr_key is absent or not
+ parseable in nic_metadata.
+ """
+ addresses = []
+ cidr = nic_metadata.get(cidr_key)
+ prefix = default_prefix
+ if not cidr or len(cidr.split('/')) != 2:
+ ip_type = 'ipv4' if 'ipv4' in cidr_key else 'ipv6'
+ LOG.warning(
+ 'Could not parse %s %s for mac %s. %s network'
+ ' config prefix defaults to /%s',
+ cidr_key, cidr, mac, ip_type, prefix)
+ else:
+ prefix = cidr.split('/')[1]
+ # We know we have > 1 IPs in metadata for this IP type
+ for ip in ips[1:]:
+ addresses.append(
+ '{ip}/{prefix}'.format(ip=ip, prefix=prefix))
+ return addresses
+
+
# Used to match classes to dependencies
datasources = [
(DataSourceEc2Local, (sources.DEP_FILESYSTEM,)), # Run at init-local
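
get_secondary_addresses is a pure function over the per-NIC metadata, so the prefix handling is easy to exercise directly. A worked example; the metadata dict below is illustrative:

    from cloudinit.sources.DataSourceEc2 import get_secondary_addresses

    nic_metadata = {
        # the first address is the primary (covered by dhcp4);
        # everything after it is rendered as a static secondary address
        'local-ipv4s': ['172.31.1.10', '172.31.1.11', '172.31.1.12'],
        'subnet-ipv4-cidr-block': '172.31.0.0/20',
    }
    print(get_secondary_addresses(nic_metadata, '0a:07:84:3d:3e:5f'))
    # ['172.31.1.11/20', '172.31.1.12/20']

If subnet-ipv4-cidr-block were missing or malformed, the same call would log a warning and fall back to the /24 default prefix.
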
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 517913aa..c80f70c2 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -6,8 +6,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from __future__ import print_function
-
import hashlib
import os
import time
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index ee748b41..e8856920 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -36,23 +36,14 @@ class DataSourceNoCloud(sources.DataSource):
return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
def _get_devices(self, label):
- if util.is_FreeBSD():
- devlist = [
- p for p in ['/dev/msdosfs/' + label, '/dev/iso9660/' + label]
- if os.path.exists(p)]
- else:
- # Query optical drive to get it in blkid cache for 2.6 kernels
- util.find_devs_with(path="/dev/sr0")
- util.find_devs_with(path="/dev/sr1")
-
- fslist = util.find_devs_with("TYPE=vfat")
- fslist.extend(util.find_devs_with("TYPE=iso9660"))
+ fslist = util.find_devs_with("TYPE=vfat")
+ fslist.extend(util.find_devs_with("TYPE=iso9660"))
- label_list = util.find_devs_with("LABEL=%s" % label.upper())
- label_list.extend(util.find_devs_with("LABEL=%s" % label.lower()))
+ label_list = util.find_devs_with("LABEL=%s" % label.upper())
+ label_list.extend(util.find_devs_with("LABEL=%s" % label.lower()))
- devlist = list(set(fslist) & set(label_list))
- devlist.sort(reverse=True)
+ devlist = list(set(fslist) & set(label_list))
+ devlist.sort(reverse=True)
return devlist
def _get_data(self):
@@ -370,7 +361,7 @@ def _merge_new_seed(cur, seeded):
class DataSourceNoCloudNet(DataSourceNoCloud):
def __init__(self, sys_cfg, distro, paths):
DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)
- self.supported_seed_starts = ("http://", "https://", "ftp://")
+ self.supported_seed_starts = ("http://", "https://")
# Used to match classes to dependencies
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 896841e3..41f999e3 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -8,19 +8,15 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from xml.dom import minidom
-
import base64
import os
import re
import time
-
-import six
+from xml.dom import minidom
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import util
-
from cloudinit.sources.helpers.vmware.imc.config \
import Config
from cloudinit.sources.helpers.vmware.imc.config_custom_script \
@@ -41,7 +37,8 @@ from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
enable_nics,
get_nics_to_enable,
set_customization_status,
- get_tools_config
+ get_tools_config,
+ set_gc_status
)
LOG = logging.getLogger(__name__)
@@ -144,6 +141,8 @@ class DataSourceOVF(sources.DataSource):
try:
cf = ConfigFile(vmwareImcConfigFilePath)
self._vmware_cust_conf = Config(cf)
+ set_gc_status(self._vmware_cust_conf, "Started")
+
(md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf)
self._vmware_nics_to_enable = get_nics_to_enable(nicspath)
imcdirpath = os.path.dirname(vmwareImcConfigFilePath)
@@ -175,7 +174,8 @@ class DataSourceOVF(sources.DataSource):
"Error parsing the customization Config File",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
if special_customization:
if customscript:
@@ -187,7 +187,8 @@ class DataSourceOVF(sources.DataSource):
"Error executing pre-customization script",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
try:
LOG.debug("Preparing the Network configuration")
@@ -201,7 +202,8 @@ class DataSourceOVF(sources.DataSource):
"Error preparing Network Configuration",
e,
GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
if special_customization:
LOG.debug("Applying password customization")
@@ -219,7 +221,8 @@ class DataSourceOVF(sources.DataSource):
"Error applying Password Configuration",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
if customscript:
try:
@@ -232,7 +235,8 @@ class DataSourceOVF(sources.DataSource):
"Error executing post-customization script",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
if product_marker:
try:
@@ -244,7 +248,8 @@ class DataSourceOVF(sources.DataSource):
"Error creating marker files",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
- vmwareImcConfigFilePath)
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf)
self._vmware_cust_found = True
found.append('vmware-tools')
@@ -256,6 +261,7 @@ class DataSourceOVF(sources.DataSource):
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_DONE,
GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
+ set_gc_status(self._vmware_cust_conf, "Successful")
else:
np = [('com.vmware.guestInfo', transport_vmware_guestinfo),
@@ -331,7 +337,7 @@ class DataSourceOVFNet(DataSourceOVF):
def __init__(self, sys_cfg, distro, paths):
DataSourceOVF.__init__(self, sys_cfg, distro, paths)
self.seed_dir = os.path.join(paths.seed_dir, 'ovf-net')
- self.supported_seed_starts = ("http://", "https://", "ftp://")
+ self.supported_seed_starts = ("http://", "https://")
self.vmware_customization_supported = False
@@ -458,7 +464,7 @@ def maybe_cdrom_device(devname):
"""
if not devname:
return False
- elif not isinstance(devname, six.string_types):
+ elif not isinstance(devname, str):
raise ValueError("Unexpected input for devname: %s" % devname)
# resolve '..' and multi '/' elements
@@ -650,7 +656,7 @@ def setup_marker_files(markerid, marker_dir):
open(markerfile, 'w').close()
-def _raise_error_status(prefix, error, event, config_file):
+def _raise_error_status(prefix, error, event, config_file, conf):
"""
Raise error and send customization status to the underlying VMware
Virtualization Platform. Also, cleanup the imc directory.
@@ -659,6 +665,7 @@ def _raise_error_status(prefix, error, event, config_file):
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
event)
+ set_gc_status(conf, prefix)
util.del_dir(os.path.dirname(config_file))
raise error
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 7a5e71b6..bf539091 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -29,7 +29,10 @@ DMI_PRODUCT_NOVA = 'OpenStack Nova'
DMI_PRODUCT_COMPUTE = 'OpenStack Compute'
VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE]
DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud'
-VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM]
+# See github.com/sapcc/helm-charts/blob/master/openstack/nova/values.yaml
+# -> compute.defaults.vmware.smbios_asset_tag for this value
+DMI_ASSET_TAG_SAPCCLOUD = 'SAP CCloud VM'
+VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM, DMI_ASSET_TAG_SAPCCLOUD]
class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
index eec87403..90e1881a 100644
--- a/cloudinit/sources/DataSourceOracle.py
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -15,17 +15,19 @@ Notes:
* Both bare-metal and vms provide chassis-asset-tag of OracleCloud.com
"""
-from cloudinit.url_helper import combine_url, readurl, UrlError
-from cloudinit.net import dhcp, get_interfaces_by_mac, is_netfail_master
-from cloudinit import net
-from cloudinit import sources
-from cloudinit import util
-from cloudinit.net import cmdline
-from cloudinit import log as logging
-
import json
import re
+from cloudinit import log as logging
+from cloudinit import net, sources, util
+from cloudinit.net import (
+ cmdline,
+ dhcp,
+ get_interfaces_by_mac,
+ is_netfail_master,
+)
+from cloudinit.url_helper import UrlError, combine_url, readurl
+
LOG = logging.getLogger(__name__)
BUILTIN_DS_CONFIG = {
@@ -248,15 +250,9 @@ class DataSourceOracle(sources.DataSource):
@property
def network_config(self):
"""Network config is read from initramfs provided files
- If none is present, then we fall back to fallback configuration.
-
- One thing to note here is that this method is not currently
- considered at all if there is is kernel/initramfs provided
- data. In that case, stages considers that the cmdline data
- overrides datasource provided data and does not consult here.
- We nonetheless return cmdline provided config if present
- and fallback to generate fallback."""
+ If none is present, then we fall back to the generated fallback configuration.
+ """
if self._network_config == sources.UNSET:
# this is v1
self._network_config = cmdline.read_initramfs_config()
diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py
index c3cd5c79..084cb7d5 100644
--- a/cloudinit/sources/DataSourceRbxCloud.py
+++ b/cloudinit/sources/DataSourceRbxCloud.py
@@ -55,11 +55,18 @@ def gratuitous_arp(items, distro):
if distro.name in ['fedora', 'centos', 'rhel']:
source_param = '-s'
for item in items:
- _sub_arp([
- '-c', '2',
- source_param, item['source'],
- item['destination']
- ])
+ try:
+ _sub_arp([
+ '-c', '2',
+ source_param, item['source'],
+ item['destination']
+ ])
+ except util.ProcessExecutionError as error:
+ # warning only, because the system can still function properly
+ # despite the failure - a stale ARP table entry may simply be
+ # waiting for expiration
+ LOG.warning('Failed to arping from "%s" to "%s": %s',
+ item['source'], item['destination'], error)
def get_md():
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index b573b382..83c2bf65 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -188,7 +188,7 @@ class DataSourceScaleway(sources.DataSource):
self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
self._fallback_interface = None
- self._network_config = None
+ self._network_config = sources.UNSET
def _crawl_metadata(self):
resp = url_helper.readurl(self.metadata_address,
@@ -227,7 +227,12 @@ class DataSourceScaleway(sources.DataSource):
Configure networking according to data received from the
metadata API.
"""
- if self._network_config:
+ if self._network_config is None:
+ LOG.warning('Found None as cached _network_config. '
+ 'Resetting to %s', sources.UNSET)
+ self._network_config = sources.UNSET
+
+ if self._network_config != sources.UNSET:
return self._network_config
if self._fallback_interface is None:
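
Caching sources.UNSET instead of None distinguishes "never computed" from "computed and legitimately empty": a falsy cached value no longer forces a recrawl on every property access. The sentinel pattern in isolation (names here are illustrative):

    _UNSET = object()  # unique sentinel, never equal to any real value

    class Cached:
        def __init__(self):
            self._value = _UNSET

        def get(self, compute):
            # identity check distinguishes "not yet computed" from a
            # cached falsy result such as None, {} or []
            if self._value is _UNSET:
                self._value = compute()
            return self._value

    c = Cached()
    print(c.get(lambda: None))  # computes once and caches None
    print(c.get(lambda: 42))    # still None: cached, not recomputed
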
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index e6baf8f4..923e3cea 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -9,21 +9,19 @@
# This file is part of cloud-init. See LICENSE file for license information.
import abc
-from collections import namedtuple
import copy
import json
import os
-import six
+from collections import namedtuple
-from cloudinit.atomic_helper import write_json
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import net
-from cloudinit.event import EventType
from cloudinit import type_utils
from cloudinit import user_data as ud
from cloudinit import util
-
+from cloudinit.atomic_helper import write_json
+from cloudinit.event import EventType
from cloudinit.filters import launch_index
from cloudinit.reporting import events
@@ -91,26 +89,26 @@ def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
@return Dict copy of processed metadata.
"""
md_copy = copy.deepcopy(metadata)
- md_copy['base64_encoded_keys'] = []
- md_copy['sensitive_keys'] = []
+ base64_encoded_keys = []
+ sens_keys = []
for key, val in metadata.items():
if key_path:
sub_key_path = key_path + '/' + key
else:
sub_key_path = key
if key in sensitive_keys or sub_key_path in sensitive_keys:
- md_copy['sensitive_keys'].append(sub_key_path)
+ sens_keys.append(sub_key_path)
if isinstance(val, str) and val.startswith('ci-b64:'):
- md_copy['base64_encoded_keys'].append(sub_key_path)
+ base64_encoded_keys.append(sub_key_path)
md_copy[key] = val.replace('ci-b64:', '')
if isinstance(val, dict):
return_val = process_instance_metadata(
val, sub_key_path, sensitive_keys)
- md_copy['base64_encoded_keys'].extend(
- return_val.pop('base64_encoded_keys'))
- md_copy['sensitive_keys'].extend(
- return_val.pop('sensitive_keys'))
+ base64_encoded_keys.extend(return_val.pop('base64_encoded_keys'))
+ sens_keys.extend(return_val.pop('sensitive_keys'))
md_copy[key] = return_val
+ md_copy['base64_encoded_keys'] = sorted(base64_encoded_keys)
+ md_copy['sensitive_keys'] = sorted(sens_keys)
return md_copy
@@ -136,8 +134,7 @@ URLParams = namedtuple(
'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])
-@six.add_metaclass(abc.ABCMeta)
-class DataSource(object):
+class DataSource(metaclass=abc.ABCMeta):
dsmode = DSMODE_NETWORK
default_locale = 'en_US.UTF-8'
@@ -196,7 +193,7 @@ class DataSource(object):
# N-tuple of keypaths or keynames redact from instance-data.json for
# non-root users
- sensitive_metadata_keys = ('security-credentials',)
+ sensitive_metadata_keys = ('merged_cfg', 'security-credentials',)
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
self.sys_cfg = sys_cfg
@@ -221,14 +218,15 @@ class DataSource(object):
def __str__(self):
return type_utils.obj_name(self)
- def _get_standardized_metadata(self):
+ def _get_standardized_metadata(self, instance_data):
"""Return a dictionary of standardized metadata keys."""
local_hostname = self.get_hostname()
instance_id = self.get_instance_id()
availability_zone = self.availability_zone
# In the event of upgrade from existing cloudinit, pickled datasource
# will not contain these new class attributes. So we need to recrawl
- # metadata to discover that content.
+ # metadata to discover that content
+ sysinfo = instance_data["sys_info"]
return {
'v1': {
'_beta_keys': ['subplatform'],
@@ -236,14 +234,22 @@ class DataSource(object):
'availability_zone': availability_zone,
'cloud-name': self.cloud_name,
'cloud_name': self.cloud_name,
+ 'distro': sysinfo["dist"][0],
+ 'distro_version': sysinfo["dist"][1],
+ 'distro_release': sysinfo["dist"][2],
'platform': self.platform_type,
'public_ssh_keys': self.get_public_ssh_keys(),
+ 'python_version': sysinfo["python"],
'instance-id': instance_id,
'instance_id': instance_id,
+ 'kernel_release': sysinfo["uname"][2],
'local-hostname': local_hostname,
'local_hostname': local_hostname,
+ 'machine': sysinfo["uname"][4],
'region': self.region,
- 'subplatform': self.subplatform}}
+ 'subplatform': self.subplatform,
+ 'system_platform': sysinfo["platform"],
+ 'variant': sysinfo["variant"]}}
def clear_cached_attrs(self, attr_defaults=()):
"""Reset any cached metadata attributes to datasource defaults.
@@ -302,9 +308,15 @@ class DataSource(object):
ec2_metadata = getattr(self, 'ec2_metadata')
if ec2_metadata != UNSET:
instance_data['ds']['ec2_metadata'] = ec2_metadata
- instance_data.update(
- self._get_standardized_metadata())
instance_data['ds']['_doc'] = EXPERIMENTAL_TEXT
+ # Add merged cloud.cfg and sys info for jinja templates and cli query
+ instance_data['merged_cfg'] = copy.deepcopy(self.sys_cfg)
+ instance_data['merged_cfg']['_doc'] = (
+ 'Merged cloud-init system config from /etc/cloud/cloud.cfg and'
+ ' /etc/cloud/cloud.cfg.d/')
+ instance_data['sys_info'] = util.system_info()
+ instance_data.update(
+ self._get_standardized_metadata(instance_data))
try:
# Process content base64encoding unserializable values
content = util.json_dumps(instance_data)
@@ -318,12 +330,12 @@ class DataSource(object):
except UnicodeDecodeError as e:
LOG.warning('Error persisting instance-data.json: %s', str(e))
return False
- json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
- write_json(json_file, processed_data) # World readable
json_sensitive_file = os.path.join(self.paths.run_dir,
INSTANCE_JSON_SENSITIVE_FILE)
- write_json(json_sensitive_file,
- redact_sensitive_keys(processed_data), mode=0o600)
+ write_json(json_sensitive_file, processed_data, mode=0o600)
+ json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
+ # World readable
+ write_json(json_file, redact_sensitive_keys(processed_data))
return True
def _get_data(self):
@@ -436,7 +448,7 @@ class DataSource(object):
return self._cloud_name
if self.metadata and self.metadata.get(METADATA_CLOUD_NAME_KEY):
cloud_name = self.metadata.get(METADATA_CLOUD_NAME_KEY)
- if isinstance(cloud_name, six.string_types):
+ if isinstance(cloud_name, str):
self._cloud_name = cloud_name.lower()
else:
self._cloud_name = self._get_cloud_name().lower()
@@ -590,7 +602,7 @@ class DataSource(object):
# if there is an ipv4 address in 'local-hostname', then
# make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
lhost = self.metadata['local-hostname']
- if util.is_ipv4(lhost):
+ if net.is_ipv4_address(lhost):
toks = []
if resolve_ip:
toks = util.gethostbyaddr(lhost)
@@ -718,8 +730,8 @@ def normalize_pubkey_data(pubkey_data):
if not pubkey_data:
return keys
- if isinstance(pubkey_data, six.string_types):
- return str(pubkey_data).splitlines()
+ if isinstance(pubkey_data, str):
+ return pubkey_data.splitlines()
if isinstance(pubkey_data, (list, set)):
return list(pubkey_data)
@@ -729,7 +741,7 @@ def normalize_pubkey_data(pubkey_data):
# lp:506332 uec metadata service responds with
# data that makes boto populate a string for 'klist' rather
# than a list.
- if isinstance(klist, six.string_types):
+ if isinstance(klist, str):
klist = [klist]
if isinstance(klist, (list, set)):
for pkey in klist:
@@ -837,7 +849,7 @@ def convert_vendordata(data, recurse=True):
"""
if not data:
return None
- if isinstance(data, six.string_types):
+ if isinstance(data, str):
return data
if isinstance(data, list):
return copy.deepcopy(data)
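
The reworked process_instance_metadata now accumulates key paths in local lists and sorts them once, so the output is deterministic across dict orderings. Its behaviour on a small input (the metadata dict is illustrative):

    from cloudinit.sources import process_instance_metadata

    md = {
        'blob': 'ci-b64:aGVsbG8=',
        'ds': {'security-credentials': {'cred1': 'sekret'}},
    }
    processed = process_instance_metadata(
        md, sensitive_keys=('security-credentials',))
    print(processed['base64_encoded_keys'])  # ['blob']
    print(processed['sensitive_keys'])       # ['ds/security-credentials']
    print(processed['blob'])                 # 'aGVsbG8=' (prefix stripped)

Note the values themselves are not redacted here; that happens later via redact_sensitive_keys when the world-readable JSON is written.
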
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 0778f45a..e91398ea 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -12,15 +12,12 @@ import copy
import functools
import os
-import six
-
from cloudinit import ec2_utils
from cloudinit import log as logging
from cloudinit import net
from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
-
from cloudinit.sources import BrokenMetadata
# See https://docs.openstack.org/user-guide/cli-config-drive.html
@@ -71,6 +68,7 @@ KNOWN_PHYSICAL_TYPES = (
None,
'bgpovs', # not present in OpenStack upstream but used on OVH cloud.
'bridge',
+ 'cascading', # not present in OpenStack upstream, used on OpenTelekomCloud
'dvs',
'ethernet',
'hw_veb',
@@ -163,8 +161,7 @@ class SourceMixin(object):
return device
-@six.add_metaclass(abc.ABCMeta)
-class BaseReader(object):
+class BaseReader(metaclass=abc.ABCMeta):
def __init__(self, base_path):
self.base_path = base_path
@@ -227,7 +224,7 @@ class BaseReader(object):
"""
load_json_anytype = functools.partial(
- util.load_json, root_types=(dict, list) + six.string_types)
+ util.load_json, root_types=(dict, list, str))
def datafiles(version):
files = {}
diff --git a/cloudinit/sources/helpers/tests/test_netlink.py b/cloudinit/sources/helpers/tests/test_netlink.py
index c2898a16..58c3adc6 100644
--- a/cloudinit/sources/helpers/tests/test_netlink.py
+++ b/cloudinit/sources/helpers/tests/test_netlink.py
@@ -87,7 +87,7 @@ class TestParseNetlinkMessage(CiTestCase):
data = None
with self.assertRaises(AssertionError) as context:
read_rta_oper_state(data)
- self.assertTrue('data is none', str(context.exception))
+ self.assertEqual('data is none', str(context.exception))
def test_read_invalid_rta_operstate_none(self):
'''read_rta_oper_state returns none if operstate is none'''
diff --git a/cloudinit/sources/helpers/tests/test_openstack.py b/cloudinit/sources/helpers/tests/test_openstack.py
new file mode 100644
index 00000000..2bde1e3f
--- /dev/null
+++ b/cloudinit/sources/helpers/tests/test_openstack.py
@@ -0,0 +1,44 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+# ./cloudinit/sources/helpers/tests/test_openstack.py
+
+from cloudinit.sources.helpers import openstack
+from cloudinit.tests import helpers as test_helpers
+
+
+class TestConvertNetJson(test_helpers.CiTestCase):
+
+ def test_phy_types(self):
+ """Verify the different known physical types are handled."""
+ # network_data.json example from
+ # https://docs.openstack.org/nova/latest/user/metadata.html
+ mac0 = "fa:16:3e:9c:bf:3d"
+ net_json = {
+ "links": [
+ {"ethernet_mac_address": mac0, "id": "tapcd9f6d46-4a",
+ "mtu": None, "type": "bridge",
+ "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc"}
+ ],
+ "networks": [
+ {"id": "network0", "link": "tapcd9f6d46-4a",
+ "network_id": "99e88329-f20d-4741-9593-25bf07847b16",
+ "type": "ipv4_dhcp"}
+ ],
+ "services": [{"address": "8.8.8.8", "type": "dns"}]
+ }
+ macs = {mac0: 'eth0'}
+
+ expected = {
+ 'version': 1,
+ 'config': [
+ {'mac_address': 'fa:16:3e:9c:bf:3d',
+ 'mtu': None, 'name': 'eth0',
+ 'subnets': [{'type': 'dhcp4'}],
+ 'type': 'physical'},
+ {'address': '8.8.8.8', 'type': 'nameserver'}]}
+
+ for t in openstack.KNOWN_PHYSICAL_TYPES:
+ net_json["links"][0]["type"] = t
+ self.assertEqual(
+ expected,
+ openstack.convert_net_json(network_json=net_json,
+ known_macs=macs))
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index 2eaeff34..f2a81416 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -25,6 +25,7 @@ class Config(object):
SUFFIX = 'DNS|SUFFIX|'
TIMEZONE = 'DATETIME|TIMEZONE'
UTC = 'DATETIME|UTC'
+ POST_GC_STATUS = 'MISC|POST-GC-STATUS'
def __init__(self, configFile):
self._configFile = configFile
@@ -104,4 +105,14 @@ class Config(object):
def custom_script_name(self):
"""Return the name of custom (pre/post) script."""
return self._configFile.get(Config.CUSTOM_SCRIPT, None)
+
+ @property
+ def post_gc_status(self):
+ """Return whether to post guestinfo.gc.status VMX property."""
+ postGcStatus = self._configFile.get(Config.POST_GC_STATUS, 'no')
+ postGcStatus = postGcStatus.lower()
+ if postGcStatus not in ('yes', 'no'):
+ raise ValueError('PostGcStatus value should be yes/no')
+ return postGcStatus == 'yes'
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index 3d369d04..c60a38d7 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -154,4 +154,13 @@ def get_tools_config(section, key, defaultVal):
return retValue
+# Send a message to the VMX guestinfo.gc.status property on the
+# underlying VMware Virtualization Platform.
+def set_gc_status(config, gcMsg):
+ if config and config.post_gc_status:
+ rpc = "info-set guestinfo.gc.status %s" % gcMsg
+ return send_rpc(rpc)
+ return None
+
+
# vi: ts=4 expandtab
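
set_gc_status only fires when the customization config opts in via MISC|POST-GC-STATUS = yes; otherwise it is a no-op returning None. A sketch of that gating with a stand-in config object (FakeConfig is hypothetical; the real Config is built from the cust.cfg file):

    class FakeConfig:
        post_gc_status = False  # MISC|POST-GC-STATUS = no (the default)

    def set_gc_status(config, gc_msg):
        # mirrors the helper above, with the RPC replaced by a return value
        if config and config.post_gc_status:
            return 'info-set guestinfo.gc.status %s' % gc_msg
        return None

    print(set_gc_status(FakeConfig(), 'Started'))  # None: posting disabled
    FakeConfig.post_gc_status = True
    print(set_gc_status(FakeConfig(), 'Started'))
    # info-set guestinfo.gc.status Started
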
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index 9698261b..5b6f1b3f 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -3,7 +3,6 @@
import copy
import inspect
import os
-import six
import stat
from cloudinit.event import EventType
@@ -13,7 +12,7 @@ from cloudinit.sources import (
EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE,
METADATA_UNKNOWN, REDACT_SENSITIVE_VALUE, UNSET, DataSource,
canonical_cloud_id, redact_sensitive_keys)
-from cloudinit.tests.helpers import CiTestCase, skipIf, mock
+from cloudinit.tests.helpers import CiTestCase, mock
from cloudinit.user_data import UserDataProcessor
from cloudinit import util
@@ -56,6 +55,7 @@ class InvalidDataSourceTestSubclassNet(DataSource):
class TestDataSource(CiTestCase):
with_logs = True
+ maxDiff = None
def setUp(self):
super(TestDataSource, self).setUp()
@@ -289,27 +289,47 @@ class TestDataSource(CiTestCase):
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- datasource.get_data()
+ sys_info = {
+ "python": "3.7",
+ "platform":
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
+ "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
+ "x86_64"],
+ "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ datasource.get_data()
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
expected = {
'base64_encoded_keys': [],
- 'sensitive_keys': [],
+ 'merged_cfg': REDACT_SENSITIVE_VALUE,
+ 'sensitive_keys': ['merged_cfg'],
+ 'sys_info': sys_info,
'v1': {
'_beta_keys': ['subplatform'],
'availability-zone': 'myaz',
'availability_zone': 'myaz',
'cloud-name': 'subclasscloudname',
'cloud_name': 'subclasscloudname',
+ 'distro': 'ubuntu',
+ 'distro_release': 'focal',
+ 'distro_version': '20.04',
'instance-id': 'iid-datasource',
'instance_id': 'iid-datasource',
'local-hostname': 'test-subclass-hostname',
'local_hostname': 'test-subclass-hostname',
+ 'kernel_release': '5.4.0-24-generic',
+ 'machine': 'x86_64',
'platform': 'mytestsubclass',
'public_ssh_keys': [],
+ 'python_version': '3.7',
'region': 'myregion',
- 'subplatform': 'unknown'},
+ 'system_platform':
+ 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
+ 'subplatform': 'unknown',
+ 'variant': 'ubuntu'},
'ds': {
'_doc': EXPERIMENTAL_TEXT,
'meta_data': {'availability_zone': 'myaz',
'local-hostname': 'test-subclass-hostname',
@@ -319,8 +339,8 @@ class TestDataSource(CiTestCase):
self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
self.assertEqual(expected, util.load_json(content))
- def test_get_data_writes_json_instance_data_sensitive(self):
- """get_data writes INSTANCE_JSON_SENSITIVE_FILE as readonly root."""
+ def test_get_data_writes_redacted_public_json_instance_data(self):
+ """get_data writes redacted content to public INSTANCE_JSON_FILE."""
tmp = self.tmp_dir()
datasource = DataSourceTestSubclassNet(
self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
@@ -330,33 +350,49 @@ class TestDataSource(CiTestCase):
'region': 'myregion',
'some': {'security-credentials': {
'cred1': 'sekret', 'cred2': 'othersekret'}}})
- self.assertEqual(
- ('security-credentials',), datasource.sensitive_metadata_keys)
- datasource.get_data()
+ self.assertCountEqual(
+ ('merged_cfg', 'security-credentials',),
+ datasource.sensitive_metadata_keys)
+ sys_info = {
+ "python": "3.7",
+ "platform":
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
+ "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
+ "x86_64"],
+ "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ datasource.get_data()
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
redacted = util.load_json(util.load_file(json_file))
- self.assertEqual(
- {'cred1': 'sekret', 'cred2': 'othersekret'},
- redacted['ds']['meta_data']['some']['security-credentials'])
- content = util.load_file(sensitive_json_file)
expected = {
'base64_encoded_keys': [],
- 'sensitive_keys': ['ds/meta_data/some/security-credentials'],
+ 'merged_cfg': REDACT_SENSITIVE_VALUE,
+ 'sensitive_keys': [
+ 'ds/meta_data/some/security-credentials', 'merged_cfg'],
+ 'sys_info': sys_info,
'v1': {
'_beta_keys': ['subplatform'],
'availability-zone': 'myaz',
'availability_zone': 'myaz',
'cloud-name': 'subclasscloudname',
'cloud_name': 'subclasscloudname',
+ 'distro': 'ubuntu',
+ 'distro_release': 'focal',
+ 'distro_version': '20.04',
'instance-id': 'iid-datasource',
'instance_id': 'iid-datasource',
'local-hostname': 'test-subclass-hostname',
'local_hostname': 'test-subclass-hostname',
+ 'kernel_release': '5.4.0-24-generic',
+ 'machine': 'x86_64',
'platform': 'mytestsubclass',
'public_ssh_keys': [],
+ 'python_version': '3.7',
'region': 'myregion',
- 'subplatform': 'unknown'},
+ 'system_platform':
+ 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
+ 'subplatform': 'unknown',
+ 'variant': 'ubuntu'},
'ds': {
'_doc': EXPERIMENTAL_TEXT,
'meta_data': {
@@ -365,8 +401,82 @@ class TestDataSource(CiTestCase):
'region': 'myregion',
'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}}
}
- self.maxDiff = None
- self.assertEqual(expected, util.load_json(content))
+ self.assertCountEqual(expected, redacted)
+ file_stat = os.stat(json_file)
+ self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
+
+ def test_get_data_writes_json_instance_data_sensitive(self):
+ """
+ get_data writes unmodified data to sensitive file as root-readonly.
+ """
+ tmp = self.tmp_dir()
+ datasource = DataSourceTestSubclassNet(
+ self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
+ custom_metadata={
+ 'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion',
+ 'some': {'security-credentials': {
+ 'cred1': 'sekret', 'cred2': 'othersekret'}}})
+ sys_info = {
+ "python": "3.7",
+ "platform":
+ "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
+ "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
+ "x86_64"],
+ "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
+
+ self.assertCountEqual(
+ ('merged_cfg', 'security-credentials',),
+ datasource.sensitive_metadata_keys)
+ with mock.patch("cloudinit.util.system_info", return_value=sys_info):
+ datasource.get_data()
+ sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
+ content = util.load_file(sensitive_json_file)
+ expected = {
+ 'base64_encoded_keys': [],
+ 'merged_cfg': {
+ '_doc': (
+ 'Merged cloud-init system config from '
+ '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/'),
+ 'datasource': {'_undef': {'key1': False}}},
+ 'sensitive_keys': [
+ 'ds/meta_data/some/security-credentials', 'merged_cfg'],
+ 'sys_info': sys_info,
+ 'v1': {
+ '_beta_keys': ['subplatform'],
+ 'availability-zone': 'myaz',
+ 'availability_zone': 'myaz',
+ 'cloud-name': 'subclasscloudname',
+ 'cloud_name': 'subclasscloudname',
+ 'distro': 'ubuntu',
+ 'distro_release': 'focal',
+ 'distro_version': '20.04',
+ 'instance-id': 'iid-datasource',
+ 'instance_id': 'iid-datasource',
+ 'kernel_release': '5.4.0-24-generic',
+ 'local-hostname': 'test-subclass-hostname',
+ 'local_hostname': 'test-subclass-hostname',
+ 'machine': 'x86_64',
+ 'platform': 'mytestsubclass',
+ 'public_ssh_keys': [],
+ 'python_version': '3.7',
+ 'region': 'myregion',
+ 'subplatform': 'unknown',
+ 'system_platform':
+ 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
+ 'variant': 'ubuntu'},
+ 'ds': {
+ '_doc': EXPERIMENTAL_TEXT,
+ 'meta_data': {
+ 'availability_zone': 'myaz',
+ 'local-hostname': 'test-subclass-hostname',
+ 'region': 'myregion',
+ 'some': {
+ 'security-credentials':
+ {'cred1': 'sekret', 'cred2': 'othersekret'}}}}
+ }
+ self.assertCountEqual(expected, util.load_json(content))
file_stat = os.stat(sensitive_json_file)
self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode))
self.assertEqual(expected, util.load_json(content))
@@ -422,7 +532,6 @@ class TestDataSource(CiTestCase):
{'network_json': 'is good'},
instance_data['ds']['network_json'])
- @skipIf(not six.PY3, "json serialization on <= py2.7 handles bytes")
def test_get_data_base64encodes_unserializable_bytes(self):
"""On py3, get_data base64encodes any unserializable content."""
tmp = self.tmp_dir()
@@ -433,47 +542,16 @@ class TestDataSource(CiTestCase):
json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
content = util.load_file(json_file)
instance_json = util.load_json(content)
- self.assertItemsEqual(
+ self.assertCountEqual(
['ds/meta_data/key2/key2.1'],
instance_json['base64_encoded_keys'])
self.assertEqual(
{'key1': 'val1', 'key2': {'key2.1': 'EjM='}},
instance_json['ds']['meta_data'])
- @skipIf(not six.PY2, "json serialization on <= py2.7 handles bytes")
- def test_get_data_handles_bytes_values(self):
- """On py2 get_data handles bytes values without having to b64encode."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
- self.assertTrue(datasource.get_data())
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- content = util.load_file(json_file)
- instance_json = util.load_json(content)
- self.assertEqual([], instance_json['base64_encoded_keys'])
- self.assertEqual(
- {'key1': 'val1', 'key2': {'key2.1': '\x123'}},
- instance_json['ds']['meta_data'])
-
- @skipIf(not six.PY2, "Only python2 hits UnicodeDecodeErrors on non-utf8")
- def test_non_utf8_encoding_gets_b64encoded(self):
- """When non-utf-8 values exist in py2 instance-data is b64encoded."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'ab\xaadef'}})
- self.assertTrue(datasource.get_data())
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- instance_json = util.load_json(util.load_file(json_file))
- key21_value = instance_json['ds']['meta_data']['key2']['key2.1']
- self.assertEqual('ci-b64:' + util.b64e(b'ab\xaadef'), key21_value)
-
def test_get_hostname_subclass_support(self):
"""Validate get_hostname signature on all subclasses of DataSource."""
- # Use inspect.getfullargspec when we drop py2.6 and py2.7
- get_args = inspect.getargspec # pylint: disable=W1505
- base_args = get_args(DataSource.get_hostname) # pylint: disable=W1505
+ base_args = inspect.getfullargspec(DataSource.get_hostname)
# Import all DataSource subclasses so we can inspect them.
modules = util.find_modules(os.path.dirname(os.path.dirname(__file__)))
for _loc, name in modules.items():
@@ -485,13 +563,13 @@ class TestDataSource(CiTestCase):
continue
self.assertEqual(
base_args,
- get_args(child.get_hostname), # pylint: disable=W1505
+ inspect.getfullargspec(child.get_hostname),
'%s does not implement DataSource.get_hostname params'
% child)
for grandchild in child.__subclasses__():
self.assertEqual(
base_args,
- get_args(grandchild.get_hostname), # pylint: disable=W1505
+ inspect.getfullargspec(grandchild.get_hostname),
'%s does not implement DataSource.get_hostname params'
% grandchild)
diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py
index 85b6db97..2265327b 100644
--- a/cloudinit/sources/tests/test_oracle.py
+++ b/cloudinit/sources/tests/test_oracle.py
@@ -1,20 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.sources import DataSourceOracle as oracle
-from cloudinit.sources import BrokenMetadata, NetworkConfigSource
-from cloudinit import helpers
-
-from cloudinit.tests import helpers as test_helpers
-
-from textwrap import dedent
import argparse
import copy
-import httpretty
import json
-import mock
import os
-import six
import uuid
+from textwrap import dedent
+from unittest import mock
+
+import httpretty
+
+from cloudinit import helpers
+from cloudinit.sources import BrokenMetadata
+from cloudinit.sources import DataSourceOracle as oracle
+from cloudinit.sources import NetworkConfigSource
+from cloudinit.tests import helpers as test_helpers
DS_PATH = "cloudinit.sources.DataSourceOracle"
MD_VER = "2013-10-17"
@@ -186,6 +186,7 @@ class TestDataSourceOracle(test_helpers.CiTestCase):
self.assertEqual(self.my_md['uuid'], ds.get_instance_id())
self.assertEqual(my_userdata, ds.userdata_raw)
+ @mock.patch(DS_PATH + ".get_interfaces_by_mac", mock.Mock(return_value={}))
@mock.patch(DS_PATH + "._add_network_config_from_opc_imds",
side_effect=lambda network_config: network_config)
@mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
@@ -207,6 +208,7 @@ class TestDataSourceOracle(test_helpers.CiTestCase):
self.assertEqual([mock.call()], m_initramfs_config.call_args_list)
self.assertFalse(distro.generate_fallback_config.called)
+ @mock.patch(DS_PATH + ".get_interfaces_by_mac", mock.Mock(return_value={}))
@mock.patch(DS_PATH + "._add_network_config_from_opc_imds",
side_effect=lambda network_config: network_config)
@mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
@@ -334,7 +336,7 @@ class TestReadMetaData(test_helpers.HttprettyTestCase):
for k, v in data.items():
httpretty.register_uri(
httpretty.GET, self.mdurl + MD_VER + "/" + k,
- v if not isinstance(v, six.text_type) else v.encode('utf-8'))
+ v if not isinstance(v, str) else v.encode('utf-8'))
def test_broken_no_sys_uuid(self, m_read_system_uuid):
"""Datasource requires ability to read system_uuid and true return."""
@@ -589,8 +591,6 @@ class TestNetworkConfigFromOpcImds(test_helpers.CiTestCase):
class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
- with_logs = True
-
def setUp(self):
super(TestNetworkConfigFiltersNetFailover, self).setUp()
self.add_patch(DS_PATH + '.get_interfaces_by_mac',
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 71f3a49e..db8ba64c 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -6,11 +6,9 @@
import copy
import os
+import pickle
import sys
-import six
-from six.moves import cPickle as pickle
-
from cloudinit.settings import (
FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, RUN_CLOUD_CONFIG)
@@ -758,7 +756,7 @@ class Modules(object):
for item in cfg_mods:
if not item:
continue
- if isinstance(item, six.string_types):
+ if isinstance(item, str):
module_list.append({
'mod': item.strip(),
})
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index e47cdeda..a00ade20 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -21,13 +21,10 @@ except (ImportError, AttributeError):
CHEETAH_AVAILABLE = False
try:
- from jinja2.runtime import implements_to_string
from jinja2 import Template as JTemplate
from jinja2 import DebugUndefined as JUndefined
JINJA_AVAILABLE = True
except (ImportError, AttributeError):
- from cloudinit.helpers import identity
- implements_to_string = identity
JINJA_AVAILABLE = False
JUndefined = object
@@ -42,7 +39,6 @@ BASIC_MATCHER = re.compile(r'\$\{([A-Za-z0-9_.]+)\}|\$([A-Za-z0-9_.]+)')
MISSING_JINJA_PREFIX = u'CI_MISSING_JINJA_VAR/'
-@implements_to_string # Needed for python2.7. Otherwise cached super.__str__
class UndefinedJinjaVariable(JUndefined):
"""Class used to represent any undefined jinja template variable."""
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index 4dad2afd..f3ab7e8c 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -1,9 +1,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from __future__ import print_function
-
import functools
import httpretty
+import io
import logging
import os
import random
@@ -12,21 +11,10 @@ import string
import sys
import tempfile
import time
-
-import mock
-import six
-import unittest2
-from unittest2.util import strclass
-
-try:
- from contextlib import ExitStack, contextmanager
-except ImportError:
- from contextlib2 import ExitStack, contextmanager
-
-try:
- from configparser import ConfigParser
-except ImportError:
- from ConfigParser import ConfigParser
+import unittest
+from contextlib import ExitStack, contextmanager
+from unittest import mock
+from unittest.util import strclass
from cloudinit.config.schema import (
SchemaValidationError, validate_cloudconfig_schema)
@@ -40,8 +28,8 @@ from cloudinit import util
_real_subp = util.subp
# Used for skipping tests
-SkipTest = unittest2.SkipTest
-skipIf = unittest2.skipIf
+SkipTest = unittest.SkipTest
+skipIf = unittest.skipIf
# Makes the old path start
@@ -72,13 +60,13 @@ def retarget_many_wrapper(new_base, am, old_func):
# Python 3 some of these now accept file-descriptors (integers).
# That breaks rebase_path() so in lieu of a better solution, just
# don't rebase if we get a fd.
- if isinstance(path, six.string_types):
+ if isinstance(path, str):
n_args[i] = rebase_path(path, new_base)
return old_func(*n_args, **kwds)
return wrapper
-class TestCase(unittest2.TestCase):
+class TestCase(unittest.TestCase):
def reset_global_state(self):
"""Reset any global state to its original settings.
@@ -114,16 +102,6 @@ class TestCase(unittest2.TestCase):
self.addCleanup(m.stop)
setattr(self, attr, p)
- # prefer python3 read_file over readfp but allow fallback
- def parse_and_read(self, contents):
- parser = ConfigParser()
- if hasattr(parser, 'read_file'):
- parser.read_file(contents)
- elif hasattr(parser, 'readfp'):
- # pylint: disable=W1505
- parser.readfp(contents)
- return parser
-
class CiTestCase(TestCase):
"""This is the preferred test case base class unless user
@@ -149,7 +127,7 @@ class CiTestCase(TestCase):
if self.with_logs:
# Create a log handler so unit tests can search expected logs.
self.logger = logging.getLogger()
- self.logs = six.StringIO()
+ self.logs = io.StringIO()
formatter = logging.Formatter('%(levelname)s: %(message)s')
handler = logging.StreamHandler(self.logs)
handler.setFormatter(formatter)
@@ -164,9 +142,12 @@ class CiTestCase(TestCase):
if 'args' in kwargs:
cmd = kwargs['args']
else:
+ if not args:
+ raise TypeError(
+ "subp() missing 1 required positional argument: 'args'")
cmd = args[0]
- if not isinstance(cmd, six.string_types):
+ if not isinstance(cmd, str):
cmd = cmd[0]
pass_through = False
if not isinstance(self.allowed_subp, (list, bool)):
@@ -212,16 +193,6 @@ class CiTestCase(TestCase):
dir = self.tmp_dir()
return os.path.normpath(os.path.abspath(os.path.join(dir, path)))
- def sys_exit(self, code):
- """Provide a wrapper around sys.exit for python 2.6
-
- In 2.6, this code would produce 'cm.exception' with value int(2)
- rather than the SystemExit that was raised by sys.exit(2).
- with assertRaises(SystemExit) as cm:
- sys.exit(2)
- """
- raise SystemExit(code)
-
def tmp_cloud(self, distro, sys_cfg=None, metadata=None):
"""Create a cloud with tmp working directory paths.
@@ -346,8 +317,9 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
def patchOpen(self, new_root):
trap_func = retarget_many_wrapper(new_root, 1, open)
- name = 'builtins.open' if six.PY3 else '__builtin__.open'
- self.patched_funcs.enter_context(mock.patch(name, trap_func))
+ self.patched_funcs.enter_context(
+ mock.patch('builtins.open', trap_func)
+ )
def patchStdoutAndStderr(self, stdout=None, stderr=None):
if stdout is not None:
@@ -362,6 +334,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
root = self.tmp_dir()
self.patchUtils(root)
self.patchOS(root)
+ self.patchOpen(root)
return root
@contextmanager
@@ -395,7 +368,7 @@ class HttprettyTestCase(CiTestCase):
super(HttprettyTestCase, self).tearDown()
-class SchemaTestCaseMixin(unittest2.TestCase):
+class SchemaTestCaseMixin(unittest.TestCase):
def assertSchemaValid(self, cfg, msg="Valid Schema failed validation."):
"""Assert the config is valid per self.schema.
@@ -420,7 +393,7 @@ def populate_dir(path, files):
p = os.path.sep.join([path, name])
util.ensure_dir(os.path.dirname(p))
with open(p, "wb") as fp:
- if isinstance(content, six.binary_type):
+ if isinstance(content, bytes):
fp.write(content)
else:
fp.write(content.encode('utf-8'))
@@ -527,13 +500,4 @@ if not hasattr(mock.Mock, 'assert_not_called'):
raise AssertionError(msg)
mock.Mock.assert_not_called = __mock_assert_not_called
-
-# older unittest2.TestCase (centos6) have only the now-deprecated
-# assertRaisesRegexp. Simple assignment makes pylint complain, about
-# users of assertRaisesRegex so we use getattr to trick it.
-# https://github.com/PyCQA/pylint/issues/1946
-if not hasattr(unittest2.TestCase, 'assertRaisesRegex'):
- unittest2.TestCase.assertRaisesRegex = (
- getattr(unittest2.TestCase, 'assertRaisesRegexp'))
-
# vi: ts=4 expandtab
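
With six gone, patchOpen can target builtins.open unconditionally instead of choosing between '__builtin__.open' and 'builtins.open'. A self-contained sketch of the retargeting idea, assuming a simple prefix join rather than the module's rebase_path helper:

    import os
    from unittest import mock

    def retarget_open(new_root):
        real_open = open

        def wrapper(path, *args, **kwargs):
            # Only rebase string paths; file descriptors pass through untouched.
            if isinstance(path, str):
                path = os.path.join(new_root, path.lstrip('/'))
            return real_open(path, *args, **kwargs)

        return mock.patch('builtins.open', wrapper)

    # with retarget_open('/tmp/fake_root'):
    #     open('/etc/hostname')  # actually opens /tmp/fake_root/etc/hostname
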
diff --git a/cloudinit/tests/test_conftest.py b/cloudinit/tests/test_conftest.py
new file mode 100644
index 00000000..773ef8fe
--- /dev/null
+++ b/cloudinit/tests/test_conftest.py
@@ -0,0 +1,61 @@
+import pytest
+
+from cloudinit import util
+from cloudinit.tests.helpers import CiTestCase
+
+
+class TestDisableSubpUsage:
+ """Test that the disable_subp_usage fixture behaves as expected."""
+
+ def test_using_subp_raises_assertion_error(self):
+ with pytest.raises(AssertionError):
+ util.subp(["some", "args"])
+
+ def test_typeerrors_on_incorrect_usage(self):
+ with pytest.raises(TypeError):
+ # We are intentionally passing no value for a parameter, so:
+ # pylint: disable=no-value-for-parameter
+ util.subp()
+
+ @pytest.mark.parametrize('disable_subp_usage', [False], indirect=True)
+ def test_subp_usage_can_be_reenabled(self):
+ util.subp(['whoami'])
+
+ @pytest.mark.parametrize(
+ 'disable_subp_usage', [['whoami'], 'whoami'], indirect=True)
+ def test_subp_usage_can_be_conditionally_reenabled(self):
+ # The two parameters exercise passing a single allowed command
+ # either as a list or as a bare string
+ with pytest.raises(AssertionError) as excinfo:
+ util.subp(["some", "args"])
+ assert "allowed: whoami" in str(excinfo.value)
+ util.subp(['whoami'])
+
+ @pytest.mark.parametrize(
+ 'disable_subp_usage', [['whoami', 'bash']], indirect=True)
+ def test_subp_usage_can_be_conditionally_reenabled_for_multiple_cmds(self):
+ with pytest.raises(AssertionError) as excinfo:
+ util.subp(["some", "args"])
+ assert "allowed: whoami,bash" in str(excinfo.value)
+ util.subp(['bash', '-c', 'true'])
+ util.subp(['whoami'])
+
+
+class TestDisableSubpUsageInTestSubclass(CiTestCase):
+ """Test that disable_subp_usage doesn't impact CiTestCase's subp logic."""
+
+ def test_using_subp_raises_exception(self):
+ with pytest.raises(Exception):
+ util.subp(["some", "args"])
+
+ def test_typeerrors_on_incorrect_usage(self):
+ with pytest.raises(TypeError):
+ util.subp()
+
+ def test_subp_usage_can_be_reenabled(self):
+ _old_allowed_subp = self.allowed_subp
+ self.allowed_subp = True
+ try:
+ util.subp(['bash', '-c', 'true'])
+ finally:
+ self.allowed_subp = _old_allowed_subp
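
These tests lean on pytest's indirect parametrization: with indirect=True, the parametrized value is handed to the fixture via request.param rather than to the test function directly. A minimal sketch of the mechanism, independent of the real disable_subp_usage fixture:

    import pytest

    @pytest.fixture
    def allowed_commands(request):
        # With indirect=True the parametrize value arrives as request.param.
        return getattr(request, 'param', [])

    @pytest.mark.parametrize('allowed_commands', [['whoami']], indirect=True)
    def test_fixture_sees_the_parameter(allowed_commands):
        assert allowed_commands == ['whoami']
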
diff --git a/cloudinit/tests/test_dhclient_hook.py b/cloudinit/tests/test_dhclient_hook.py
index 7aab8dd5..eadae81c 100644
--- a/cloudinit/tests/test_dhclient_hook.py
+++ b/cloudinit/tests/test_dhclient_hook.py
@@ -7,8 +7,8 @@ from cloudinit.tests.helpers import CiTestCase, dir2dict, populate_dir
import argparse
import json
-import mock
import os
+from unittest import mock
class TestDhclientHook(CiTestCase):
diff --git a/cloudinit/tests/test_gpg.py b/cloudinit/tests/test_gpg.py
index 0562b966..8dd57137 100644
--- a/cloudinit/tests/test_gpg.py
+++ b/cloudinit/tests/test_gpg.py
@@ -1,12 +1,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Test gpg module."""
+from unittest import mock
+
from cloudinit import gpg
from cloudinit import util
from cloudinit.tests.helpers import CiTestCase
-import mock
-
@mock.patch("cloudinit.gpg.time.sleep")
@mock.patch("cloudinit.gpg.util.subp")
diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py
index 1674120f..29b39374 100644
--- a/cloudinit/tests/test_url_helper.py
+++ b/cloudinit/tests/test_url_helper.py
@@ -1,7 +1,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit.url_helper import (
- NOT_FOUND, UrlError, oauth_headers, read_file_or_url, retry_on_url_exc)
+ NOT_FOUND, UrlError, REDACTED, oauth_headers, read_file_or_url,
+ retry_on_url_exc)
from cloudinit.tests.helpers import CiTestCase, mock, skipIf
from cloudinit import util
from cloudinit import version
@@ -50,6 +51,9 @@ class TestOAuthHeaders(CiTestCase):
class TestReadFileOrUrl(CiTestCase):
+
+ with_logs = True
+
def test_read_file_or_url_str_from_file(self):
"""Test that str(result.contents) on file is text version of contents.
It should not be "b'data'", but just "'data'" """
@@ -71,6 +75,34 @@ class TestReadFileOrUrl(CiTestCase):
self.assertEqual(result.contents, data)
self.assertEqual(str(result), data.decode('utf-8'))
+ @httpretty.activate
+ def test_read_file_or_url_str_from_url_redacting_headers_from_logs(self):
+ """Headers are redacted from logs but unredacted in requests."""
+ url = 'http://hostname/path'
+ headers = {'sensitive': 'sekret', 'server': 'blah'}
+ httpretty.register_uri(httpretty.GET, url)
+
+ read_file_or_url(url, headers=headers, headers_redact=['sensitive'])
+ logs = self.logs.getvalue()
+ for k in headers.keys():
+ self.assertEqual(headers[k], httpretty.last_request().headers[k])
+ self.assertIn(REDACTED, logs)
+ self.assertNotIn('sekret', logs)
+
+ @httpretty.activate
+ def test_read_file_or_url_str_from_url_redacts_noheaders(self):
+ """When no headers_redact, header values are in logs and requests."""
+ url = 'http://hostname/path'
+ headers = {'sensitive': 'sekret', 'server': 'blah'}
+ httpretty.register_uri(httpretty.GET, url)
+
+ read_file_or_url(url, headers=headers)
+ for k in headers.keys():
+ self.assertEqual(headers[k], httpretty.last_request().headers[k])
+ logs = self.logs.getvalue()
+ self.assertNotIn(REDACTED, logs)
+ self.assertIn('sekret', logs)
+
@mock.patch(M_PATH + 'readurl')
def test_read_file_or_url_passes_params_to_readurl(self, m_readurl):
"""read_file_or_url passes all params through to readurl."""
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index 11f37000..bfccfe1e 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -6,6 +6,7 @@ import base64
import logging
import json
import platform
+import pytest
import cloudinit.util as util
@@ -419,12 +420,6 @@ class TestGetLinuxDistro(CiTestCase):
if path == '/etc/redhat-release':
return 1
- @classmethod
- def freebsd_version_exists(self, path):
- """Side effect function """
- if path == '/bin/freebsd-version':
- return 1
-
@mock.patch('cloudinit.util.load_file')
def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
"""Verify we get the correct name if the os-release file has
@@ -443,11 +438,18 @@ class TestGetLinuxDistro(CiTestCase):
dist = util.get_linux_distro()
self.assertEqual(('ubuntu', '16.04', 'xenial'), dist)
- @mock.patch('cloudinit.util.subp')
- def test_get_linux_freebsd(self, m_subp, m_path_exists):
+ @mock.patch('platform.system')
+ @mock.patch('platform.release')
+ @mock.patch('cloudinit.util._parse_redhat_release')
+ def test_get_linux_freebsd(self, m_parse_redhat_release,
+ m_platform_release,
+ m_platform_system, m_path_exists):
"""Verify we get the correct name and release name on FreeBSD."""
- m_path_exists.side_effect = TestGetLinuxDistro.freebsd_version_exists
- m_subp.return_value = ("12.0-RELEASE-p10\n", '')
+ m_path_exists.return_value = False
+ m_platform_release.return_value = '12.0-RELEASE-p10'
+ m_platform_system.return_value = 'FreeBSD'
+ m_parse_redhat_release.return_value = {}
+ util.is_BSD.cache_clear()
dist = util.get_linux_distro()
self.assertEqual(('freebsd', '12.0-RELEASE-p10', ''), dist)
@@ -538,27 +540,36 @@ class TestGetLinuxDistro(CiTestCase):
self.assertEqual(
('opensuse-tumbleweed', '20180920', platform.machine()), dist)
+ @mock.patch('platform.system')
@mock.patch('platform.dist', create=True)
- def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists):
+ def test_get_linux_distro_no_data(self, m_platform_dist,
+ m_platform_system, m_path_exists):
"""Verify we get no information if os-release does not exist"""
m_platform_dist.return_value = ('', '', '')
+ m_platform_system.return_value = "Linux"
m_path_exists.return_value = 0
dist = util.get_linux_distro()
self.assertEqual(('', '', ''), dist)
+ @mock.patch('platform.system')
@mock.patch('platform.dist', create=True)
- def test_get_linux_distro_no_impl(self, m_platform_dist, m_path_exists):
+ def test_get_linux_distro_no_impl(self, m_platform_dist,
+ m_platform_system, m_path_exists):
"""Verify we get an empty tuple when no information exists and
Exceptions are not propagated"""
m_platform_dist.side_effect = Exception()
+ m_platform_system.return_value = "Linux"
m_path_exists.return_value = 0
dist = util.get_linux_distro()
self.assertEqual(('', '', ''), dist)
+ @mock.patch('platform.system')
@mock.patch('platform.dist', create=True)
- def test_get_linux_distro_plat_data(self, m_platform_dist, m_path_exists):
+ def test_get_linux_distro_plat_data(self, m_platform_dist,
+ m_platform_system, m_path_exists):
"""Verify we get the correct platform information"""
m_platform_dist.return_value = ('foo', '1.1', 'aarch64')
+ m_platform_system.return_value = "Linux"
m_path_exists.return_value = 0
dist = util.get_linux_distro()
self.assertEqual(('foo', '1.1', 'aarch64'), dist)
@@ -597,4 +608,99 @@ class TestIsLXD(CiTestCase):
self.assertFalse(util.is_lxd())
m_exists.assert_called_once_with('/dev/lxd/sock')
+
+class TestReadCcFromCmdline:
+
+ @pytest.mark.parametrize(
+ "cmdline,expected_cfg",
+ [
+ # Return None if cmdline has no cc:<YAML>end_cc content.
+ (CiTestCase.random_string(), None),
+ # Return None if YAML content is empty string.
+ ('foo cc: end_cc bar', None),
+ # Return expected dictionary without trailing end_cc marker.
+ ('foo cc: ssh_pwauth: true', {'ssh_pwauth': True}),
+ # Return expected dictionary w escaped newline and no end_cc.
+ ('foo cc: ssh_pwauth: true\\n', {'ssh_pwauth': True}),
+ # Return expected dictionary of yaml between cc: and end_cc.
+ ('foo cc: ssh_pwauth: true end_cc bar', {'ssh_pwauth': True}),
+ # Return dict with list value w escaped newline, no end_cc.
+ (
+ 'cc: ssh_import_id: [smoser, kirkland]\\n',
+ {'ssh_import_id': ['smoser', 'kirkland']}
+ ),
+ # Parse urlencoded brackets in yaml content.
+ (
+ 'cc: ssh_import_id: %5Bsmoser, kirkland%5D end_cc',
+ {'ssh_import_id': ['smoser', 'kirkland']}
+ ),
+ # Parse complete urlencoded yaml content.
+ (
+ 'cc: ssh_import_id%3A%20%5Buser1%2C%20user2%5D end_cc',
+ {'ssh_import_id': ['user1', 'user2']}
+ ),
+ # Parse nested dictionary in yaml content.
+ (
+ 'cc: ntp: {enabled: true, ntp_client: myclient} end_cc',
+ {'ntp': {'enabled': True, 'ntp_client': 'myclient'}}
+ ),
+ # Parse single mapping value in yaml content.
+ ('cc: ssh_import_id: smoser end_cc', {'ssh_import_id': 'smoser'}),
+ # Parse multiline content with multiple mapping and nested lists.
+ (
+ ('cc: ssh_import_id: [smoser, bob]\\n'
+ 'runcmd: [ [ ls, -l ], echo hi ] end_cc'),
+ {'ssh_import_id': ['smoser', 'bob'],
+ 'runcmd': [['ls', '-l'], 'echo hi']}
+ ),
+ # Parse multiline encoded content w/ mappings and nested lists.
+ (
+ ('cc: ssh_import_id: %5Bsmoser, bob%5D\\n'
+ 'runcmd: [ [ ls, -l ], echo hi ] end_cc'),
+ {'ssh_import_id': ['smoser', 'bob'],
+ 'runcmd': [['ls', '-l'], 'echo hi']}
+ ),
+ # test encoded escaped newlines work.
+ #
+ # unquote(encoded_content)
+ # 'ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ]'
+ (
+ ('cc: ' +
+ ('ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%5Cn'
+ 'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C'
+ '%20echo%20hi%20%5D') + ' end_cc'),
+ {'ssh_import_id': ['smoser', 'bob'],
+ 'runcmd': [['ls', '-l'], 'echo hi']}
+ ),
+ # test encoded newlines work.
+ #
+ # unquote(encoded_content)
+ # 'ssh_import_id: [smoser, bob]\nruncmd: [ [ ls, -l ], echo hi ]'
+ (
+ ("cc: " +
+ ('ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%0A'
+ 'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C'
+ '%20echo%20hi%20%5D') + ' end_cc'),
+ {'ssh_import_id': ['smoser', 'bob'],
+ 'runcmd': [['ls', '-l'], 'echo hi']}
+ ),
+ # Parse and merge multiple yaml content sections.
+ (
+ ('cc:ssh_import_id: [smoser, bob] end_cc '
+ 'cc: runcmd: [ [ ls, -l ] ] end_cc'),
+ {'ssh_import_id': ['smoser', 'bob'],
+ 'runcmd': [['ls', '-l']]}
+ ),
+ # Parse and merge multiple encoded yaml content sections.
+ (
+ ('cc:ssh_import_id%3A%20%5Bsmoser%5D end_cc '
+ 'cc:runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%20%5D end_cc'),
+ {'ssh_import_id': ['smoser'], 'runcmd': [['ls', '-l']]}
+ ),
+ ]
+ )
+ def test_read_conf_from_cmdline_config(self, expected_cfg, cmdline):
+ assert expected_cfg == util.read_conf_from_cmdline(cmdline=cmdline)
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_version.py b/cloudinit/tests/test_version.py
index a96c2a47..778a762c 100644
--- a/cloudinit/tests/test_version.py
+++ b/cloudinit/tests/test_version.py
@@ -1,10 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from unittest import mock
+
from cloudinit.tests.helpers import CiTestCase
from cloudinit import version
-import mock
-
class TestExportsFeatures(CiTestCase):
def test_has_network_config_v1(self):
diff --git a/cloudinit/type_utils.py b/cloudinit/type_utils.py
index 6132654b..2c1ae368 100644
--- a/cloudinit/type_utils.py
+++ b/cloudinit/type_utils.py
@@ -10,29 +10,18 @@
import types
-import six
-
-if six.PY3:
- _NAME_TYPES = (
- types.ModuleType,
- types.FunctionType,
- types.LambdaType,
- type,
- )
-else:
- _NAME_TYPES = (
- types.TypeType,
- types.ModuleType,
- types.FunctionType,
- types.LambdaType,
- types.ClassType,
- )
+_NAME_TYPES = (
+ types.ModuleType,
+ types.FunctionType,
+ types.LambdaType,
+ type,
+)
def obj_name(obj):
if isinstance(obj, _NAME_TYPES):
- return six.text_type(obj.__name__)
+ return str(obj.__name__)
else:
if not hasattr(obj, '__class__'):
return repr(obj)
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 1496a471..f3c0cf9c 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -8,39 +8,31 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+import copy
import json
import os
-import requests
-import six
import time
-
from email.utils import parsedate
from errno import ENOENT
from functools import partial
+from http.client import NOT_FOUND
from itertools import count
-from requests import exceptions
+from urllib.parse import urlparse, urlunparse, quote
-from six.moves.urllib.parse import (
- urlparse, urlunparse,
- quote as urlquote)
+import requests
+from requests import exceptions
from cloudinit import log as logging
from cloudinit import version
LOG = logging.getLogger(__name__)
-if six.PY2:
- import httplib
- NOT_FOUND = httplib.NOT_FOUND
-else:
- import http.client
- NOT_FOUND = http.client.NOT_FOUND
-
# Check if requests has ssl support (added in requests >= 0.8.8)
SSL_ENABLED = False
CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0)
_REQ_VER = None
+REDACTED = 'REDACTED'
try:
from distutils.version import LooseVersion
import pkg_resources
@@ -71,7 +63,7 @@ def combine_url(base, *add_ons):
path = url_parsed[2]
if path and not path.endswith("/"):
path += "/"
- path += urlquote(str(add_on), safe="/:")
+ path += quote(str(add_on), safe="/:")
url_parsed[2] = path
return urlunparse(url_parsed)
@@ -199,9 +191,9 @@ def _get_ssl_args(url, ssl_details):
def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
- headers=None, headers_cb=None, ssl_details=None,
- check_status=True, allow_redirects=True, exception_cb=None,
- session=None, infinite=False, log_req_resp=True,
+ headers=None, headers_cb=None, headers_redact=None,
+ ssl_details=None, check_status=True, allow_redirects=True,
+ exception_cb=None, session=None, infinite=False, log_req_resp=True,
request_method=None):
"""Wrapper around requests.Session to read the url and retry if necessary
@@ -217,6 +209,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
:param headers: Optional dict of headers to send during request
:param headers_cb: Optional callable returning a dict of values to send as
headers during request
+ :param headers_redact: Optional list of header names to redact from the log
:param ssl_details: Optional dict providing key_file, ca_certs, and
cert_file keys for use on in ssl connections.
:param check_status: Optional boolean set True to raise when HTTPError
@@ -243,6 +236,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
req_args['method'] = request_method
if timeout is not None:
req_args['timeout'] = max(float(timeout), 0)
+ if headers_redact is None:
+ headers_redact = []
# It doesn't seem like config
# was added in older library versions (or newer ones either), thus we
# need to manually do the retries if it wasn't...
@@ -286,7 +281,14 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
for (k, v) in req_args.items():
if k == 'data':
continue
- filtered_req_args[k] = v
+ if k == 'headers' and headers_redact:
+ matched_headers = [k for k in headers_redact if v.get(k)]
+ if matched_headers:
+ filtered_req_args[k] = copy.deepcopy(v)
+ for key in matched_headers:
+ filtered_req_args[k][key] = REDACTED
+ else:
+ filtered_req_args[k] = v
try:
if log_req_resp:
@@ -339,8 +341,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
return None # Should throw before this...
-def wait_for_url(urls, max_wait=None, timeout=None,
- status_cb=None, headers_cb=None, sleep_time=1,
+def wait_for_url(urls, max_wait=None, timeout=None, status_cb=None,
+ headers_cb=None, headers_redact=None, sleep_time=1,
exception_cb=None, sleep_time_cb=None, request_method=None):
"""
urls: a list of urls to try
@@ -352,6 +354,7 @@ def wait_for_url(urls, max_wait=None, timeout=None,
status_cb: call method with string message when a url is not available
headers_cb: call method with single argument of url to get headers
for request.
+ headers_redact: a list of header names to redact from the log
exception_cb: call method with 2 arguments 'msg' (per status_cb) and
'exception', the exception that occurred.
sleep_time_cb: call method with 2 arguments (response, loop_n) that
@@ -415,8 +418,9 @@ def wait_for_url(urls, max_wait=None, timeout=None,
headers = {}
response = readurl(
- url, headers=headers, timeout=timeout,
- check_status=False, request_method=request_method)
+ url, headers=headers, headers_redact=headers_redact,
+ timeout=timeout, check_status=False,
+ request_method=request_method)
if not response.contents:
reason = "empty response [%s]" % (response.code)
url_exc = UrlError(ValueError(reason), code=response.code,
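
The redaction path deliberately deep-copies the headers before overwriting, so the outgoing request keeps the real values and only the logged copy shows REDACTED. The same pattern in isolation, assuming a plain dict of headers:

    import copy

    REDACTED = 'REDACTED'

    def redact_headers_for_log(headers, headers_redact):
        """Return a copy of headers safe to log; the original stays intact."""
        matched = [k for k in (headers_redact or []) if headers.get(k)]
        if not matched:
            return headers
        safe = copy.deepcopy(headers)
        for key in matched:
            safe[key] = REDACTED
        return safe

    headers = {'sensitive': 'sekret', 'server': 'blah'}
    print(redact_headers_for_log(headers, ['sensitive']))
    # {'sensitive': 'REDACTED', 'server': 'blah'} -- headers itself unchanged
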
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 15af1daf..670dbee6 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -9,14 +9,11 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
-
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from email.mime.text import MIMEText
-import six
-
from cloudinit import handlers
from cloudinit import log as logging
from cloudinit.url_helper import read_file_or_url, UrlError
@@ -28,6 +25,7 @@ LOG = logging.getLogger(__name__)
NOT_MULTIPART_TYPE = handlers.NOT_MULTIPART_TYPE
PART_FN_TPL = handlers.PART_FN_TPL
OCTET_TYPE = handlers.OCTET_TYPE
+INCLUDE_MAP = handlers.INCLUSION_TYPES_MAP
# Saves typing errors
CONTENT_TYPE = 'Content-Type'
@@ -118,7 +116,8 @@ class UserDataProcessor(object):
# Attempt to figure out the payloads content-type
if not ctype_orig:
ctype_orig = UNDEF_TYPE
- if ctype_orig in TYPE_NEEDED:
+ if ctype_orig in TYPE_NEEDED or (ctype_orig in
+ INCLUDE_MAP.values()):
ctype = find_ctype(payload)
if ctype is None:
ctype = ctype_orig
@@ -259,7 +258,7 @@ class UserDataProcessor(object):
# filename and type not be present
# or
# scalar(payload)
- if isinstance(ent, six.string_types):
+ if isinstance(ent, str):
ent = {'content': ent}
if not isinstance(ent, (dict)):
# TODO(harlowja) raise?
@@ -269,13 +268,13 @@ class UserDataProcessor(object):
mtype = ent.get('type')
if not mtype:
default = ARCHIVE_UNDEF_TYPE
- if isinstance(content, six.binary_type):
+ if isinstance(content, bytes):
default = ARCHIVE_UNDEF_BINARY_TYPE
mtype = handlers.type_from_starts_with(content, default)
maintype, subtype = mtype.split('/', 1)
if maintype == "text":
- if isinstance(content, six.binary_type):
+ if isinstance(content, bytes):
content = content.decode()
msg = MIMEText(content, _subtype=subtype)
else:
@@ -348,7 +347,7 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
msg.set_payload(data)
return msg
- if isinstance(raw_data, six.text_type):
+ if isinstance(raw_data, str):
bdata = raw_data.encode('utf-8')
else:
bdata = raw_data
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 76d7db78..985e7d20 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -15,6 +15,7 @@ import glob
import grp
import gzip
import hashlib
+import io
import json
import os
import os.path
@@ -30,34 +31,22 @@ import string
import subprocess
import sys
import time
-
-from errno import ENOENT, ENOEXEC
-
from base64 import b64decode, b64encode
-from six.moves.urllib import parse as urlparse
-
-import six
+from errno import ENOENT, ENOEXEC
+from functools import lru_cache
+from urllib import parse
from cloudinit import importer
from cloudinit import log as logging
-from cloudinit import mergers
-from cloudinit import safeyaml
-from cloudinit import temp_utils
-from cloudinit import type_utils
-from cloudinit import url_helper
-from cloudinit import version
-
-from cloudinit.settings import (CFG_BUILTIN)
-
-try:
- from functools import lru_cache
-except ImportError:
- def lru_cache():
- """pass-thru replace for Python3's lru_cache()"""
- def wrapper(f):
- return f
- return wrapper
-
+from cloudinit import (
+ mergers,
+ safeyaml,
+ temp_utils,
+ type_utils,
+ url_helper,
+ version,
+)
+from cloudinit.settings import CFG_BUILTIN
_DNS_REDIRECT_IP = None
LOG = logging.getLogger(__name__)
@@ -79,14 +68,19 @@ CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'],
@lru_cache()
-def get_architecture(target=None):
+def get_dpkg_architecture(target=None):
+ """Return the sanitized string output by `dpkg --print-architecture`.
+
+ N.B. This function is wrapped in functools.lru_cache, so repeated calls
+ won't shell out every time.
+ """
out, _ = subp(['dpkg', '--print-architecture'], capture=True,
target=target)
return out.strip()
@lru_cache()
-def _lsb_release(target=None):
+def lsb_release(target=None):
fmap = {'Codename': 'codename', 'Description': 'description',
'Distributor ID': 'id', 'Release': 'release'}
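
Both get_dpkg_architecture and lsb_release are now memoized with functools.lru_cache, so repeated calls cost one subprocess in total; the flip side is that tests which change the environment must invalidate the cache, as the FreeBSD distro test earlier in this patch does with util.is_BSD.cache_clear(). A sketch of the tradeoff, using subprocess in place of util.subp:

    import subprocess
    from functools import lru_cache

    @lru_cache()
    def dpkg_architecture():
        """Shell out once; subsequent calls return the cached string."""
        out = subprocess.check_output(['dpkg', '--print-architecture'])
        return out.decode('utf-8').strip()

    # dpkg_architecture(); dpkg_architecture()  # one subprocess, two results
    # dpkg_architecture.cache_clear()           # force a fresh shell-out (tests)
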
@@ -109,19 +103,11 @@ def _lsb_release(target=None):
return data
-def lsb_release(target=None):
- if target_path(target) != "/":
- # do not use or update cache if target is provided
- return _lsb_release(target)
-
- return _lsb_release()
-
-
def target_path(target, path=None):
# return 'path' inside target, accepting target as None
if target in (None, ""):
target = "/"
- elif not isinstance(target, six.string_types):
+ elif not isinstance(target, str):
raise ValueError("Unexpected input for target: %s" % target)
else:
target = os.path.abspath(target)
@@ -141,14 +127,14 @@ def target_path(target, path=None):
def decode_binary(blob, encoding='utf-8'):
# Converts a binary type into a text type using given encoding.
- if isinstance(blob, six.string_types):
+ if isinstance(blob, str):
return blob
return blob.decode(encoding)
def encode_text(text, encoding='utf-8'):
# Converts a text string into a binary type using given encoding.
- if isinstance(text, six.binary_type):
+ if isinstance(text, bytes):
return text
return text.encode(encoding)
@@ -178,8 +164,7 @@ def fully_decoded_payload(part):
# bytes, first try to decode to str via CT charset, and failing that, try
# utf-8 using surrogate escapes.
cte_payload = part.get_payload(decode=True)
- if (six.PY3 and
- part.get_content_maintype() == 'text' and
+ if (part.get_content_maintype() == 'text' and
isinstance(cte_payload, bytes)):
charset = part.get_charset()
if charset and charset.input_codec:
@@ -243,7 +228,7 @@ class ProcessExecutionError(IOError):
else:
self.description = description
- if not isinstance(exit_code, six.integer_types):
+ if not isinstance(exit_code, int):
self.exit_code = self.empty_attr
else:
self.exit_code = exit_code
@@ -284,7 +269,7 @@ class ProcessExecutionError(IOError):
"""
if data is bytes object, decode
"""
- return text.decode() if isinstance(text, six.binary_type) else text
+ return text.decode() if isinstance(text, bytes) else text
def _indent_text(self, text, indent_level=8):
"""
@@ -293,7 +278,7 @@ class ProcessExecutionError(IOError):
cr = '\n'
indent = ' ' * indent_level
# if input is bytes, return bytes
- if isinstance(text, six.binary_type):
+ if isinstance(text, bytes):
cr = cr.encode()
indent = indent.encode()
# remove any newlines at end of text first to prevent unneeded blank
@@ -325,9 +310,6 @@ class SeLinuxGuard(object):
return
path = os.path.realpath(self.path)
- # path should be a string, not unicode
- if six.PY2:
- path = str(path)
try:
stats = os.lstat(path)
self.selinux.matchpathcon(path, stats[stat.ST_MODE])
@@ -372,7 +354,7 @@ def is_true(val, addons=None):
check_set = TRUE_STRINGS
if addons:
check_set = list(check_set) + addons
- if six.text_type(val).lower().strip() in check_set:
+ if str(val).lower().strip() in check_set:
return True
return False
@@ -383,7 +365,7 @@ def is_false(val, addons=None):
check_set = FALSE_STRINGS
if addons:
check_set = list(check_set) + addons
- if six.text_type(val).lower().strip() in check_set:
+ if str(val).lower().strip() in check_set:
return True
return False
@@ -400,9 +382,10 @@ def translate_bool(val, addons=None):
def rand_str(strlen=32, select_from=None):
+ r = random.SystemRandom()
if not select_from:
select_from = string.ascii_letters + string.digits
- return "".join([random.choice(select_from) for _x in range(0, strlen)])
+ return "".join([r.choice(select_from) for _x in range(0, strlen)])
def rand_dict_key(dictionary, postfix=None):
@@ -443,7 +426,7 @@ def uniq_merge_sorted(*lists):
def uniq_merge(*lists):
combined_list = []
for a_list in lists:
- if isinstance(a_list, six.string_types):
+ if isinstance(a_list, str):
a_list = a_list.strip().split(",")
# Kickout the empty ones
a_list = [a for a in a_list if len(a)]
@@ -466,7 +449,7 @@ def clean_filename(fn):
def decomp_gzip(data, quiet=True, decode=True):
try:
- buf = six.BytesIO(encode_text(data))
+ buf = io.BytesIO(encode_text(data))
with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh:
# E1101 is https://github.com/PyCQA/pylint/issues/1444
if decode:
@@ -477,7 +460,7 @@ def decomp_gzip(data, quiet=True, decode=True):
if quiet:
return data
else:
- raise DecompressionError(six.text_type(e))
+ raise DecompressionError(str(e))
def extract_usergroup(ug_pair):
@@ -535,18 +518,9 @@ def multi_log(text, console=True, stderr=True,
log.log(log_level, text)
-def is_ipv4(instr):
- """determine if input string is a ipv4 address. return boolean."""
- toks = instr.split('.')
- if len(toks) != 4:
- return False
-
- try:
- toks = [x for x in toks if 0 <= int(x) < 256]
- except Exception:
- return False
-
- return len(toks) == 4
+@lru_cache()
+def is_BSD():
+ return 'BSD' in platform.system()
@lru_cache()
@@ -554,6 +528,16 @@ def is_FreeBSD():
return system_info()['variant'] == "freebsd"
+@lru_cache()
+def is_NetBSD():
+ return system_info()['variant'] == "netbsd"
+
+
+@lru_cache()
+def is_OpenBSD():
+ return system_info()['variant'] == "openbsd"
+
+
def get_cfg_option_bool(yobj, key, default=False):
if key not in yobj:
return default
@@ -564,7 +548,7 @@ def get_cfg_option_str(yobj, key, default=None):
if key not in yobj:
return default
val = yobj[key]
- if not isinstance(val, six.string_types):
+ if not isinstance(val, str):
val = str(val)
return val
@@ -627,10 +611,9 @@ def get_linux_distro():
flavor = match.groupdict()['codename']
if distro_name == 'rhel':
distro_name = 'redhat'
- elif os.path.exists('/bin/freebsd-version'):
- distro_name = 'freebsd'
- distro_version, _ = subp(['uname', '-r'])
- distro_version = distro_version.strip()
+ elif is_BSD():
+ distro_name = platform.system().lower()
+ distro_version = platform.release()
else:
dist = ('', '', '')
try:
@@ -658,7 +641,7 @@ def system_info():
'system': platform.system(),
'release': platform.release(),
'python': platform.python_version(),
- 'uname': platform.uname(),
+ 'uname': list(platform.uname()),
'dist': get_linux_distro()
}
system = info['system'].lower()
@@ -677,7 +660,7 @@ def system_info():
var = 'suse'
else:
var = 'linux'
- elif system in ('windows', 'darwin', "freebsd"):
+ elif system in ('windows', 'darwin', "freebsd", "netbsd", "openbsd"):
var = system
info['variant'] = var
@@ -705,7 +688,7 @@ def get_cfg_option_list(yobj, key, default=None):
if isinstance(val, (list)):
cval = [v for v in val]
return cval
- if not isinstance(val, six.string_types):
+ if not isinstance(val, str):
val = str(val)
return [val]
@@ -726,7 +709,7 @@ def get_cfg_by_path(yobj, keyp, default=None):
@return: The value of the item at keyp."
is not found."""
- if isinstance(keyp, six.string_types):
+ if isinstance(keyp, str):
keyp = keyp.split("/")
cur = yobj
for tok in keyp:
@@ -824,7 +807,7 @@ def make_url(scheme, host, port=None,
pieces.append(query or '')
pieces.append(fragment or '')
- return urlparse.urlunparse(pieces)
+ return parse.urlunparse(pieces)
def mergemanydict(srcs, reverse=False):
@@ -1033,7 +1016,7 @@ def read_conf_with_confd(cfgfile):
if "conf_d" in cfg:
confd = cfg['conf_d']
if confd:
- if not isinstance(confd, six.string_types):
+ if not isinstance(confd, str):
raise TypeError(("Config file %s contains 'conf_d' "
"with non-string type %s") %
(cfgfile, type_utils.obj_name(confd)))
@@ -1051,7 +1034,7 @@ def read_conf_with_confd(cfgfile):
def read_conf_from_cmdline(cmdline=None):
- # return a dictionary or config on the cmdline or None
+ # return a dictionary of config on the cmdline or None
return load_yaml(read_cc_from_cmdline(cmdline=cmdline))
@@ -1059,11 +1042,12 @@ def read_cc_from_cmdline(cmdline=None):
# this should support reading cloud-config information from
# the kernel command line. It is intended to support content of the
# format:
- # cc: <yaml content here> [end_cc]
+ # cc: <yaml content here|urlencoded yaml content> [end_cc]
# this would include:
# cc: ssh_import_id: [smoser, kirkland]\\n
# cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
# cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc
+ # cc:ssh_import_id: %5Bsmoser%5D end_cc
if cmdline is None:
cmdline = get_cmdline()
@@ -1078,9 +1062,9 @@ def read_cc_from_cmdline(cmdline=None):
end = cmdline.find(tag_end, begin + begin_l)
if end < 0:
end = clen
- tokens.append(cmdline[begin + begin_l:end].lstrip().replace("\\n",
- "\n"))
-
+ tokens.append(
+ parse.unquote(
+ cmdline[begin + begin_l:end].lstrip()).replace("\\n", "\n"))
begin = cmdline.find(tag_begin, end + end_l)
return '\n'.join(tokens)
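
Tokens between cc: and end_cc now pass through urllib.parse.unquote before the literal-\\n replacement, which is what lets the urlencoded cases in the new tests parse. The decoding step on its own:

    from urllib.parse import unquote

    token = ('ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%0A'
             'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C%20echo%20hi%20%5D')
    print(unquote(token))
    # ssh_import_id: [smoser, bob]
    # runcmd: [ [ ls, -l ], echo hi ]
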
@@ -1225,7 +1209,7 @@ def is_resolvable_url(url):
"""determine if this url is resolvable (existing or ip)."""
return log_time(logfunc=LOG.debug, msg="Resolving URL: " + url,
func=is_resolvable,
- args=(urlparse.urlparse(url).hostname,))
+ args=(parse.urlparse(url).hostname,))
def search_for_mirror(candidates):
@@ -1233,9 +1217,14 @@ def search_for_mirror(candidates):
Search through a list of mirror urls for one that works
This needs to return quickly.
"""
+ if candidates is None:
+ return None
+
+ LOG.debug("search for mirror in candidates: '%s'", candidates)
for cand in candidates:
try:
if is_resolvable_url(cand):
+ LOG.debug("found working mirror: '%s'", cand)
return cand
except Exception:
pass
@@ -1256,6 +1245,67 @@ def close_stdin():
os.dup2(fp.fileno(), sys.stdin.fileno())
+def find_devs_with_freebsd(criteria=None, oformat='device',
+ tag=None, no_cache=False, path=None):
+ devlist = []
+ if not criteria:
+ return glob.glob("/dev/msdosfs/*") + glob.glob("/dev/iso9660/*")
+ if criteria.startswith("LABEL="):
+ label = criteria.lstrip("LABEL=")
+ devlist = [
+ p for p in ['/dev/msdosfs/' + label, '/dev/iso9660/' + label]
+ if os.path.exists(p)]
+ elif criteria == "TYPE=vfat":
+ devlist = glob.glob("/dev/msdosfs/*")
+ elif criteria == "TYPE=iso9660":
+ devlist = glob.glob("/dev/iso9660/*")
+ return devlist
+
+
+def find_devs_with_netbsd(criteria=None, oformat='device',
+ tag=None, no_cache=False, path=None):
+ devlist = []
+ label = None
+ _type = None
+ if criteria:
+ if criteria.startswith("LABEL="):
+ label = criteria.lstrip("LABEL=")
+ if criteria.startswith("TYPE="):
+ _type = criteria.lstrip("TYPE=")
+ out, _err = subp(['sysctl', '-n', 'hw.disknames'], rcs=[0])
+ for dev in out.split():
+ if label or _type:
+ mscdlabel_out, _ = subp(['mscdlabel', dev], rcs=[0, 1])
+ if label and not ('label "%s"' % label) in mscdlabel_out:
+ continue
+ if _type == "iso9660" and "ISO filesystem" not in mscdlabel_out:
+ continue
+ if _type == "vfat" and "ISO filesystem" in mscdlabel_out:
+ continue
+ devlist.append('/dev/' + dev)
+ return devlist
+
+
+def find_devs_with_openbsd(criteria=None, oformat='device',
+ tag=None, no_cache=False, path=None):
+ out, _err = subp(['sysctl', '-n', 'hw.disknames'], rcs=[0])
+ devlist = []
+ for entry in out.split(','):
+ if not entry.endswith(':'):
+ # ffs partition with a serial, not a config-drive
+ continue
+ if entry == 'fd0:':
+ continue
+ part_id = 'a' if entry.startswith('cd') else 'i'
+ devlist.append(entry[:-1] + part_id)
+ if criteria == "TYPE=iso9660":
+ devlist = [i for i in devlist if i.startswith('cd')]
+ elif criteria in ["LABEL=CONFIG-2", "TYPE=vfat"]:
+ devlist = [i for i in devlist if not i.startswith('cd')]
+ elif criteria:
+ LOG.debug("Unexpected criteria: %s", criteria)
+ return ['/dev/' + i for i in devlist]
+
+
def find_devs_with(criteria=None, oformat='device',
tag=None, no_cache=False, path=None):
"""
@@ -1265,6 +1315,16 @@ def find_devs_with(criteria=None, oformat='device',
LABEL=<label>
UUID=<uuid>
"""
+ if is_FreeBSD():
+ return find_devs_with_freebsd(criteria, oformat,
+ tag, no_cache, path)
+ elif is_NetBSD():
+ return find_devs_with_netbsd(criteria, oformat,
+ tag, no_cache, path)
+ elif is_OpenBSD():
+ return find_devs_with_openbsd(criteria, oformat,
+ tag, no_cache, path)
+
blk_id_cmd = ['blkid']
options = []
if criteria:
@@ -1357,7 +1417,7 @@ def uniq_list(in_list):
def load_file(fname, read_cb=None, quiet=False, decode=True):
LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
- ofh = six.BytesIO()
+ ofh = io.BytesIO()
try:
with open(fname, 'rb') as ifh:
pipe_in_out(ifh, ofh, chunk_cb=read_cb)
@@ -1806,6 +1866,7 @@ def time_rfc2822():
return ts
+@lru_cache()
def boottime():
"""Use sysctlbyname(3) via ctypes to find kern.boottime
@@ -1815,6 +1876,7 @@ def boottime():
@return boottime: float to be compatible with linux
"""
import ctypes
+ import ctypes.util
NULL_BYTES = b"\x00"
@@ -1823,7 +1885,7 @@ def boottime():
("tv_sec", ctypes.c_int64),
("tv_usec", ctypes.c_int64)
]
- libc = ctypes.CDLL('/lib/libc.so.7')
+ libc = ctypes.CDLL(ctypes.util.find_library('c'))
size = ctypes.c_size_t()
size.value = ctypes.sizeof(timeval)
buf = timeval()
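
boottime is now cached with lru_cache, and the libc handle is obtained portably: ctypes.util.find_library('c') resolves the platform's libc name instead of hard-coding the FreeBSD-only /lib/libc.so.7 path. The lookup on its own (the resolved name varies by platform and may be None on unusual systems):

    import ctypes
    import ctypes.util

    libc_name = ctypes.util.find_library('c')  # e.g. 'libc.so.7' on FreeBSD
    libc = ctypes.CDLL(libc_name)
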
@@ -2053,13 +2115,13 @@ def subp(args, data=None, rcs=None, env=None, capture=True,
# Popen converts entries in the arguments array from non-bytes to bytes.
# When locale is unset it may use ascii for that encoding which can
# cause UnicodeDecodeErrors. (LP: #1751051)
- if isinstance(args, six.binary_type):
+ if isinstance(args, bytes):
bytes_args = args
- elif isinstance(args, six.string_types):
+ elif isinstance(args, str):
bytes_args = args.encode("utf-8")
else:
bytes_args = [
- x if isinstance(x, six.binary_type) else x.encode("utf-8")
+ x if isinstance(x, bytes) else x.encode("utf-8")
for x in args]
try:
sp = subprocess.Popen(bytes_args, stdout=stdout,
@@ -2138,10 +2200,10 @@ def shellify(cmdlist, add_header=True):
if isinstance(args, (list, tuple)):
fixed = []
for f in args:
- fixed.append("'%s'" % (six.text_type(f).replace("'", escaped)))
+ fixed.append("'%s'" % (str(f).replace("'", escaped)))
content = "%s%s\n" % (content, ' '.join(fixed))
cmds_made += 1
- elif isinstance(args, six.string_types):
+ elif isinstance(args, str):
content = "%s%s\n" % (content, args)
cmds_made += 1
else:
@@ -2267,7 +2329,7 @@ def expand_package_list(version_fmt, pkgs):
pkglist = []
for pkg in pkgs:
- if isinstance(pkg, six.string_types):
+ if isinstance(pkg, str):
pkglist.append(pkg)
continue
@@ -2767,7 +2829,7 @@ def read_dmi_data(key):
def message_from_string(string):
if sys.version_info[:2] < (2, 7):
- return email.message_from_file(six.StringIO(string))
+ return email.message_from_file(io.StringIO(string))
return email.message_from_string(string)
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 45234499..96682f8a 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "19.4"
+__VERSION__ = "20.2"
_PACKAGED_VERSION = '@@PACKAGED_VERSION@@'
FEATURES = [
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index 99f96ea1..1bb97f83 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -2,7 +2,7 @@
# The top level settings are used as module
# and system configuration.
-{% if variant in ["freebsd"] %}
+{% if variant.endswith("bsd") %}
syslog_fix_perms: root:wheel
{% elif variant in ["suse"] %}
syslog_fix_perms: root:root
@@ -33,7 +33,7 @@ ssh_pwauth: 0
# This will cause the set+update hostname module to not operate (if true)
preserve_hostname: false
-{% if variant in ["freebsd"] %}
+{% if variant.endswith("bsd") %}
# This should not be required, but leave it in place until the real cause of
# not finding -any- datasources is resolved.
datasource_list: ['NoCloud', 'ConfigDrive', 'Azure', 'OpenStack', 'Ec2']
@@ -55,18 +55,22 @@ network:
# The modules that run in the 'init' stage
cloud_init_modules:
- migrator
+{% if variant not in ["netbsd"] %}
- seed_random
+{% endif %}
- bootcmd
- write-files
+{% if variant not in ["netbsd"] %}
- growpart
- resizefs
-{% if variant not in ["freebsd"] %}
+{% endif %}
+{% if variant not in ["freebsd", "netbsd"] %}
- disk_setup
- mounts
{% endif %}
- set_hostname
- update_hostname
-{% if variant not in ["freebsd"] %}
+{% if variant not in ["freebsd", "netbsd"] %}
- update_etc_hosts
- ca-certs
- rsyslog
@@ -100,7 +104,7 @@ cloud_config_modules:
{% if variant in ["suse"] %}
- zypper-add-repo
{% endif %}
-{% if variant not in ["freebsd"] %}
+{% if variant not in ["freebsd", "netbsd"] %}
- ntp
{% endif %}
- timezone
@@ -121,11 +125,9 @@ cloud_final_modules:
{% if variant in ["ubuntu", "unknown"] %}
- ubuntu-drivers
{% endif %}
-{% if variant not in ["freebsd"] %}
- puppet
- chef
- mcollective
-{% endif %}
- salt-minion
- rightscale_userdata
- scripts-vendor
@@ -143,7 +145,7 @@ cloud_final_modules:
# (not accessible to handlers/transforms)
system_info:
# This will affect which distro class gets used
-{% if variant in ["amazon", "arch", "centos", "debian", "fedora", "freebsd", "rhel", "suse", "ubuntu"] %}
+{% if variant in ["amazon", "arch", "centos", "debian", "fedora", "freebsd", "netbsd", "openbsd", "rhel", "suse", "ubuntu"] %}
distro: {{ variant }}
{% else %}
# Unknown/fallback distro.
@@ -158,6 +160,9 @@ system_info:
      groups: [adm, audio, cdrom, dialout, dip, floppy, lxd, netdev, plugdev, sudo, video]
      sudo: ["ALL=(ALL) NOPASSWD:ALL"]
      shell: /bin/bash
+{# SRU_BLOCKER: do not ship network renderers on Xenial, Bionic or Eoan #}
+   network:
+     renderers: ['netplan', 'eni', 'sysconfig']
    # Automatically discover the best ntp_client
    ntp_client: auto
    # Other config here will be given to the distro class and/or path classes
@@ -226,4 +231,24 @@ system_info:
      groups: [wheel]
      sudo: ["ALL=(ALL) NOPASSWD:ALL"]
      shell: /bin/tcsh
+{% elif variant in ["netbsd"] %}
+   default_user:
+     name: netbsd
+     lock_passwd: True
+     gecos: NetBSD
+     groups: [wheel]
+     sudo: ["ALL=(ALL) NOPASSWD:ALL"]
+     shell: /bin/sh
+{% elif variant in ["openbsd"] %}
+   default_user:
+     name: openbsd
+     lock_passwd: True
+     gecos: OpenBSD
+     groups: [wheel]
+     sudo: ["ALL=(ALL) NOPASSWD:ALL"]
+     shell: /bin/ksh
+{% endif %}
+{% if variant in ["freebsd", "netbsd", "openbsd"] %}
+   network:
+     renderers: ['{{ variant }}']
 {% endif %}
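
The template now branches on variant.endswith("bsd") and per-variant module
lists. To preview what a given variant renders, a sketch using the
standalone jinja2 package is enough; note that cloud-init feeds this file
through its own templater, so this is only a convenience for inspection:

    import textwrap
    from jinja2 import Template

    SRC = textwrap.dedent('''\
        {% if variant.endswith("bsd") %}
        syslog_fix_perms: root:wheel
        {% elif variant in ["suse"] %}
        syslog_fix_perms: root:root
        {% endif %}
        ''')

    for variant in ("freebsd", "netbsd", "openbsd", "suse", "ubuntu"):
        rendered = Template(SRC).render(variant=variant).strip()
        print(variant, "->", rendered or "(no entry)")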
diff --git a/doc-requirements.txt b/doc-requirements.txt
index e8977de9..d5f921e3 100644
--- a/doc-requirements.txt
+++ b/doc-requirements.txt
@@ -1,5 +1,5 @@
doc8
m2r
-sphinx
+sphinx<2
sphinx_rtd_theme
pyyaml
diff --git a/doc/examples/cloud-config-apt.txt b/doc/examples/cloud-config-apt.txt
index ff8206f6..004894b7 100644
--- a/doc/examples/cloud-config-apt.txt
+++ b/doc/examples/cloud-config-apt.txt
@@ -1,3 +1,4 @@
+#cloud-config
# apt_pipelining (configure Acquire::http::Pipeline-Depth)
# Default: disables HTTP pipelining. Certain web servers, such
# as S3 do not pipeline properly (LP: #948461).
@@ -141,7 +142,7 @@ apt:
# as above, allowing one config to cover different per-arch mirrors
# security is optional, if not defined it is set to the same value as primary
security:
- uri: http://security.ubuntu.com/ubuntu
+ - uri: http://security.ubuntu.com/ubuntu
# If search_dns is set for security the searched pattern is:
# <distro>-security-mirror
@@ -222,19 +223,19 @@ apt:
# This allows merging between multiple input files, unlike a list:
# cloud-config1
# sources:
- # s1: {'key': 'key1', 'source': 'source1'}
+ # s1: {'key': 'key1', 'source': 'source1'}
# cloud-config2
# sources:
- # s2: {'key': 'key2'}
- # s1: {'keyserver': 'foo'}
+ # s2: {'key': 'key2'}
+ # s1: {'keyserver': 'foo'}
# This would be merged to
# sources:
- # s1:
- # keyserver: foo
- # key: key1
- # source: source1
- # s2:
- # key: key2
+ # s1:
+ # keyserver: foo
+ # key: key1
+ # source: source1
+ # s2:
+ # key: key2
#
# The following examples number the subfeatures per sources entry to ease
# identification in discussions.
@@ -314,15 +315,15 @@ apt:
# As with keyid's this can be specified with or without some actual source
# content.
key: | # The value needs to start with -----BEGIN PGP PUBLIC KEY BLOCK-----
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: SKS 1.0.10
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: SKS 1.0.10
- mI0ESpA3UQEEALdZKVIMq0j6qWAXAyxSlF63SvPVIgxHPb9Nk0DZUixn+akqytxG4zKCONz6
- qLjoBBfHnynyVLfT4ihg9an1PqxRnTO+JKQxl8NgKGz6Pon569GtAOdWNKw15XKinJTDLjnj
- 9y96ljJqRcpV9t/WsIcdJPcKFR5voHTEoABE2aEXABEBAAG0GUxhdW5jaHBhZCBQUEEgZm9y
- IEFsZXN0aWOItgQTAQIAIAUCSpA3UQIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEA7H
- 5Qi+CcVxWZ8D/1MyYvfj3FJPZUm2Yo1zZsQ657vHI9+pPouqflWOayRR9jbiyUFIn0VdQBrP
- t0FwvnOFArUovUWoKAEdqR8hPy3M3APUZjl5K4cMZR/xaMQeQRZ5CHpS4DBKURKAHC0ltS5o
- uBJKQOZm5iltJp15cgyIkBkGe8Mx18VFyVglAZey
- =Y2oI
- -----END PGP PUBLIC KEY BLOCK-----
+ mI0ESpA3UQEEALdZKVIMq0j6qWAXAyxSlF63SvPVIgxHPb9Nk0DZUixn+akqytxG4zKCONz6
+ qLjoBBfHnynyVLfT4ihg9an1PqxRnTO+JKQxl8NgKGz6Pon569GtAOdWNKw15XKinJTDLjnj
+ 9y96ljJqRcpV9t/WsIcdJPcKFR5voHTEoABE2aEXABEBAAG0GUxhdW5jaHBhZCBQUEEgZm9y
+ IEFsZXN0aWOItgQTAQIAIAUCSpA3UQIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEA7H
+ 5Qi+CcVxWZ8D/1MyYvfj3FJPZUm2Yo1zZsQ657vHI9+pPouqflWOayRR9jbiyUFIn0VdQBrP
+ t0FwvnOFArUovUWoKAEdqR8hPy3M3APUZjl5K4cMZR/xaMQeQRZ5CHpS4DBKURKAHC0ltS5o
+ uBJKQOZm5iltJp15cgyIkBkGe8Mx18VFyVglAZey
+ =Y2oI
+ -----END PGP PUBLIC KEY BLOCK-----
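
The keyed sources: layout documented above exists so that entries can merge
by name across multiple cloud-config parts. A rough sketch of that
keyed-dict merge; cloud-init's real merge machinery is pluggable and more
nuanced, so this only illustrates the behavior the comments describe:

    def merge_sources(base, update):
        # Keys union; later configs fill in or override per-key fields.
        merged = {name: dict(fields) for name, fields in base.items()}
        for name, fields in update.items():
            merged.setdefault(name, {}).update(fields)
        return merged

    cfg1 = {'s1': {'key': 'key1', 'source': 'source1'}}
    cfg2 = {'s2': {'key': 'key2'}, 's1': {'keyserver': 'foo'}}
    print(merge_sources(cfg1, cfg2))
    # {'s1': {'key': 'key1', 'source': 'source1', 'keyserver': 'foo'},
    #  's2': {'key': 'key2'}}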
diff --git a/doc/examples/cloud-config-boot-cmds.txt b/doc/examples/cloud-config-boot-cmds.txt
index 84e487a5..f9357b52 100644
--- a/doc/examples/cloud-config-boot-cmds.txt
+++ b/doc/examples/cloud-config-boot-cmds.txt
@@ -11,5 +11,5 @@
# - the INSTANCE_ID variable will be set to the current instance id.
# - you can use 'cloud-init-per' command to help only run once
bootcmd:
- - echo 192.168.1.130 us.archive.ubuntu.com >> /etc/hosts
- - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
+ - echo 192.168.1.130 us.archive.ubuntu.com >> /etc/hosts
+ - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
diff --git a/doc/examples/cloud-config-chef-oneiric.txt b/doc/examples/cloud-config-chef-oneiric.txt
index 75c9aeed..241fbf9b 100644
--- a/doc/examples/cloud-config-chef-oneiric.txt
+++ b/doc/examples/cloud-config-chef-oneiric.txt
@@ -13,73 +13,74 @@
# Key from http://apt.opscode.com/packages@opscode.com.gpg.key
apt:
sources:
- - source: "deb http://apt.opscode.com/ $RELEASE-0.10 main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: GnuPG v1.4.9 (GNU/Linux)
+ source1:
+ source: "deb http://apt.opscode.com/ $RELEASE-0.10 main"
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: GnuPG v1.4.9 (GNU/Linux)
- mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
- twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
- dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
- JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
- ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
- XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
- DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
- sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
- Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
- YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
- CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
- +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu5Ag0ESmkLtBAIAIO2SwlR
- lU5i6gTOp42RHWW7/pmW78CwUqJnYqnXROrt3h9F9xrsGkH0Fh1FRtsnncgzIhvh
- DLQnRHnkXm0ws0jV0PF74ttoUT6BLAUsFi2SPP1zYNJ9H9fhhK/pjijtAcQwdgxu
- wwNJ5xCEscBZCjhSRXm0d30bK1o49Cow8ZIbHtnXVP41c9QWOzX/LaGZsKQZnaMx
- EzDk8dyyctR2f03vRSVyTFGgdpUcpbr9eTFVgikCa6ODEBv+0BnCH6yGTXwBid9g
- w0o1e/2DviKUWCC+AlAUOubLmOIGFBuI4UR+rux9affbHcLIOTiKQXv79lW3P7W8
- AAfniSQKfPWXrrcAAwUH/2XBqD4Uxhbs25HDUUiM/m6Gnlj6EsStg8n0nMggLhuN
- QmPfoNByMPUqvA7sULyfr6xCYzbzRNxABHSpf85FzGQ29RF4xsA4vOOU8RDIYQ9X
- Q8NqqR6pydprRFqWe47hsAN7BoYuhWqTtOLSBmnAnzTR5pURoqcquWYiiEavZixJ
- 3ZRAq/HMGioJEtMFrvsZjGXuzef7f0ytfR1zYeLVWnL9Bd32CueBlI7dhYwkFe+V
- Ep5jWOCj02C1wHcwt+uIRDJV6TdtbIiBYAdOMPk15+VBdweBXwMuYXr76+A7VeDL
- zIhi7tKFo6WiwjKZq0dzctsJJjtIfr4K4vbiD9Ojg1iISQQYEQIACQUCSmkLtAIb
- DAAKCRApQKupg++CauISAJ9CxYPOKhOxalBnVTLeNUkAHGg2gACeIsbobtaD4ZHG
- 0GLl8EkfA8uhluM=
- =zKAm
- -----END PGP PUBLIC KEY BLOCK-----
+ mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
+ twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
+ dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
+ JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
+ ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
+ XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
+ DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
+ sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
+ Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
+ YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
+ CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
+ +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu5Ag0ESmkLtBAIAIO2SwlR
+ lU5i6gTOp42RHWW7/pmW78CwUqJnYqnXROrt3h9F9xrsGkH0Fh1FRtsnncgzIhvh
+ DLQnRHnkXm0ws0jV0PF74ttoUT6BLAUsFi2SPP1zYNJ9H9fhhK/pjijtAcQwdgxu
+ wwNJ5xCEscBZCjhSRXm0d30bK1o49Cow8ZIbHtnXVP41c9QWOzX/LaGZsKQZnaMx
+ EzDk8dyyctR2f03vRSVyTFGgdpUcpbr9eTFVgikCa6ODEBv+0BnCH6yGTXwBid9g
+ w0o1e/2DviKUWCC+AlAUOubLmOIGFBuI4UR+rux9affbHcLIOTiKQXv79lW3P7W8
+ AAfniSQKfPWXrrcAAwUH/2XBqD4Uxhbs25HDUUiM/m6Gnlj6EsStg8n0nMggLhuN
+ QmPfoNByMPUqvA7sULyfr6xCYzbzRNxABHSpf85FzGQ29RF4xsA4vOOU8RDIYQ9X
+ Q8NqqR6pydprRFqWe47hsAN7BoYuhWqTtOLSBmnAnzTR5pURoqcquWYiiEavZixJ
+ 3ZRAq/HMGioJEtMFrvsZjGXuzef7f0ytfR1zYeLVWnL9Bd32CueBlI7dhYwkFe+V
+ Ep5jWOCj02C1wHcwt+uIRDJV6TdtbIiBYAdOMPk15+VBdweBXwMuYXr76+A7VeDL
+ zIhi7tKFo6WiwjKZq0dzctsJJjtIfr4K4vbiD9Ojg1iISQQYEQIACQUCSmkLtAIb
+ DAAKCRApQKupg++CauISAJ9CxYPOKhOxalBnVTLeNUkAHGg2gACeIsbobtaD4ZHG
+ 0GLl8EkfA8uhluM=
+ =zKAm
+ -----END PGP PUBLIC KEY BLOCK-----
chef:
- # 11.10 will fail if install_type is "gems" (LP: #960576)
- install_type: "packages"
+ # 11.10 will fail if install_type is "gems" (LP: #960576)
+ install_type: "packages"
- # Chef settings
- server_url: "https://chef.yourorg.com:4000"
+ # Chef settings
+ server_url: "https://chef.yourorg.com:4000"
- # Node Name
- # Defaults to the instance-id if not present
- node_name: "your-node-name"
+ # Node Name
+ # Defaults to the instance-id if not present
+ node_name: "your-node-name"
- # Environment
- # Defaults to '_default' if not present
- environment: "production"
+ # Environment
+ # Defaults to '_default' if not present
+ environment: "production"
- # Default validation name is chef-validator
- validation_name: "yourorg-validator"
+ # Default validation name is chef-validator
+ validation_name: "yourorg-validator"
- # value of validation_cert is not used if validation_key defined,
- # but variable needs to be defined (LP: #960547)
- validation_cert: "unused"
- validation_key: |
- -----BEGIN RSA PRIVATE KEY-----
- YOUR-ORGS-VALIDATION-KEY-HERE
- -----END RSA PRIVATE KEY-----
-
- # A run list for a first boot json
- run_list:
- - "recipe[apache2]"
- - "role[db]"
+ # value of validation_cert is not used if validation_key defined,
+ # but variable needs to be defined (LP: #960547)
+ validation_cert: "unused"
+ validation_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ YOUR-ORGS-VALIDATION-KEY-HERE
+ -----END RSA PRIVATE KEY-----
- # Specify a list of initial attributes used by the cookbooks
- initial_attributes:
+ # A run list for a first boot json
+ run_list:
+ - "recipe[apache2]"
+ - "role[db]"
+
+ # Specify a list of initial attributes used by the cookbooks
+ initial_attributes:
apache:
prefork:
maxclients: 100
diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt
index 2320e01a..39957e58 100644
--- a/doc/examples/cloud-config-chef.txt
+++ b/doc/examples/cloud-config-chef.txt
@@ -52,55 +52,55 @@ apt:
chef:
- # Valid values are 'gems' and 'packages' and 'omnibus'
- install_type: "packages"
-
- # Boolean: run 'install_type' code even if chef-client
- # appears already installed.
- force_install: false
-
- # Chef settings
- server_url: "https://chef.yourorg.com"
-
- # Node Name
- # Defaults to the instance-id if not present
- node_name: "your-node-name"
-
- # Environment
- # Defaults to '_default' if not present
- environment: "production"
-
- # Default validation name is chef-validator
- validation_name: "yourorg-validator"
- # if validation_cert's value is "system" then it is expected
- # that the file already exists on the system.
- validation_cert: |
- -----BEGIN RSA PRIVATE KEY-----
- YOUR-ORGS-VALIDATION-KEY-HERE
- -----END RSA PRIVATE KEY-----
-
- # A run list for a first boot json, an example (not required)
- run_list:
- - "recipe[apache2]"
- - "role[db]"
-
- # Specify a list of initial attributes used by the cookbooks
- initial_attributes:
+ # Valid values are 'gems' and 'packages' and 'omnibus'
+ install_type: "packages"
+
+ # Boolean: run 'install_type' code even if chef-client
+ # appears already installed.
+ force_install: false
+
+ # Chef settings
+ server_url: "https://chef.yourorg.com"
+
+ # Node Name
+ # Defaults to the instance-id if not present
+ node_name: "your-node-name"
+
+ # Environment
+ # Defaults to '_default' if not present
+ environment: "production"
+
+ # Default validation name is chef-validator
+ validation_name: "yourorg-validator"
+ # if validation_cert's value is "system" then it is expected
+ # that the file already exists on the system.
+ validation_cert: |
+ -----BEGIN RSA PRIVATE KEY-----
+ YOUR-ORGS-VALIDATION-KEY-HERE
+ -----END RSA PRIVATE KEY-----
+
+ # A run list for a first boot json, an example (not required)
+ run_list:
+ - "recipe[apache2]"
+ - "role[db]"
+
+ # Specify a list of initial attributes used by the cookbooks
+ initial_attributes:
apache:
prefork:
maxclients: 100
keepalive: "off"
- # if install_type is 'omnibus', change the url to download
- omnibus_url: "https://www.chef.io/chef/install.sh"
+ # if install_type is 'omnibus', change the url to download
+ omnibus_url: "https://www.chef.io/chef/install.sh"
- # if install_type is 'omnibus', pass pinned version string
- # to the install script
- omnibus_version: "12.3.0"
+ # if install_type is 'omnibus', pass pinned version string
+ # to the install script
+ omnibus_version: "12.3.0"
- # If encrypted data bags are used, the client needs to have a secrets file
- # configured to decrypt them
- encrypted_data_bag_secret: "/etc/chef/encrypted_data_bag_secret"
+ # If encrypted data bags are used, the client needs to have a secrets file
+ # configured to decrypt them
+ encrypted_data_bag_secret: "/etc/chef/encrypted_data_bag_secret"
# Capture all subprocess output into a logfile
# Useful for troubleshooting cloud-init issues
diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt
index 52a2476b..13bb687c 100644
--- a/doc/examples/cloud-config-datasources.txt
+++ b/doc/examples/cloud-config-datasources.txt
@@ -1,3 +1,5 @@
+#cloud-config
+
# Documentation on data sources configuration options
datasource:
# Ec2
@@ -38,10 +40,10 @@ datasource:
# these are optional, but allow you to basically provide a datasource
# right here
user-data: |
- # This is the user-data verbatim
+ # This is the user-data verbatim
meta-data:
- instance-id: i-87018aed
- local-hostname: myhost.internal
+ instance-id: i-87018aed
+ local-hostname: myhost.internal
Azure:
agent_command: [service, walinuxagent, start]
diff --git a/doc/examples/cloud-config-disk-setup.txt b/doc/examples/cloud-config-disk-setup.txt
index 89d9ff57..5c6de77e 100644
--- a/doc/examples/cloud-config-disk-setup.txt
+++ b/doc/examples/cloud-config-disk-setup.txt
@@ -1,3 +1,4 @@
+#cloud-config
# Cloud-init supports the creation of simple partition tables and file systems
# on devices.
@@ -6,47 +7,47 @@
# (Not implemented yet, but provided for future documentation)
disk_setup:
- ephmeral0:
- table_type: 'mbr'
- layout: True
- overwrite: False
+ ephemeral0:
+ table_type: 'mbr'
+ layout: True
+ overwrite: False
fs_setup:
- - label: None,
- filesystem: ext3
- device: ephemeral0
- partition: auto
+ - label: None
+ filesystem: ext3
+ device: ephemeral0
+ partition: auto
# Default disk definitions for Microsoft Azure
# ------------------------------------------
device_aliases: {'ephemeral0': '/dev/sdb'}
disk_setup:
- ephemeral0:
- table_type: mbr
- layout: True
- overwrite: False
+ ephemeral0:
+ table_type: mbr
+ layout: True
+ overwrite: False
fs_setup:
- - label: ephemeral0
- filesystem: ext4
- device: ephemeral0.1
- replace_fs: ntfs
+ - label: ephemeral0
+ filesystem: ext4
+ device: ephemeral0.1
+ replace_fs: ntfs
# Data disks definitions for Microsoft Azure
# ------------------------------------------
disk_setup:
- /dev/disk/azure/scsi1/lun0:
- table_type: gpt
- layout: True
- overwrite: True
+ /dev/disk/azure/scsi1/lun0:
+ table_type: gpt
+ layout: True
+ overwrite: True
fs_setup:
- - device: /dev/disk/azure/scsi1/lun0
- partition: 1
- filesystem: ext4
+ - device: /dev/disk/azure/scsi1/lun0
+ partition: 1
+ filesystem: ext4
# Default disk definitions for SmartOS
@@ -54,17 +55,17 @@ fs_setup:
device_aliases: {'ephemeral0': '/dev/vdb'}
disk_setup:
- ephemeral0:
- table_type: mbr
- layout: False
- overwrite: False
+ ephemeral0:
+ table_type: mbr
+ layout: False
+ overwrite: False
fs_setup:
- - label: ephemeral0
- filesystem: ext4
- device: ephemeral0.0
+ - label: ephemeral0
+ filesystem: ext4
+ device: ephemeral0.0
-# Cavaut for SmartOS: if ephemeral disk is not defined, then the disk will
+# Caveat for SmartOS: if ephemeral disk is not defined, then the disk will
# not be automatically added to the mounts.
@@ -77,87 +78,87 @@ fs_setup:
# The disk_setup directive instructs Cloud-init to partition a disk. The format is:
disk_setup:
- ephmeral0:
- table_type: 'mbr'
- layout: 'auto'
- /dev/xvdh:
- table_type: 'mbr'
- layout:
- - 33
- - [33, 82]
- - 33
- overwrite: True
+ ephemeral0:
+ table_type: 'mbr'
+ layout: 'auto'
+ /dev/xvdh:
+ table_type: 'mbr'
+ layout:
+ - 33
+ - [33, 82]
+ - 33
+ overwrite: True
# The format is a list of dicts of dicts. The first value is the name of the
# device and the subsequent values define how to create and layout the
# partition.
# The general format is:
-# disk_setup:
-# <DEVICE>:
-# table_type: 'mbr'
-# layout: <LAYOUT|BOOL>
-# overwrite: <BOOL>
+# disk_setup:
+# <DEVICE>:
+# table_type: 'mbr'
+# layout: <LAYOUT|BOOL>
+# overwrite: <BOOL>
#
# Where:
-# <DEVICE>: The name of the device. 'ephemeralX' and 'swap' are special
-# values which are specific to the cloud. For these devices
-# Cloud-init will look up what the real devices is and then
-# use it.
+# <DEVICE>: The name of the device. 'ephemeralX' and 'swap' are special
+# values which are specific to the cloud. For these devices
+# Cloud-init will look up what the real device is and then
+# use it.
#
-# For other devices, the kernel device name is used. At this
-# time only simply kernel devices are supported, meaning
-# that device mapper and other targets may not work.
+# For other devices, the kernel device name is used. At this
+# time only simple kernel devices are supported, meaning
+# that device mapper and other targets may not work.
#
-# Note: At this time, there is no handling or setup of
-# device mapper targets.
+# Note: At this time, there is no handling or setup of
+# device mapper targets.
#
-# table_type=<TYPE>: Currently the following are supported:
-# 'mbr': default and setups a MS-DOS partition table
-# 'gpt': setups a GPT partition table
+# table_type=<TYPE>: Currently the following are supported:
+# 'mbr': the default; sets up an MS-DOS partition table
+# 'gpt': sets up a GPT partition table
#
-# Note: At this time only 'mbr' and 'gpt' partition tables
-# are allowed. It is anticipated in the future that
-# we'll also have "RAID" to create a mdadm RAID.
+# Note: At this time only 'mbr' and 'gpt' partition tables
+# are allowed. It is anticipated in the future that
+# we'll also have "RAID" to create an mdadm RAID.
#
-# layout={...}: The device layout. This is a list of values, with the
-# percentage of disk that partition will take.
-# Valid options are:
-# [<SIZE>, [<SIZE>, <PART_TYPE]]
+# layout={...}: The device layout. This is a list of values, with the
+# percentage of the disk each partition will take.
+# Valid options are:
+# [<SIZE>, [<SIZE>, <PART_TYPE>]]
#
-# Where <SIZE> is the _percentage_ of the disk to use, while
-# <PART_TYPE> is the numerical value of the partition type.
+# Where <SIZE> is the _percentage_ of the disk to use, while
+# <PART_TYPE> is the numerical value of the partition type.
#
-# The following setups two partitions, with the first
-# partition having a swap label, taking 1/3 of the disk space
-# and the remainder being used as the second partition.
-# /dev/xvdh':
-# table_type: 'mbr'
-# layout:
-# - [33,82]
-# - 66
-# overwrite: True
+# The following sets up two partitions, with the first
+# partition having a swap label, taking 1/3 of the disk space
+# and the remainder being used as the second partition.
+# /dev/xvdh':
+# table_type: 'mbr'
+# layout:
+# - [33,82]
+# - 66
+# overwrite: True
#
-# When layout is "true" it means single partition the entire
-# device.
+# When layout is "true" it means single partition the entire
+# device.
#
-# When layout is "false" it means don't partition or ignore
-# existing partitioning.
+# When layout is "false" it means don't partition or ignore
+# existing partitioning.
#
-# If layout is set to "true" and overwrite is set to "false",
-# it will skip partitioning the device without a failure.
+# If layout is set to "true" and overwrite is set to "false",
+# it will skip partitioning the device without a failure.
#
-# overwrite=<BOOL>: This describes whether to ride with saftey's on and
-# everything holstered.
+# overwrite=<BOOL>: This describes whether to ride with safeties on and
+# everything holstered.
#
-# 'false' is the default, which means that:
-# 1. The device will be checked for a partition table
-# 2. The device will be checked for a file system
-# 3. If either a partition of file system is found, then
-# the operation will be _skipped_.
+# 'false' is the default, which means that:
+# 1. The device will be checked for a partition table
+# 2. The device will be checked for a file system
+# 3. If either a partition or file system is found, then
+# the operation will be _skipped_.
#
-# 'true' is cowboy mode. There are no checks and things are
-# done blindly. USE with caution, you can do things you
-# really, really don't want to do.
+# 'true' is cowboy mode. There are no checks and things are
+# done blindly. USE with caution, you can do things you
+# really, really don't want to do.
#
#
# fs_setup: Setup the file system
@@ -166,101 +167,101 @@ disk_setup:
# fs_setup describes how the file systems are supposed to look.
fs_setup:
- - label: ephemeral0
- filesystem: 'ext3'
- device: 'ephemeral0'
- partition: 'auto'
- - label: mylabl2
- filesystem: 'ext4'
- device: '/dev/xvda1'
- - cmd: mkfs -t %(filesystem)s -L %(label)s %(device)s
- label: mylabl3
- filesystem: 'btrfs'
- device: '/dev/xvdh'
+ - label: ephemeral0
+ filesystem: 'ext3'
+ device: 'ephemeral0'
+ partition: 'auto'
+ - label: mylabl2
+ filesystem: 'ext4'
+ device: '/dev/xvda1'
+ - cmd: mkfs -t %(filesystem)s -L %(label)s %(device)s
+ label: mylabl3
+ filesystem: 'btrfs'
+ device: '/dev/xvdh'
# The general format is:
-# fs_setup:
-# - label: <LABEL>
-# filesystem: <FS_TYPE>
-# device: <DEVICE>
-# partition: <PART_VALUE>
-# overwrite: <OVERWRITE>
-# replace_fs: <FS_TYPE>
+# fs_setup:
+# - label: <LABEL>
+# filesystem: <FS_TYPE>
+# device: <DEVICE>
+# partition: <PART_VALUE>
+# overwrite: <OVERWRITE>
+# replace_fs: <FS_TYPE>
#
# Where:
-# <LABEL>: The file system label to be used. If set to None, no label is
-# used.
+# <LABEL>: The file system label to be used. If set to None, no label is
+# used.
#
-# <FS_TYPE>: The file system type. It is assumed that the there
-# will be a "mkfs.<FS_TYPE>" that behaves likes "mkfs". On a standard
-# Ubuntu Cloud Image, this means that you have the option of ext{2,3,4},
-# and vfat by default.
+# <FS_TYPE>: The file system type. It is assumed that there
+# will be a "mkfs.<FS_TYPE>" that behaves like "mkfs". On a standard
+# Ubuntu Cloud Image, this means that you have the option of ext{2,3,4},
+# and vfat by default.
#
-# <DEVICE>: The device name. Special names of 'ephemeralX' or 'swap'
-# are allowed and the actual device is acquired from the cloud datasource.
-# When using 'ephemeralX' (i.e. ephemeral0), make sure to leave the
-# label as 'ephemeralX' otherwise there may be issues with the mounting
-# of the ephemeral storage layer.
+# <DEVICE>: The device name. Special names of 'ephemeralX' or 'swap'
+# are allowed and the actual device is acquired from the cloud datasource.
+# When using 'ephemeralX' (i.e. ephemeral0), make sure to leave the
+# label as 'ephemeralX' otherwise there may be issues with the mounting
+# of the ephemeral storage layer.
#
-# If you define the device as 'ephemeralX.Y' then Y will be interpetted
-# as a partition value. However, ephermalX.0 is the _same_ as ephemeralX.
+# If you define the device as 'ephemeralX.Y' then Y will be interpreted
+# as a partition value. However, ephemeralX.0 is the _same_ as ephemeralX.
#
-# <PART_VALUE>:
-# Partition definitions are overwriten if you use the '<DEVICE>.Y' notation.
+# <PART_VALUE>:
+# Partition definitions are overwritten if you use the '<DEVICE>.Y' notation.
#
-# The valid options are:
-# "auto|any": tell cloud-init not to care whether there is a partition
-# or not. Auto will use the first partition that does not contain a
-# file system already. In the absence of a partition table, it will
-# put it directly on the disk.
+# The valid options are:
+# "auto|any": tell cloud-init not to care whether there is a partition
+# or not. Auto will use the first partition that does not contain a
+# file system already. In the absence of a partition table, it will
+# put it directly on the disk.
#
-# "auto": If a file system that matches the specification in terms of
-# label, type and device, then cloud-init will skip the creation of
-# the file system.
+# "auto": If a file system that matches the specification in terms of
+# label, type and device, then cloud-init will skip the creation of
+# the file system.
#
-# "any": If a file system that matches the file system type and device,
-# then cloud-init will skip the creation of the file system.
+# "any": If a file system that matches the file system type and device,
+# then cloud-init will skip the creation of the file system.
#
-# Devices are selected based on first-detected, starting with partitions
-# and then the raw disk. Consider the following:
-# NAME FSTYPE LABEL
-# xvdb
-# |-xvdb1 ext4
-# |-xvdb2
-# |-xvdb3 btrfs test
-# \-xvdb4 ext4 test
+# Devices are selected on a first-detected basis, starting with partitions
+# and then the raw disk. Consider the following:
+# NAME FSTYPE LABEL
+# xvdb
+# |-xvdb1 ext4
+# |-xvdb2
+# |-xvdb3 btrfs test
+# \-xvdb4 ext4 test
#
-# If you ask for 'auto', label of 'test, and file system of 'ext4'
-# then cloud-init will select the 2nd partition, even though there
-# is a partition match at the 4th partition.
+# If you ask for 'auto', a label of 'test', and a file system of 'ext4'
+# then cloud-init will select the 2nd partition, even though there
+# is a partition match at the 4th partition.
#
-# If you ask for 'any' and a label of 'test', then cloud-init will
-# select the 1st partition.
+# If you ask for 'any' and a label of 'test', then cloud-init will
+# select the 1st partition.
#
-# If you ask for 'auto' and don't define label, then cloud-init will
-# select the 1st partition.
+# If you ask for 'auto' and don't define label, then cloud-init will
+# select the 1st partition.
#
-# In general, if you have a specific partition configuration in mind,
-# you should define either the device or the partition number. 'auto'
-# and 'any' are specifically intended for formating ephemeral storage or
-# for simple schemes.
+# In general, if you have a specific partition configuration in mind,
+# you should define either the device or the partition number. 'auto'
+# and 'any' are specifically intended for formatting ephemeral storage or
+# for simple schemes.
#
-# "none": Put the file system directly on the device.
+# "none": Put the file system directly on the device.
#
-# <NUM>: where NUM is the actual partition number.
+# <NUM>: where NUM is the actual partition number.
#
-# <OVERWRITE>: Defines whether or not to overwrite any existing
-# filesystem.
+# <OVERWRITE>: Defines whether or not to overwrite any existing
+# filesystem.
#
-# "true": Indiscriminately destroy any pre-existing file system. Use at
-# your own peril.
+# "true": Indiscriminately destroy any pre-existing file system. Use at
+# your own peril.
#
-# "false": If an existing file system exists, skip the creation.
+# "false": If an existing file system exists, skip the creation.
#
-# <REPLACE_FS>: This is a special directive, used for Microsoft Azure that
-# instructs cloud-init to replace a file system of <FS_TYPE>. NOTE:
-# unless you define a label, this requires the use of the 'any' partition
-# directive.
+# <REPLACE_FS>: This is a special directive, used for Microsoft Azure that
+# instructs cloud-init to replace a file system of <FS_TYPE>. NOTE:
+# unless you define a label, this requires the use of the 'any' partition
+# directive.
#
# Behavior Caveat: The default behavior is to _check_ if the file system exists.
-# If a file system matches the specification, then the operation is a no-op.
+# If a file system matches the specification, then the operation is a no-op.
diff --git a/doc/examples/cloud-config-landscape.txt b/doc/examples/cloud-config-landscape.txt
index d7ff8ef8..88be57ce 100644
--- a/doc/examples/cloud-config-landscape.txt
+++ b/doc/examples/cloud-config-landscape.txt
@@ -1,3 +1,4 @@
+#cloud-config
# Landscape-client configuration
#
# Anything under the top 'landscape: client' entry
diff --git a/doc/examples/cloud-config-mcollective.txt b/doc/examples/cloud-config-mcollective.txt
index 67735682..a701616a 100644
--- a/doc/examples/cloud-config-mcollective.txt
+++ b/doc/examples/cloud-config-mcollective.txt
@@ -5,45 +5,45 @@
# Make sure that this file is valid yaml before starting instances.
# It should be passed as user-data when starting the instance.
mcollective:
- # Every key present in the conf object will be added to server.cfg:
- # key: value
- #
- # For example the configuration below will have the following key
- # added to server.cfg:
- # plugin.stomp.host: dbhost
- conf:
- plugin.stomp.host: dbhost
- # This will add ssl certs to mcollective
- # WARNING WARNING WARNING
- # The ec2 metadata service is a network service, and thus is readable
- # by non-root users on the system (ie: 'ec2metadata --user-data')
- # If you want security for this, please use include-once + SSL urls
- public-cert: |
- -----BEGIN CERTIFICATE-----
- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
- Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
- MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
- b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
- 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
- qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
- T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
- BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
- SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
- +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
- hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
- -----END CERTIFICATE-----
- private-cert: |
- -----BEGIN CERTIFICATE-----
- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
- Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
- MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
- b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
- 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
- qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
- T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
- BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
- SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
- +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
- hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
- -----END CERTIFICATE-----
+ # Every key present in the conf object will be added to server.cfg:
+ # key: value
+ #
+ # For example the configuration below will have the following key
+ # added to server.cfg:
+ # plugin.stomp.host: dbhost
+ conf:
+ plugin.stomp.host: dbhost
+ # This will add ssl certs to mcollective
+ # WARNING WARNING WARNING
+ # The ec2 metadata service is a network service, and thus is readable
+ # by non-root users on the system (i.e. 'ec2metadata --user-data')
+ # If you want security for this, please use include-once + SSL urls
+ public-cert: |
+ -----BEGIN CERTIFICATE-----
+ MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
+ Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
+ MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
+ b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
+ 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
+ qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
+ T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
+ BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
+ SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
+ +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
+ hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
+ -----END CERTIFICATE-----
+ private-cert: |
+ -----BEGIN CERTIFICATE-----
+ MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
+ Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
+ MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
+ b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
+ 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
+ qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
+ T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
+ BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
+ SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
+ +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
+ hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
+ -----END CERTIFICATE-----
diff --git a/doc/examples/cloud-config-mount-points.txt b/doc/examples/cloud-config-mount-points.txt
index 5a6c24f5..43f80ec9 100644
--- a/doc/examples/cloud-config-mount-points.txt
+++ b/doc/examples/cloud-config-mount-points.txt
@@ -34,13 +34,13 @@ mounts:
# mount_default_fields
# These values are used to fill in any entries in 'mounts' that are not
-# complete. This must be an array, and must have 7 fields.
+# complete. This must be an array, and must have 6 fields.
mount_default_fields: [ None, None, "auto", "defaults,nofail", "0", "2" ]
# swap can also be set up by the 'mounts' module
# default is to not create any swap files, because 'size' is set to 0
swap:
- filename: /swap.img
- size: "auto" # or size in bytes
- maxsize: size in bytes
+ filename: /swap.img
+ size: "auto" # or size in bytes
+ maxsize: size in bytes
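
The six entries of mount_default_fields above line up with the columns of
fstab(5). A small sketch naming them, purely to make the positional array
readable; the field names come from fstab, not from cloud-init's schema:

    FSTAB_FIELDS = ('fs_spec', 'fs_file', 'fs_vfstype',
                    'fs_mntops', 'fs_freq', 'fs_passno')
    defaults = [None, None, 'auto', 'defaults,nofail', '0', '2']
    print(dict(zip(FSTAB_FIELDS, defaults)))
    # {'fs_spec': None, 'fs_file': None, 'fs_vfstype': 'auto',
    #  'fs_mntops': 'defaults,nofail', 'fs_freq': '0', 'fs_passno': '2'}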
diff --git a/doc/examples/cloud-config-phone-home.txt b/doc/examples/cloud-config-phone-home.txt
index 7f2b69f7..b30c14e3 100644
--- a/doc/examples/cloud-config-phone-home.txt
+++ b/doc/examples/cloud-config-phone-home.txt
@@ -5,10 +5,10 @@
# url
# default: none
# phone_home:
-# url: http://my.foo.bar/$INSTANCE/
-# post: all
-# tries: 10
+# url: http://my.foo.bar/$INSTANCE/
+# post: all
+# tries: 10
#
phone_home:
- url: http://my.example.com/$INSTANCE_ID/
- post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
+ url: http://my.example.com/$INSTANCE_ID/
+ post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
diff --git a/doc/examples/cloud-config-power-state.txt b/doc/examples/cloud-config-power-state.txt
index b470153d..9cd56814 100644
--- a/doc/examples/cloud-config-power-state.txt
+++ b/doc/examples/cloud-config-power-state.txt
@@ -33,8 +33,8 @@
# for future use.
#
power_state:
- delay: "+30"
- mode: poweroff
- message: Bye Bye
- timeout: 30
- condition: True
+ delay: "+30"
+ mode: poweroff
+ message: Bye Bye
+ timeout: 30
+ condition: True
diff --git a/doc/examples/cloud-config-puppet.txt b/doc/examples/cloud-config-puppet.txt
index cd3c2f8e..3c7e2da7 100644
--- a/doc/examples/cloud-config-puppet.txt
+++ b/doc/examples/cloud-config-puppet.txt
@@ -5,47 +5,47 @@
# Make sure that this file is valid yaml before starting instances.
# It should be passed as user-data when starting the instance.
puppet:
- # Every key present in the conf object will be added to puppet.conf:
- # [name]
- # subkey=value
- #
- # For example the configuration below will have the following section
- # added to puppet.conf:
- # [puppetd]
- # server=puppetmaster.example.org
- # certname=i-0123456.ip-X-Y-Z.cloud.internal
- #
- # The puppmaster ca certificate will be available in
- # /var/lib/puppet/ssl/certs/ca.pem
- conf:
- agent:
- server: "puppetmaster.example.org"
- # certname supports substitutions at runtime:
- # %i: instanceid
- # Example: i-0123456
- # %f: fqdn of the machine
- # Example: ip-X-Y-Z.cloud.internal
- #
- # NB: the certname will automatically be lowercased as required by puppet
- certname: "%i.%f"
- # ca_cert is a special case. It won't be added to puppet.conf.
- # It holds the puppetmaster certificate in pem format.
- # It should be a multi-line string (using the | yaml notation for
- # multi-line strings).
- # The puppetmaster certificate is located in
- # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host.
- #
- ca_cert: |
- -----BEGIN CERTIFICATE-----
- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
- Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
- MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
- b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
- 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
- qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
- T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
- BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
- SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
- +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
- hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
- -----END CERTIFICATE-----
+ # Every key present in the conf object will be added to puppet.conf:
+ # [name]
+ # subkey=value
+ #
+ # For example the configuration below will have the following section
+ # added to puppet.conf:
+ # [puppetd]
+ # server=puppetmaster.example.org
+ # certname=i-0123456.ip-X-Y-Z.cloud.internal
+ #
+ # The puppetmaster ca certificate will be available in
+ # /var/lib/puppet/ssl/certs/ca.pem
+ conf:
+ agent:
+ server: "puppetmaster.example.org"
+ # certname supports substitutions at runtime:
+ # %i: instanceid
+ # Example: i-0123456
+ # %f: fqdn of the machine
+ # Example: ip-X-Y-Z.cloud.internal
+ #
+ # NB: the certname will automatically be lowercased as required by puppet
+ certname: "%i.%f"
+ # ca_cert is a special case. It won't be added to puppet.conf.
+ # It holds the puppetmaster certificate in pem format.
+ # It should be a multi-line string (using the | yaml notation for
+ # multi-line strings).
+ # The puppetmaster certificate is located in
+ # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host.
+ #
+ ca_cert: |
+ -----BEGIN CERTIFICATE-----
+ MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
+ Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
+ MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
+ b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
+ 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
+ qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
+ T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
+ BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
+ SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
+ +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
+ hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
+ -----END CERTIFICATE-----
diff --git a/doc/examples/cloud-config-reporting.txt b/doc/examples/cloud-config-reporting.txt
index ee00078f..80bde303 100644
--- a/doc/examples/cloud-config-reporting.txt
+++ b/doc/examples/cloud-config-reporting.txt
@@ -4,14 +4,14 @@
## A 'webhook' and a 'log' type.
## It also disables the built-in default 'log'
reporting:
- smtest:
- type: webhook
- endpoint: "http://myhost:8000/"
- consumer_key: "ckey_foo"
- consumer_secret: "csecret_foo"
- token_key: "tkey_foo"
- token_secret: "tkey_foo"
- smlogger:
- type: log
- level: WARN
- log: null
+ smtest:
+ type: webhook
+ endpoint: "http://myhost:8000/"
+ consumer_key: "ckey_foo"
+ consumer_secret: "csecret_foo"
+ token_key: "tkey_foo"
+ token_secret: "tkey_foo"
+ smlogger:
+ type: log
+ level: WARN
+ log: null
diff --git a/doc/examples/cloud-config-rh_subscription.txt b/doc/examples/cloud-config-rh_subscription.txt
index be121338..5cc903a2 100644
--- a/doc/examples/cloud-config-rh_subscription.txt
+++ b/doc/examples/cloud-config-rh_subscription.txt
@@ -14,36 +14,36 @@
# /etc/rhsm/rhs.conf file
rh_subscription:
- username: joe@foo.bar
+ username: joe@foo.bar
- ## Quote your password if it has symbols to be safe
- password: '1234abcd'
+ ## Quote your password if it has symbols to be safe
+ password: '1234abcd'
- ## If you prefer, you can use the activation key and
- ## org instead of username and password. Be sure to
- ## comment out username and password
+ ## If you prefer, you can use the activation key and
+ ## org instead of username and password. Be sure to
+ ## comment out username and password
- #activation-key: foobar
- #org: 12345
+ #activation-key: foobar
+ #org: 12345
- ## Uncomment to auto-attach subscriptions to your system
- #auto-attach: True
+ ## Uncomment to auto-attach subscriptions to your system
+ #auto-attach: True
- ## Uncomment to set the service level for your
- ## subscriptions
- #service-level: self-support
+ ## Uncomment to set the service level for your
+ ## subscriptions
+ #service-level: self-support
- ## Uncomment to add pools (needs to be a list of IDs)
- #add-pool: []
+ ## Uncomment to add pools (needs to be a list of IDs)
+ #add-pool: []
- ## Uncomment to add or remove yum repos
- ## (needs to be a list of repo IDs)
- #enable-repo: []
- #disable-repo: []
+ ## Uncomment to add or remove yum repos
+ ## (needs to be a list of repo IDs)
+ #enable-repo: []
+ #disable-repo: []
- ## Uncomment to alter the baseurl in /etc/rhsm/rhsm.conf
- #rhsm-baseurl: http://url
+ ## Uncomment to alter the baseurl in /etc/rhsm/rhsm.conf
+ #rhsm-baseurl: http://url
- ## Uncomment to alter the server hostname in
- ## /etc/rhsm/rhsm.conf
- #server-hostname: foo.bar.com
+ ## Uncomment to alter the server hostname in
+ ## /etc/rhsm/rhsm.conf
+ #server-hostname: foo.bar.com
diff --git a/doc/examples/cloud-config-rsyslog.txt b/doc/examples/cloud-config-rsyslog.txt
index 28ea1f16..d28dd38e 100644
--- a/doc/examples/cloud-config-rsyslog.txt
+++ b/doc/examples/cloud-config-rsyslog.txt
@@ -1,3 +1,4 @@
+#cloud-config
## the rsyslog module allows you to configure the system's syslog.
## configuration of syslog is under the top level cloud-config
## entry 'rsyslog'.
@@ -5,22 +6,22 @@
## Example:
#cloud-config
rsyslog:
- remotes:
- # udp to host 'maas.mydomain' port 514
- maashost: maas.mydomain
- # udp to ipv4 host on port 514
- maas: "@[10.5.1.56]:514"
- # tcp to host ipv6 host on port 555
- maasipv6: "*.* @@[FE80::0202:B3FF:FE1E:8329]:555"
- configs:
- - "*.* @@192.158.1.1"
- - content: "*.* @@192.0.2.1:10514"
- filename: 01-example.conf
- - content: |
- *.* @@syslogd.example.com
- config_dir: /etc/rsyslog.d
- config_filename: 20-cloud-config.conf
- service_reload_command: [your, syslog, reload, command]
+ remotes:
+ # udp to host 'maas.mydomain' port 514
+ maashost: maas.mydomain
+ # udp to ipv4 host on port 514
+ maas: "@[10.5.1.56]:514"
+ # tcp to ipv6 host on port 555
+ maasipv6: "*.* @@[FE80::0202:B3FF:FE1E:8329]:555"
+ configs:
+ - "*.* @@192.158.1.1"
+ - content: "*.* @@192.0.2.1:10514"
+ filename: 01-example.conf
+ - content: |
+ *.* @@syslogd.example.com
+ config_dir: /etc/rsyslog.d
+ config_filename: 20-cloud-config.conf
+ service_reload_command: [your, syslog, reload, command]
## Additionally the following legacy format is supported
## it is converted into the format above before use.
@@ -28,11 +29,11 @@ rsyslog:
## rsyslog_dir -> rsyslog/config_dir
## rsyslog -> rsyslog/configs
# rsyslog:
-# - "*.* @@192.158.1.1"
-# - content: "*.* @@192.0.2.1:10514"
-# filename: 01-example.conf
-# - content: |
-# *.* @@syslogd.example.com
+# - "*.* @@192.158.1.1"
+# - content: "*.* @@192.0.2.1:10514"
+# filename: 01-example.conf
+# - content: |
+# *.* @@syslogd.example.com
# rsyslog_filename: 20-cloud-config.conf
# rsyslog_dir: /etc/rsyslog.d
diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt
index f588bfbc..aa14a6dd 100644
--- a/doc/examples/cloud-config-user-groups.txt
+++ b/doc/examples/cloud-config-user-groups.txt
@@ -84,7 +84,7 @@ users:
# use <default_username> instead. This option only disables cloud
# provided public-keys. An error will be raised if ssh_authorized_keys
# or ssh_import_id is provided for the same user.
-#
+#
# ssh_authorized_keys.
# sudo: Defaults to none. Accepts a sudo rule string, a list of sudo rule
# strings or False to explicitly deny sudo usage. Examples:
@@ -120,10 +120,10 @@ users:
# to have the 'ubuntu' user in addition to other users, you need to instruct
# cloud-init that you also want the default user. To do this use the following
# syntax:
-# users:
-# - default
-# - bob
-# - ....
+# users:
+# - default
+# - bob
+# - ....
# foobar: ...
#
# users[0] (the first user in users) overrides the user directive.
@@ -131,10 +131,10 @@ users:
# The 'default' user above references the distro's config:
# system_info:
# default_user:
-# name: Ubuntu
-# plain_text_passwd: 'ubuntu'
-# home: /home/ubuntu
-# shell: /bin/bash
-# lock_passwd: True
-# gecos: Ubuntu
-# groups: [adm, audio, cdrom, dialout, floppy, video, plugdev, dip, netdev]
+# name: Ubuntu
+# plain_text_passwd: 'ubuntu'
+# home: /home/ubuntu
+# shell: /bin/bash
+# lock_passwd: True
+# gecos: Ubuntu
+# groups: [adm, audio, cdrom, dialout, floppy, video, plugdev, dip, netdev]
diff --git a/doc/examples/cloud-config-vendor-data.txt b/doc/examples/cloud-config-vendor-data.txt
index 7f90847b..920d12e8 100644
--- a/doc/examples/cloud-config-vendor-data.txt
+++ b/doc/examples/cloud-config-vendor-data.txt
@@ -7,8 +7,8 @@
# vendordata. Users of the end system are given ultimate control.
#
vendor_data:
- enabled: True
- prefix: /usr/bin/ltrace
+ enabled: True
+ prefix: /usr/bin/ltrace
# enabled: whether it is enabled or not
# prefix: the command to run before any vendor scripts.
diff --git a/doc/examples/cloud-config-write-files.txt b/doc/examples/cloud-config-write-files.txt
index ec98bc93..6c67c503 100644
--- a/doc/examples/cloud-config-write-files.txt
+++ b/doc/examples/cloud-config-write-files.txt
@@ -8,26 +8,26 @@
#
# Note: Content strings here are truncated for example purposes.
write_files:
-- encoding: b64
- content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4...
- owner: root:root
- path: /etc/sysconfig/selinux
- permissions: '0644'
-- content: |
- # My new /etc/sysconfig/samba file
+- encoding: b64
+ content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4...
+ owner: root:root
+ path: /etc/sysconfig/selinux
+ permissions: '0644'
+- content: |
+ # My new /etc/sysconfig/samba file
- SMBDOPTIONS="-D"
- path: /etc/sysconfig/samba
-- content: !!binary |
- f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAAAAAAAAAEAAOAAI
- AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAADAAQAAAAAAAAgA
- AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAABwAAAAAAAAAAQAA
- ....
- path: /bin/arch
- permissions: '0555'
-- encoding: gzip
- content: !!binary |
- H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
- path: /usr/bin/hello
- permissions: '0755'
+ SMBDOPTIONS="-D"
+ path: /etc/sysconfig/samba
+- content: !!binary |
+ f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAAAAAAAAAEAAOAAI
+ AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAADAAQAAAAAAAAgA
+ AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAABwAAAAAAAAAAQAA
+ ....
+ path: /bin/arch
+ permissions: '0555'
+- encoding: gzip
+ content: !!binary |
+ H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
+ path: /usr/bin/hello
+ permissions: '0755'
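
For the encoding: b64 and encoding: gzip entries above, the content value
is just the file bytes run through the named codec. A small authoring-side
helper, assuming Python 3; the function names are illustrative, not part of
cloud-init:

    import base64
    import gzip

    def b64_payload(path):
        # Value for a write_files entry using 'encoding: b64'.
        with open(path, 'rb') as f:
            return base64.b64encode(f.read()).decode('ascii')

    def gzip_payload(path):
        # 'encoding: gzip' entries carry gzipped bytes; with the YAML
        # '!!binary |' tag those bytes are themselves base64-encoded,
        # as in the /usr/bin/hello example above.
        with open(path, 'rb') as f:
            return base64.b64encode(gzip.compress(f.read())).decode('ascii')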
diff --git a/doc/examples/cloud-config-yum-repo.txt b/doc/examples/cloud-config-yum-repo.txt
index ab2c031e..e8f2bbb4 100644
--- a/doc/examples/cloud-config-yum-repo.txt
+++ b/doc/examples/cloud-config-yum-repo.txt
@@ -6,15 +6,15 @@
# The following example adds the file /etc/yum.repos.d/epel_testing.repo
# which can then subsequently be used by yum for later operations.
yum_repos:
- # The name of the repository
- epel-testing:
- # Any repository configuration options
- # See: man yum.conf
- #
- # This one is required!
- baseurl: http://download.fedoraproject.org/pub/epel/testing/5/$basearch
- enabled: false
- failovermethod: priority
- gpgcheck: true
- gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL
- name: Extra Packages for Enterprise Linux 5 - Testing
+ # The name of the repository
+ epel-testing:
+ # Any repository configuration options
+ # See: man yum.conf
+ #
+ # This one is required!
+ baseurl: http://download.fedoraproject.org/pub/epel/testing/5/$basearch
+ enabled: false
+ failovermethod: priority
+ gpgcheck: true
+ gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL
+ name: Extra Packages for Enterprise Linux 5 - Testing
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index eb84dcf5..20a0ce0d 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -192,8 +192,8 @@ cloud_config_modules:
# ssh_import_id: [ user1, user2 ]
# ssh_import_id will feed the list in that variable to
-# ssh-import-id, so that public keys stored in launchpad
-# can easily be imported into the configured user
+# ssh-import-id, so that public keys stored in launchpad
+# can easily be imported into the configured user
# This can be a single string ('smoser') or a list ([smoser, kirkland])
ssh_import_id: [smoser]
@@ -202,14 +202,15 @@ ssh_import_id: [smoser]
# See debconf-set-selections man page.
#
# Default: none
-#
-debconf_selections: | # Need to preserve newlines
- # Force debconf priority to critical.
- debconf debconf/priority select critical
+#
+debconf_selections:
+ # Force debconf priority to critical.
+ set1: debconf debconf/priority select critical
- # Override default frontend to readline, but allow user to select.
- debconf debconf/frontend select readline
- debconf debconf/frontend seen false
+ # Override default frontend to readline, but allow user to select.
+ set2: |
+ debconf debconf/frontend select readline
+ debconf debconf/frontend seen false
# manage byobu defaults
# byobu_by_default:
@@ -375,11 +376,11 @@ final_message: "The system is finally up, after $UPTIME seconds"
# the special entry "&1" for an error means "same location as stdout"
# (Note, that '&1' has meaning in yaml, so it must be quoted)
output:
- init: "> /var/log/my-cloud-init.log"
- config: [ ">> /tmp/foo.out", "> /tmp/foo.err" ]
- final:
- output: "| tee /tmp/final.stdout | tee /tmp/bar.stdout"
- error: "&1"
+ init: "> /var/log/my-cloud-init.log"
+ config: [ ">> /tmp/foo.out", "> /tmp/foo.err" ]
+ final:
+ output: "| tee /tmp/final.stdout | tee /tmp/bar.stdout"
+ error: "&1"
# phone_home: if this dictionary is present, then the phone_home
@@ -392,8 +393,8 @@ output:
# tries: 10
#
phone_home:
- url: http://my.example.com/$INSTANCE_ID/
- post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
+ url: http://my.example.com/$INSTANCE_ID/
+ post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
# timezone: set the timezone for this instance
# the value of 'timezone' must exist in /usr/share/zoneinfo
@@ -407,7 +408,7 @@ timezone: US/Eastern
# then 'L' will be initially created with root:root ownership (during
# cloud-init), and then at cloud-config time (when syslog is available)
# the syslog daemon will be unable to write to the file.
-#
+#
# to remedy this situation, 'def_log_file' can be set to a filename
# and syslog_fix_perms to a string containing "<user>:<group>"
# if syslog_fix_perms is a list, it will iterate through and use the
@@ -446,11 +447,11 @@ syslog_fix_perms: syslog:root
# to set hashed password, here account 'user3' has a password it set to
# 'cloud-init', hashed with SHA-256:
# chpasswd:
-# list: |
-# user1:password1
-# user2:RANDOM
-# user3:$5$eriogqzq$Dg7PxHsKGzziuEGkZgkLvacjuEFeljJ.rLf.hZqKQLA
-# expire: True
+# list: |
+# user1:password1
+# user2:RANDOM
+# user3:$5$eriogqzq$Dg7PxHsKGzziuEGkZgkLvacjuEFeljJ.rLf.hZqKQLA
+# expire: True
# ssh_pwauth: [ True, False, "" or "unchanged" ]
#
# Hashed passwords can be generated in multiple ways, example with python3:
@@ -510,7 +511,7 @@ manual_cache_clean: False
# power_state can be used to make the system shutdown, reboot or
# halt after boot is finished. This same thing can be achieved by
# user-data scripts or by runcmd by simply invoking 'shutdown'.
-#
+#
# Doing it this way ensures that cloud-init is entirely finished with
# modules that would be executed, and avoids any error/log messages
# that may go to the console as a result of system services like
@@ -521,6 +522,6 @@ manual_cache_clean: False
# mode: required. must be one of 'poweroff', 'halt', 'reboot'
# message: provided as the message argument to 'shutdown'. default is none.
power_state:
- delay: 30
- mode: poweroff
- message: Bye Bye
+ delay: 30
+ mode: poweroff
+ message: Bye Bye
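
The debconf_selections hunk above moves from one opaque multi-line string
to a mapping of named entries, which merges across configs the same way
other keyed sections do. A quick parse check, assuming PyYAML is available:

    import yaml

    snippet = '''
    debconf_selections:
      set1: debconf debconf/priority select critical
      set2: |
        debconf debconf/frontend select readline
        debconf debconf/frontend seen false
    '''
    print(yaml.safe_load(snippet)['debconf_selections'])
    # {'set1': 'debconf debconf/priority select critical',
    #  'set2': 'debconf debconf/frontend select readline\n'
    #          'debconf debconf/frontend seen false\n'}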
diff --git a/doc/examples/kernel-cmdline.txt b/doc/examples/kernel-cmdline.txt
index f043baef..805bc3d3 100644
--- a/doc/examples/kernel-cmdline.txt
+++ b/doc/examples/kernel-cmdline.txt
@@ -3,16 +3,19 @@ configuration that comes from the kernel command line has higher priority
than configuration in /etc/cloud/cloud.cfg
The format is:
- cc: <yaml content here> [end_cc]
+ cc: <yaml content here|URL encoded yaml content> [end_cc]
cloud-config will consider any content after 'cc:' to be cloud-config
data. If an 'end_cc' string is present, then it will stop reading there.
Otherwise it considers everything after 'cc:' to be cloud-config content.
-In order to allow carriage returns, you must enter '\\n', literally,
+In order to allow carriage returns, you must enter '\\n' literally
on the command line: two backslashes followed by the letter 'n'.
+The yaml content may also be URL encoded (urllib.parse.quote()).
+
Here are some examples:
- root=/dev/sda1 cc: ssh_import_id: [smoser, kirkland]\\n
- root=LABEL=uec-rootfs cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
- cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc root=/dev/sda1
+ root=/dev/sda1 cc: ssh_import_id: [smoser, kirkland]\\n
+ root=LABEL=uec-rootfs cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
+ cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc root=/dev/sda1
+ cc:ssh_import_id: %5Bsmoser%5D end_cc cc:runcmd: %5B %5B ls, -l %5D %5D end_cc root=/dev/sda1
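+
+For reference, a URL encoded value like the last example can be produced
+with a small Python snippet (a minimal sketch; the yaml string shown is
+illustrative):
+
+  import urllib.parse
+  print("cc:" + urllib.parse.quote("ssh_import_id: [smoser]") + " end_cc")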
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 86441986..684822c2 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -18,7 +18,7 @@ from cloudinit.config.schema import get_schema_doc
# General information about the project.
project = 'cloud-init'
-copyright = '2019, Canonical Ltd.'
+copyright = '2020, Canonical Ltd.'
# -- General configuration ----------------------------------------------------
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index 5d90c131..0015e35a 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -68,6 +68,7 @@ Having trouble? We would like to help!
:caption: Development
topics/hacking.rst
+ topics/code_review.rst
topics/security.rst
topics/debugging.rst
topics/logging.rst
diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst
index 3f215b1b..84490460 100644
--- a/doc/rtd/topics/availability.rst
+++ b/doc/rtd/topics/availability.rst
@@ -14,8 +14,8 @@ distributions and clouds, both public and private.
Distributions
=============
-Cloud-init has support across all major Linux distributions and
-FreeBSD:
+Cloud-init has support across all major Linux distributions, FreeBSD, NetBSD
+and OpenBSD:
- Ubuntu
- SLES/openSUSE
@@ -25,6 +25,8 @@ FreeBSD:
- Debian
- ArchLinux
- FreeBSD
+- NetBSD
+- OpenBSD
Clouds
======
diff --git a/doc/rtd/topics/boot.rst b/doc/rtd/topics/boot.rst
index d846867b..4e79c958 100644
--- a/doc/rtd/topics/boot.rst
+++ b/doc/rtd/topics/boot.rst
@@ -129,7 +129,7 @@ Config
+---------+--------+----------------------------------------------------------+
This stage runs config modules only. Modules that do not really have an
-effect on other stages of boot are run here.
+effect on other stages of boot are run here, including ``runcmd``.
Final
=====
@@ -150,7 +150,7 @@ Things that run here include
* package installations
* configuration management plugins (puppet, chef, salt-minion)
- * user-scripts (including ``runcmd``).
+ * user-scripts (i.e. shell scripts passed as user-data)
For scripts external to cloud-init looking to wait until cloud-init is
finished, the ``cloud-init status`` subcommand can help block external
diff --git a/doc/rtd/topics/code_review.rst b/doc/rtd/topics/code_review.rst
new file mode 100644
index 00000000..68c10405
--- /dev/null
+++ b/doc/rtd/topics/code_review.rst
@@ -0,0 +1,256 @@
+*******************
+Code Review Process
+*******************
+
+In order to manage incoming pull requests effectively, and to provide
+timely feedback and/or acceptance, this document serves as a guideline
+for the review process and outlines the expectations for those
+submitting code to the project as well as those reviewing the code.
+Code is reviewed for acceptance by at least one core team member (later
+referred to as committers), but comments and suggestions from others
+are encouraged and welcome.
+
+The process is intended to provide timely and actionable feedback for
+any submission.
+
+Asking For Help
+===============
+
+cloud-init contributors, potential contributors, community members and
+users are encouraged to ask for any help that they need. If you have
+questions about the code review process, or at any point during the
+code review process, these are the available avenues:
+
+* if you have an open Pull Request, comment on that pull request
+* join the ``#cloud-init`` channel on the Freenode IRC network and ask
+ away
+* send an email to the cloud-init mailing list,
+ cloud-init@lists.launchpad.net
+
+These are listed in rough order of preference, but use whichever of
+them you are most comfortable with.
+
+Goals
+=====
+
+This process has the following goals:
+
+* Ensure code reviews occur in a timely fashion and provide actionable
+ feedback if changes are desired.
+* Minimize ancillary problems to increase the efficiency of those
+ reviewing the submitted code.
+
+Role Definitions
+================
+
+Any code review process will have (at least) two involved parties. For
+our purposes, these parties are referred to as **Proposer** and
+**Reviewer**. (We also have the **Committer** role which is a special
+case of the **Reviewer** role.) The terms are defined here (and the
+use of the singular form is not meant to imply that they refer to a
+single person):
+
+Proposer
+ The person proposing a pull request (hereafter known as a PR).
+
+Reviewer
+ A person who is reviewing a PR.
+
+Committer
+ A cloud-init core developer (i.e. a person who has permission to
+ merge PRs into master).
+
+Prerequisites For Landing Pull Requests
+=======================================
+
+Before a PR can be landed into master, the following conditions *must*
+be met:
+
+* the CLA has been signed by the **Proposer** (or is covered by an
+ entity-level CLA signature)
+* all required status checks are passing
+* at least one "Approve" review from a **Committer**
+* no "Request changes" reviews from any **Committer**
+
+The following conditions *should* be met:
+
+* any Python functions/methods/classes have docstrings added/updated
+* any changes to config module behaviour are captured in the
+ documentation of the config module
+* any Python code added has corresponding unit tests
+* no "Request changes" reviews from any **Reviewer**
+
+These conditions can be relaxed at the discretion of the
+**Committers** on a case-by-case basis. Generally, for accountability,
+this should not be the decision of a single **Committer**, and the
+decision should be documented in comments on the PR.
+
+(To take a specific example, the ``cc_phone_home`` module had no tests
+at the time `PR #237
+<https://github.com/canonical/cloud-init/pull/237>`_ was submitted, so
+the **Proposer** was not expected to write a full set of tests for
+their minor modification, but they were expected to update the config
+module docs.)
+
+Non-Committer Reviews
+=====================
+
+Reviews from non-**Committers** are *always* welcome. Please feel
+empowered to review PRs and leave your thoughts and comments on any
+submitted PRs, regardless of the **Proposer**.
+
+Much of the below process is written in terms of the **Committers**.
+This is not intended to reflect that reviews should only come from that
+group, but acknowledges that we are ultimately responsible for
+maintaining the standards of the codebase. It would be entirely
+reasonable (and very welcome) for a **Reviewer** to only examine part
+of a PR, but it would not be appropriate for a **Committer** to merge a
+PR without full scrutiny.
+
+Opening Phase
+=============
+
+In this phase, the **Proposer** is responsible for opening a pull
+request and meeting the prerequisites laid out above.
+
+If they need help understanding the prerequisites, or help meeting the
+prerequisites, then they can (and should!) ask for help. See the
+:ref:`Asking For Help` section above for the ways to do that.
+
+These are the steps that comprise the opening phase:
+
+1. The **Proposer** opens a PR
+
+2. CI runs automatically, and if
+
+ CI fails
+ The **Proposer** is expected to fix CI failures. If the
+ **Proposer** doesn't understand the nature of the failures they
+ are seeing, they should comment in the PR to request assistance,
+ or use another way of :ref:`Asking For Help`.
+
+ (Note that if assistance is not requested, the **Committers**
+ will assume that the **Proposer** is working on addressing the
+ failures themselves. If you require assistance, please do ask
+ for help!)
+
+ CI passes
+ Move on to the :ref:`Review phase`.
+
+Review Phase
+============
+
+In this phase, the **Proposer** and the **Reviewers** will iterate
+together to, hopefully, get the PR merged into the cloud-init codebase.
+There are three potential outcomes: merged, rejected permanently, and
+temporarily closed. (The first two are covered in this section; see
+:ref:`Inactive Pull Requests` for details about temporary closure.)
+
+(In the below, when the verbs "merge" or "squash merge" are used, they
+should be understood to mean "squash merged using the GitHub UI", which
+is the only way that changes can land in cloud-init's master branch.)
+
+These are the steps that comprise the review phase:
+
+1. The **Committers** assign a **Committer** to the PR
+
+ This **Committer** is expected to shepherd the PR to completion (and
+ merge it, if that is the outcome reached). This means that they
+ will perform an initial review, and monitor the PR to ensure that
+ the **Proposer** is receiving any assistance that they require. The
+ **Committers** will perform this assignment on a daily basis.
+
+ This assignment is intended to ensure that the **Proposer** has a
+ clear point of contact with a cloud-init core developer, and that
+ they get timely feedback after submitting a PR. It *is not*
+ intended to preclude reviews from any other **Reviewers**, nor to
+ imply that the **Committer** has ownership over the review process.
+
+ The assigned **Committer** may choose to delegate the code review of
+ a PR to another **Reviewer** if they think that reviewer would be
+ better suited.
+
+ (Note that, in GitHub terms, this is setting an Assignee, not
+ requesting a review.)
+
+2. That **Committer** performs an initial review of the PR, resulting
+ in one of the following:
+
+ Approve
+ If the submitted PR meets all of the :ref:`Prerequisites for
+ Landing Pull Requests` and passes code review, then the
+ **Committer** will squash merge immediately.
+
+ There may be circumstances where a PR should not be merged
+ immediately. The ``wip`` label will be applied to PRs for which
+ this is true. Only **Committers** are able to apply labels to
+ PRs, so anyone who believes that this label should be applied to a
+ PR should request its application in a comment on the PR.
+
+ The review process is **DONE**.
+
+ Approve (with nits)
+ If the **Proposer** submits their PR with "Allow edits from
+ maintainer" enabled, and the only changes the **Committer**
+ requests are minor "nits", the **Committer** can push fixes for
+ those nits and *immediately* squash merge. If the **Committer**
+ does not wish to fix these nits but believes they should block a
+ straight-up Approve, then their review should be "Needs Changes"
+ instead.
+
+ A nit is understood to be something like a minor style issue or a
+ spelling error, generally confined to a single line of code.
+
+ If a **Committer** is unsure as to whether their requested change
+ is a nit, they should not treat it as a nit.
+
+ (If a **Proposer** wants to opt-out of this, then they should
+ uncheck "Allow edits from maintainer" when submitting their PR.)
+
+ The review process is **DONE**.
+
+ Outright rejection
+ The **Committer** will close the PR, with useful messaging for the
+ **Proposer** as to why this has happened.
+
+ This is reserved for cases where the proposed change is completely
+ unfit for landing, and there is no reasonable path forward. This
+ should only be used sparingly, as there are very few cases where
+ proposals are completely unfit.
+
+ If a different approach to the same problem is planned, it should
+ be submitted as a separate PR. The **Committer** should include
+ this information in their message when the PR is closed.
+
+ The review process is **DONE**.
+
+ Needs Changes
+ The **Committer** will give the **Proposer** a clear idea of what
+ is required for an Approve vote or, for more complex PRs, what the
+ next steps towards an Approve vote are.
+
+ The **Proposer** will ask questions if they don't understand, or
+ disagree with, the **Committer**'s review comments.
+
+ Once consensus has been reached, the **Proposer** will address the
+ review comments.
+
+ Once the review comments are addressed (as well as, potentially,
+ in the interim), CI will run. If CI fails, the **Proposer** is
+ expected to fix CI failures. If CI passes, the **Proposer**
+ should indicate that the PR is ready for re-review (by @ing the
+ assigned reviewer), effectively moving back to the start of this
+ section.
+
+Inactive Pull Requests
+======================
+
+PRs will be temporarily closed if they have been waiting on
+**Proposer** action for a certain amount of time without activity. A
+PR will be marked as stale (with an explanatory comment) after 14 days
+of inactivity. It will be closed after a further 7 days of inactivity.
+
+These closes are not considered permanent, and the closing message
+should reflect this for the **Proposer**. However, if a PR is reopened,
+it should effectively enter the :ref:`Opening phase` again, as it may
+need some work done to get CI passing again.
diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst
index 1427fb3d..fdb919a5 100644
--- a/doc/rtd/topics/datasources/azure.rst
+++ b/doc/rtd/topics/datasources/azure.rst
@@ -114,19 +114,19 @@ An example configuration with the default values is provided below:
.. sourcecode:: yaml
datasource:
- Azure:
- agent_command: __builtin__
- apply_network_config: true
- data_dir: /var/lib/waagent
- dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases
- disk_aliases:
+ Azure:
+ agent_command: __builtin__
+ apply_network_config: true
+ data_dir: /var/lib/waagent
+ dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases
+ disk_aliases:
ephemeral0: /dev/disk/cloud/azure_resource
- hostname_bounce:
+ hostname_bounce:
interface: eth0
command: builtin
policy: true
hostname_command: hostname
- set_hostname: true
+ set_hostname: true
Userdata
diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst
index da183226..592328ea 100644
--- a/doc/rtd/topics/datasources/cloudstack.rst
+++ b/doc/rtd/topics/datasources/cloudstack.rst
@@ -37,11 +37,11 @@ An example configuration with the default values is provided below:
.. sourcecode:: yaml
datasource:
- CloudStack:
- max_wait: 120
- timeout: 50
- datasource_list:
- - CloudStack
+ CloudStack:
+ max_wait: 120
+ timeout: 50
+ datasource_list:
+ - CloudStack
.. _Apache CloudStack: http://cloudstack.apache.org/
diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst
index a90f3779..274ca1e4 100644
--- a/doc/rtd/topics/datasources/ec2.rst
+++ b/doc/rtd/topics/datasources/ec2.rst
@@ -42,6 +42,7 @@ Note that there are multiple versions of this data provided, cloud-init
by default uses **2009-04-04** but newer versions can be supported with
relative ease (newer versions have more data exposed, while maintaining
backward compatibility with the previous versions).
+Version **2016-09-02** is required for secondary IP address support.
To see which versions are supported from your cloud provider use the following
URL:
@@ -80,16 +81,26 @@ The settings that may be configured are:
* **timeout**: the timeout value provided to urlopen for each individual http
request. This is used both when selecting a metadata_url and when crawling
the metadata service. (default: 50)
+ * **apply_full_imds_network_config**: Boolean (default: True) to allow
+ cloud-init to configure any secondary NICs and secondary IPs described by
+ the metadata service. All network interfaces are configured with DHCP (v4)
+ to obtain a primary IPv4 address and route. Interfaces which have a
+ non-empty 'ipv6s' list will also enable DHCPv6 to obtain a primary IPv6
+ address and route. The DHCP response (v4 and v6) returns an IP that matches
+ the first element of the local-ipv4s and ipv6s lists respectively. All
+ additional values (secondary addresses) in the static ip lists will be
+ added to the interface.
An example configuration with the default values is provided below:
.. sourcecode:: yaml
datasource:
- Ec2:
- metadata_urls: ["http://169.254.169.254:80", "http://instance-data:8773"]
- max_wait: 120
- timeout: 50
+ Ec2:
+ metadata_urls: ["http://169.254.169.254:80", "http://instance-data:8773"]
+ max_wait: 120
+ timeout: 50
+ apply_full_imds_network_config: true
Notes
-----
@@ -102,4 +113,12 @@ Notes
The check for the instance type is performed by is_classic_instance()
method.
+ * For EC2 instances with multiple network interfaces (NICs) attached, dhcp4
+ will be enabled to obtain the primary private IPv4 address of those NICs.
+ Wherever dhcp4 or dhcp6 is enabled for a NIC, a dhcp route-metric will be
+ added with the value of ``<device-number + 1> * 100`` to ensure dhcp
+ routes on the primary NIC are preferred to any secondary NICs.
+ For example: the primary NIC will have a DHCP route-metric of 100,
+ the next NIC will be 200.
+
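+ The same metric can be computed with this minimal sketch (the
+ ``device_number`` argument is assumed to come from the interface's
+ metadata):
+
+ .. code-block:: python
+
+     def dhcp_route_metric(device_number: int) -> int:
+         # Primary NIC (device-number 0) gets 100, the next one 200, ...
+         return (device_number + 1) * 100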
.. vi: textwidth=78
diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
index bc96f7fe..6d3075f4 100644
--- a/doc/rtd/topics/datasources/nocloud.rst
+++ b/doc/rtd/topics/datasources/nocloud.rst
@@ -32,7 +32,7 @@ The permitted keys are:
With ``ds=nocloud``, the ``seedfrom`` value must start with ``/`` or
``file://``. With ``ds=nocloud-net``, the ``seedfrom`` value must start
-with ``http://``, ``https://`` or ``ftp://``
+with ``http://`` or ``https://``.
e.g. you can pass this option to QEMU:
@@ -133,12 +133,12 @@ be network configuration based on the filename.
version: 2
ethernets:
interface0:
- match:
- mac_address: "52:54:00:12:34:00"
- set-name: interface0
- addresses:
- - 192.168.1.10/255.255.255.0
- gateway4: 192.168.1.254
+ match:
+ mac_address: "52:54:00:12:34:00"
+ set-name: interface0
+ addresses:
+ - 192.168.1.10/255.255.255.0
+ gateway4: 192.168.1.254
.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst
index 8ce2a53d..b8870bf1 100644
--- a/doc/rtd/topics/datasources/openstack.rst
+++ b/doc/rtd/topics/datasources/openstack.rst
@@ -19,7 +19,8 @@ checks the following environment attributes as a potential OpenStack platform:
* **/proc/1/environ**: Nova-lxd contains *product_name=OpenStack Nova*
* **DMI product_name**: Either *Openstack Nova* or *OpenStack Compute*
- * **DMI chassis_asset_tag** is *OpenTelekomCloud*
+ * **DMI chassis_asset_tag** is *OpenTelekomCloud*, *SAP CCloud VM*,
+ *OpenStack Nova* (since 19.2) or *OpenStack Compute* (since 19.2)
Configuration
@@ -50,12 +51,12 @@ An example configuration with the default values is provided below:
.. sourcecode:: yaml
datasource:
- OpenStack:
- metadata_urls: ["http://169.254.169.254"]
- max_wait: -1
- timeout: 10
- retries: 5
- apply_network_config: True
+ OpenStack:
+ metadata_urls: ["http://169.254.169.254"]
+ max_wait: -1
+ timeout: 10
+ retries: 5
+ apply_network_config: True
Vendor Data
diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst
index afcf2679..0d416f32 100644
--- a/doc/rtd/topics/debugging.rst
+++ b/doc/rtd/topics/debugging.rst
@@ -163,3 +163,104 @@ commandline:
Inspect cloud-init.log for output of what operations were performed as a
result.
+
+.. _proposed_sru_testing:
+
+Stable Release Updates (SRU) testing for cloud-init
+===================================================
+Once an Ubuntu release is stable (i.e. after it is released), updates for it
+must follow a special procedure called a "stable release update" (or `SRU`_).
+
+The cloud-init project has a specific process it follows when validating
+a cloud-init SRU, documented in the `CloudinitUpdates`_ wiki page.
+
+Generally an SRU test of cloud-init performs the following:
+
+ * Install a pre-release version of cloud-init from the
+ **-proposed** APT pocket (e.g. **bionic-proposed**)
+ * Upgrade cloud-init and attempt a clean run of cloud-init to assert the new
+ version of cloud-init works properly on the specific platform and Ubuntu series
+ * Check for tracebacks or errors in behavior
+
+
+Manual SRU verification procedure
+---------------------------------
+Below are steps to manually test a pre-release version of cloud-init
+from **-proposed**.
+
+.. note::
+ For each Ubuntu SRU, the Ubuntu Server team manually validates the new version of cloud-init
+ on these platforms: **Amazon EC2, Azure, GCE, OpenStack, Oracle,
+ Softlayer (IBM), LXD, KVM**
+
+1. Launch a VM on your favorite platform, providing this cloud-config
+ user-data and replacing `<YOUR_LAUNCHPAD_USERNAME>` with your username:
+
+.. code-block:: yaml
+
+ ## template: jinja
+ #cloud-config
+ ssh_import_id: [<YOUR_LAUNCHPAD_USERNAME>]
+ hostname: SRU-worked-{{v1.cloud_name}}
+
+2. Wait for current cloud-init to complete, replace `<YOUR_VM_IP>` with the IP
+ address of the VM that you launched in step 1:
+
+.. code-block:: bash
+
+ CI_VM_IP=<YOUR_VM_IP>
+ # Make note of the datasource cloud-init detected in --long output.
+ # In step 5, you will use this to confirm the same datasource is detected after upgrade.
+ ssh ubuntu@$CI_VM_IP -- cloud-init status --wait --long
+
+3. Set up the **-proposed** pocket on your VM and upgrade to the **-proposed**
+ cloud-init:
+
+.. code-block:: bash
+
+ # Create a script that will add the -proposed pocket to APT's sources
+ # and install cloud-init from that pocket
+ cat > setup_proposed.sh <<EOF
+ #!/bin/bash
+ mirror=http://archive.ubuntu.com/ubuntu
+ echo deb \$mirror \$(lsb_release -sc)-proposed main | tee \
+ /etc/apt/sources.list.d/proposed.list
+ apt-get update -q
+ apt-get install -qy cloud-init
+ EOF
+
+ scp setup_proposed.sh ubuntu@$CI_VM_IP:.
+ ssh ubuntu@$CI_VM_IP -- sudo bash setup_proposed.sh
+
+4. Change hostname, clean cloud-init's state, and reboot to run cloud-init
+ from scratch:
+
+.. code-block:: bash
+
+ ssh ubuntu@$CI_VM_IP -- sudo hostname something-else
+ ssh ubuntu@$CI_VM_IP -- sudo cloud-init clean --logs --reboot
+
+5. Validate **-proposed** cloud-init came up without error
+
+.. code-block:: bash
+
+ # Block until cloud-init completes and verify from --long the datasource
+ # from step 1. Errors would show up in --long
+
+ ssh ubuntu@$CI_VM_IP -- cloud-init status --wait --long
+ # Make sure hostname was set properly to SRU-worked-<cloud name>
+ ssh ubuntu@$CI_VM_IP -- hostname
+ # Check for any errors or warnings in cloud-init logs.
+ # (This should produce no output if successful.)
+ ssh ubuntu@$CI_VM_IP -- grep Trace "/var/log/cloud-init*"
+
+6. If you encounter an error during SRU testing:
+
+ * Create a `new cloud-init bug`_ reporting the version of cloud-init
+ affected
+ * Ping upstream cloud-init on Freenode's `#cloud-init IRC channel`_
+
+.. _SRU: https://wiki.ubuntu.com/StableReleaseUpdates
+.. _CloudinitUpdates: https://wiki.ubuntu.com/CloudinitUpdates
+.. _new cloud-init bug: https://bugs.launchpad.net/cloud-init/+filebug
+.. _#cloud-init IRC channel: https://webchat.freenode.net/?channel=#cloud-init
diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst
index 98c0cfaa..aa1be142 100644
--- a/doc/rtd/topics/faq.rst
+++ b/doc/rtd/topics/faq.rst
@@ -104,6 +104,23 @@ The force parameter allows the command to be run again since the instance has
already launched. The other options increase the verbosity of logging and
put the logs to STDERR.
+How can I re-run datasource detection and cloud-init?
+=====================================================
+
+If a user is developing a new datasource or working on debugging an issue, it
+may be useful to re-run datasource detection and the initial setup of
+cloud-init.
+
+To do this, force ds-identify to re-run, clean up any logs, and re-run
+cloud-init:
+
+.. code-block:: shell-session
+
+ $ sudo DI_LOG=stderr /usr/lib/cloud-init/ds-identify --force
+ $ sudo cloud-init clean --logs
+ $ sudo cloud-init init --local
+ $ sudo cloud-init init
+
How can I debug my user data?
=============================
@@ -206,8 +223,8 @@ values or the LXD `Custom Network Configuration`_ document for more about
custom network config.
.. _LXD: https://linuxcontainers.org/
-.. _Instance Configuration: https://lxd.readthedocs.io/en/latest/instances/
-.. _Custom Network Configuration: https://lxd.readthedocs.io/en/latest/cloud-init/
+.. _Instance Configuration: https://linuxcontainers.org/lxd/docs/master/instances
+.. _Custom Network Configuration: https://linuxcontainers.org/lxd/docs/master/cloud-init
Where can I learn more?
========================================
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst
index 2b60bdd3..1d0a7097 100644
--- a/doc/rtd/topics/format.rst
+++ b/doc/rtd/topics/format.rst
@@ -38,29 +38,18 @@ Supported content-types:
Helper script to generate mime messages
---------------------------------------
-.. code-block:: python
-
- #!/usr/bin/python
-
- import sys
-
- from email.mime.multipart import MIMEMultipart
- from email.mime.text import MIMEText
+The cloud-init codebase includes a helper script to generate MIME multi-part
+files: `make-mime.py`_.
- if len(sys.argv) == 1:
- print("%s input-file:type ..." % (sys.argv[0]))
- sys.exit(1)
+``make-mime.py`` takes pairs of (filename, "text/" mime subtype) separated by
+a colon (e.g. ``config.yaml:cloud-config``) and emits a MIME multipart
+message to stdout. An example invocation, assuming you have your cloud config
+in ``config.yaml`` and a shell script in ``script.sh`` and want to store the
+multipart message in ``user-data``::
- combined_message = MIMEMultipart()
- for i in sys.argv[1:]:
- (filename, format_type) = i.split(":", 1)
- with open(filename) as fh:
- contents = fh.read()
- sub_message = MIMEText(contents, format_type, sys.getdefaultencoding())
- sub_message.add_header('Content-Disposition', 'attachment; filename="%s"' % (filename))
- combined_message.attach(sub_message)
+ ./tools/make-mime.py -a config.yaml:cloud-config -a script.sh:x-shellscript > user-data
- print(combined_message)
+.. _make-mime.py: https://github.com/canonical/cloud-init/blob/master/tools/make-mime.py
User-Data Script
@@ -126,7 +115,7 @@ Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when
using a MIME archive.
.. note::
- New in cloud-init v. 18.4: Cloud config dta can also render cloud instance
+ New in cloud-init v. 18.4: Cloud config data can also render cloud instance
metadata variables using jinja templating. See
:ref:`instance_metadata` for more information.
diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst
index e7dd0d62..845098bb 100644
--- a/doc/rtd/topics/instancedata.rst
+++ b/doc/rtd/topics/instancedata.rst
@@ -76,6 +76,11 @@ There are three basic top-level keys:
'security sensitive'. Only the keys listed here will be redacted from
instance-data.json for non-root users.
+* **merged_cfg**: Merged cloud-init 'system_config' from `/etc/cloud/cloud.cfg`
+ and `/etc/cloud/cloud-cfg.d`. Values under this key could contain sensitive
+ information such as passwords, so it is included in the **sensitive-keys**
+ list which is only readable by root.
+
* **ds**: Datasource-specific metadata crawled for the specific cloud
platform. It should closely represent the structure of the cloud metadata
crawled. The structure of content and details provided are entirely
@@ -83,6 +88,9 @@ There are three basic top-level keys:
The content exposed under the 'ds' key is currently **experimental** and
expected to change slightly in the upcoming cloud-init release.
+* **sys_info**: Information about the underlying OS, Python, architecture and
+ kernel. This represents the data collected by `cloudinit.util.system_info`.
+
* **v1**: Standardized cloud-init metadata keys, these keys are guaranteed to
exist on all cloud platforms. They will also retain their current behavior
and format and will be carried forward even if cloud-init introduces a new
@@ -103,7 +111,7 @@ v1.cloud_name
-------------
Where possible this will indicate the 'name' of the cloud the system is running
on. This is different than the 'platform' item. For example, the cloud name of
-Amazone Web Services is 'aws', while the platform is 'ec2'.
+Amazon Web Services is 'aws', while the platform is 'ec2'.
If determining a specific name is not possible or provided in meta-data, then
this field may contain the same content as 'platform'.
@@ -117,6 +125,21 @@ Example output:
- nocloud
- ovf
+v1.distro, v1.distro_version, v1.distro_release
+-----------------------------------------------
+This shall be the distro name, version and release as determined by
+`cloudinit.util.get_linux_distro`.
+
+Example output:
+
+- centos, 7.5, core
+- debian, 9, stretch
+- freebsd, 12.0-release-p10,
+- opensuse, 42.3, x86_64
+- opensuse-tumbleweed, 20180920, x86_64
+- redhat, 7.5, 'maipo'
+- sles, 12.3, x86_64
+- ubuntu, 20.04, focal
v1.instance_id
--------------
@@ -126,6 +149,14 @@ Examples output:
- i-<hash>
+v1.kernel_release
+-----------------
+This shall be the release of the running kernel, as reported by `uname -r`.
+
+Example output:
+
+- 5.3.0-1010-aws
+
v1.local_hostname
-----------------
The internal or local hostname of the system.
@@ -135,6 +166,17 @@ Examples output:
- ip-10-41-41-70
- <user-provided-hostname>
+v1.machine
+----------
+This shall be the CPU architecture of the running machine, as reported by `uname -m`.
+
+Example output:
+
+- x86_64
+- i686
+- ppc64le
+- s390x
+
v1.platform
-------------
An attempt to identify the cloud platform instance that the system is running
@@ -154,7 +196,7 @@ v1.subplatform
Additional platform details describing the specific source or type of metadata
used. The format of subplatform will be:
-``<subplatform_type> (<url_file_or_dev_path>``
+``<subplatform_type> (<url_file_or_dev_path>)``
Examples output:
@@ -171,6 +213,15 @@ Examples output:
- ['ssh-rsa AA...', ...]
+v1.python_version
+-----------------
+The version of python that is running cloud-init, as determined by
+`cloudinit.util.system_info`
+
+Example output:
+
+- 3.7.6
+
v1.region
---------
The physical region/data center in which the instance is deployed.
@@ -192,164 +243,265 @@ Examples output:
Example Output
--------------
-Below is an example of ``/run/cloud-init/instance_data.json`` on an EC2
-instance:
+Below is an example of ``/run/cloud-init/instance-data-sensitive.json`` on an
+EC2 instance:
.. sourcecode:: json
{
+ "_beta_keys": [
+ "subplatform"
+ ],
+ "availability_zone": "us-east-1b",
"base64_encoded_keys": [],
+ "merged_cfg": {
+ "_doc": "Merged cloud-init system config from /etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/",
+ "_log": [
+ "[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n",
+ "[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n",
+ "[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=(\"/dev/log\", handlers.SysLogHandler.LOG_USER)\n"
+ ],
+ "cloud_config_modules": [
+ "emit_upstart",
+ "snap",
+ "ssh-import-id",
+ "locale",
+ "set-passwords",
+ "grub-dpkg",
+ "apt-pipelining",
+ "apt-configure",
+ "ubuntu-advantage",
+ "ntp",
+ "timezone",
+ "disable-ec2-metadata",
+ "runcmd",
+ "byobu"
+ ],
+ "cloud_final_modules": [
+ "package-update-upgrade-install",
+ "fan",
+ "landscape",
+ "lxd",
+ "ubuntu-drivers",
+ "puppet",
+ "chef",
+ "mcollective",
+ "salt-minion",
+ "rightscale_userdata",
+ "scripts-vendor",
+ "scripts-per-once",
+ "scripts-per-boot",
+ "scripts-per-instance",
+ "scripts-user",
+ "ssh-authkey-fingerprints",
+ "keys-to-console",
+ "phone-home",
+ "final-message",
+ "power-state-change"
+ ],
+ "cloud_init_modules": [
+ "migrator",
+ "seed_random",
+ "bootcmd",
+ "write-files",
+ "growpart",
+ "resizefs",
+ "disk_setup",
+ "mounts",
+ "set_hostname",
+ "update_hostname",
+ "update_etc_hosts",
+ "ca-certs",
+ "rsyslog",
+ "users-groups",
+ "ssh"
+ ],
+ "datasource_list": [
+ "Ec2",
+ "None"
+ ],
+ "def_log_file": "/var/log/cloud-init.log",
+ "disable_root": true,
+ "log_cfgs": [
+ [
+ "[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n",
+ "[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n"
+ ]
+ ],
+ "output": {
+ "all": "| tee -a /var/log/cloud-init-output.log"
+ },
+ "preserve_hostname": false,
+ "syslog_fix_perms": [
+ "syslog:adm",
+ "root:adm",
+ "root:wheel",
+ "root:root"
+ ],
+ "users": [
+ "default"
+ ],
+ "vendor_data": {
+ "enabled": true,
+ "prefix": []
+ }
+ },
+ "cloud_name": "aws",
+ "distro": "ubuntu",
+ "distro_release": "focal",
+ "distro_version": "20.04",
"ds": {
"_doc": "EXPERIMENTAL: The structure and format of content scoped under the 'ds' key may change in subsequent releases of cloud-init.",
"_metadata_api_version": "2016-09-02",
"dynamic": {
- "instance-identity": {
+ "instance_identity": {
"document": {
- "accountId": "437526006925",
+ "accountId": "329910648901",
"architecture": "x86_64",
- "availabilityZone": "us-east-2b",
+ "availabilityZone": "us-east-1b",
"billingProducts": null,
"devpayProductCodes": null,
- "imageId": "ami-079638aae7046bdd2",
- "instanceId": "i-075f088c72ad3271c",
+ "imageId": "ami-02e8aa396f8be3b6d",
+ "instanceId": "i-0929128ff2f73a2f1",
"instanceType": "t2.micro",
"kernelId": null,
"marketplaceProductCodes": null,
- "pendingTime": "2018-10-05T20:10:43Z",
- "privateIp": "10.41.41.95",
+ "pendingTime": "2020-02-27T20:46:18Z",
+ "privateIp": "172.31.81.43",
"ramdiskId": null,
- "region": "us-east-2",
+ "region": "us-east-1",
"version": "2017-09-30"
},
"pkcs7": [
- "MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCAJIAEggHbewog",
- "ICJkZXZwYXlQcm9kdWN0Q29kZXMiIDogbnVsbCwKICAibWFya2V0cGxhY2VQcm9kdWN0Q29kZXMi",
- "IDogbnVsbCwKICAicHJpdmF0ZUlwIiA6ICIxMC40MS40MS45NSIsCiAgInZlcnNpb24iIDogIjIw",
- "MTctMDktMzAiLAogICJpbnN0YW5jZUlkIiA6ICJpLTA3NWYwODhjNzJhZDMyNzFjIiwKICAiYmls",
- "bGluZ1Byb2R1Y3RzIiA6IG51bGwsCiAgImluc3RhbmNlVHlwZSIgOiAidDIubWljcm8iLAogICJh",
- "Y2NvdW50SWQiIDogIjQzNzUyNjAwNjkyNSIsCiAgImF2YWlsYWJpbGl0eVpvbmUiIDogInVzLWVh",
- "c3QtMmIiLAogICJrZXJuZWxJZCIgOiBudWxsLAogICJyYW1kaXNrSWQiIDogbnVsbCwKICAiYXJj",
- "aGl0ZWN0dXJlIiA6ICJ4ODZfNjQiLAogICJpbWFnZUlkIiA6ICJhbWktMDc5NjM4YWFlNzA0NmJk",
- "ZDIiLAogICJwZW5kaW5nVGltZSIgOiAiMjAxOC0xMC0wNVQyMDoxMDo0M1oiLAogICJyZWdpb24i",
- "IDogInVzLWVhc3QtMiIKfQAAAAAAADGCARcwggETAgEBMGkwXDELMAkGA1UEBhMCVVMxGTAXBgNV",
- "BAgTEFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0FtYXpvbiBX",
- "ZWIgU2VydmljZXMgTExDAgkAlrpI2eVeGmcwCQYFKw4DAhoFAKBdMBgGCSqGSIb3DQEJAzELBgkq",
- "hkiG9w0BBwEwHAYJKoZIhvcNAQkFMQ8XDTE4MTAwNTIwMTA0OFowIwYJKoZIhvcNAQkEMRYEFK0k",
- "Tz6n1A8/zU1AzFj0riNQORw2MAkGByqGSM44BAMELjAsAhRNrr174y98grPBVXUforN/6wZp8AIU",
- "JLZBkrB2GJA8A4WJ1okq++jSrBIAAAAAAAA="
+ "MIAGCSqGSIb3DQ...",
+ "REDACTED",
+ "AhQUgq0iPWqPTVnT96tZE6L1XjjLHQAAAAAAAA=="
],
"rsa2048": [
- "MIAGCSqGSIb3DQEHAqCAMIACAQExDzANBglghkgBZQMEAgEFADCABgkqhkiG9w0BBwGggCSABIIB",
- "23sKICAiZGV2cGF5UHJvZHVjdENvZGVzIiA6IG51bGwsCiAgIm1hcmtldHBsYWNlUHJvZHVjdENv",
- "ZGVzIiA6IG51bGwsCiAgInByaXZhdGVJcCIgOiAiMTAuNDEuNDEuOTUiLAogICJ2ZXJzaW9uIiA6",
- "ICIyMDE3LTA5LTMwIiwKICAiaW5zdGFuY2VJZCIgOiAiaS0wNzVmMDg4YzcyYWQzMjcxYyIsCiAg",
- "ImJpbGxpbmdQcm9kdWN0cyIgOiBudWxsLAogICJpbnN0YW5jZVR5cGUiIDogInQyLm1pY3JvIiwK",
- "ICAiYWNjb3VudElkIiA6ICI0Mzc1MjYwMDY5MjUiLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1",
- "cy1lYXN0LTJiIiwKICAia2VybmVsSWQiIDogbnVsbCwKICAicmFtZGlza0lkIiA6IG51bGwsCiAg",
- "ImFyY2hpdGVjdHVyZSIgOiAieDg2XzY0IiwKICAiaW1hZ2VJZCIgOiAiYW1pLTA3OTYzOGFhZTcw",
- "NDZiZGQyIiwKICAicGVuZGluZ1RpbWUiIDogIjIwMTgtMTAtMDVUMjA6MTA6NDNaIiwKICAicmVn",
- "aW9uIiA6ICJ1cy1lYXN0LTIiCn0AAAAAAAAxggH/MIIB+wIBATBpMFwxCzAJBgNVBAYTAlVTMRkw",
- "FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6",
- "b24gV2ViIFNlcnZpY2VzIExMQwIJAM07oeX4xevdMA0GCWCGSAFlAwQCAQUAoGkwGAYJKoZIhvcN",
- "AQkDMQsGCSqGSIb3DQEHATAcBgkqhkiG9w0BCQUxDxcNMTgxMDA1MjAxMDQ4WjAvBgkqhkiG9w0B",
- "CQQxIgQgkYz0pZk3zJKBi4KP4egeOKJl/UYwu5UdE7id74pmPwMwDQYJKoZIhvcNAQEBBQAEggEA",
- "dC3uIGGNul1OC1mJKSH3XoBWsYH20J/xhIdftYBoXHGf2BSFsrs9ZscXd2rKAKea4pSPOZEYMXgz",
- "lPuT7W0WU89N3ZKviy/ReMSRjmI/jJmsY1lea6mlgcsJXreBXFMYucZvyeWGHdnCjamoKWXkmZlM",
- "mSB1gshWy8Y7DzoKviYPQZi5aI54XK2Upt4kGme1tH1NI2Cq+hM4K+adxTbNhS3uzvWaWzMklUuU",
- "QHX2GMmjAVRVc8vnA8IAsBCJJp+gFgYzi09IK+cwNgCFFPADoG6jbMHHf4sLB3MUGpiA+G9JlCnM",
- "fmkjI2pNRB8spc0k4UG4egqLrqCz67WuK38tjwAAAAAAAA=="
+ "MIAGCSqGSIb...",
+ "REDACTED",
+ "clYQvuE45xXm7Yreg3QtQbrP//owl1eZHj6s350AAAAAAAA="
],
"signature": [
- "Tsw6h+V3WnxrNVSXBYIOs1V4j95YR1mLPPH45XnhX0/Ei3waJqf7/7EEKGYP1Cr4PTYEULtZ7Mvf",
- "+xJpM50Ivs2bdF7o0c4vnplRWe3f06NI9pv50dr110j/wNzP4MZ1pLhJCqubQOaaBTF3LFutgRrt",
- "r4B0mN3p7EcqD8G+ll0="
+ "dA+QV+LLCWCRNddnrKleYmh2GvYo+t8urDkdgmDSsPi",
+ "REDACTED",
+ "kDT4ygyJLFkd3b4qjAs="
]
}
},
- "meta-data": {
- "ami-id": "ami-079638aae7046bdd2",
- "ami-launch-index": "0",
- "ami-manifest-path": "(unknown)",
- "block-device-mapping": {
+ "meta_data": {
+ "ami_id": "ami-02e8aa396f8be3b6d",
+ "ami_launch_index": "0",
+ "ami_manifest_path": "(unknown)",
+ "block_device_mapping": {
"ami": "/dev/sda1",
- "ephemeral0": "sdb",
- "ephemeral1": "sdc",
"root": "/dev/sda1"
},
- "hostname": "ip-10-41-41-95.us-east-2.compute.internal",
- "instance-action": "none",
- "instance-id": "i-075f088c72ad3271c",
- "instance-type": "t2.micro",
- "local-hostname": "ip-10-41-41-95.us-east-2.compute.internal",
- "local-ipv4": "10.41.41.95",
- "mac": "06:74:8f:39:cd:a6",
+ "hostname": "ip-172-31-81-43.ec2.internal",
+ "instance_action": "none",
+ "instance_id": "i-0929128ff2f73a2f1",
+ "instance_type": "t2.micro",
+ "local_hostname": "ip-172-31-81-43.ec2.internal",
+ "local_ipv4": "172.31.81.43",
+ "mac": "12:7e:c9:93:29:af",
"metrics": {
"vhostmd": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
},
"network": {
"interfaces": {
"macs": {
- "06:74:8f:39:cd:a6": {
- "device-number": "0",
- "interface-id": "eni-052058bbd7831eaae",
- "ipv4-associations": {
- "18.218.221.122": "10.41.41.95"
- },
- "local-hostname": "ip-10-41-41-95.us-east-2.compute.internal",
- "local-ipv4s": "10.41.41.95",
- "mac": "06:74:8f:39:cd:a6",
- "owner-id": "437526006925",
- "public-hostname": "ec2-18-218-221-122.us-east-2.compute.amazonaws.com",
- "public-ipv4s": "18.218.221.122",
- "security-group-ids": "sg-828247e9",
- "security-groups": "Cloud-init integration test secgroup",
- "subnet-id": "subnet-282f3053",
- "subnet-ipv4-cidr-block": "10.41.41.0/24",
- "subnet-ipv6-cidr-blocks": "2600:1f16:b80:ad00::/64",
- "vpc-id": "vpc-252ef24d",
- "vpc-ipv4-cidr-block": "10.41.0.0/16",
- "vpc-ipv4-cidr-blocks": "10.41.0.0/16",
- "vpc-ipv6-cidr-blocks": "2600:1f16:b80:ad00::/56"
- }
+ "12:7e:c9:93:29:af": {
+ "device_number": "0",
+ "interface_id": "eni-0c07a0474339b801d",
+ "ipv4_associations": {
+ "3.89.187.177": "172.31.81.43"
+ },
+ "local_hostname": "ip-172-31-81-43.ec2.internal",
+ "local_ipv4s": "172.31.81.43",
+ "mac": "12:7e:c9:93:29:af",
+ "owner_id": "329910648901",
+ "public_hostname": "ec2-3-89-187-177.compute-1.amazonaws.com",
+ "public_ipv4s": "3.89.187.177",
+ "security_group_ids": "sg-0100038b68aa79986",
+ "security_groups": "launch-wizard-3",
+ "subnet_id": "subnet-04e2d12a",
+ "subnet_ipv4_cidr_block": "172.31.80.0/20",
+ "vpc_id": "vpc-210b4b5b",
+ "vpc_ipv4_cidr_block": "172.31.0.0/16",
+ "vpc_ipv4_cidr_blocks": "172.31.0.0/16"
+ }
}
}
},
"placement": {
- "availability-zone": "us-east-2b"
+ "availability_zone": "us-east-1b"
},
"profile": "default-hvm",
- "public-hostname": "ec2-18-218-221-122.us-east-2.compute.amazonaws.com",
- "public-ipv4": "18.218.221.122",
- "public-keys": {
- "cloud-init-integration": [
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB cloud-init-integration"
- ]
- },
- "reservation-id": "r-0594a20e31f6cfe46",
- "security-groups": "Cloud-init integration test secgroup",
+ "public_hostname": "ec2-3-89-187-177.compute-1.amazonaws.com",
+ "public_ipv4": "3.89.187.177",
+ "reservation_id": "r-0c481643d15766a02",
+ "security_groups": "launch-wizard-3",
"services": {
"domain": "amazonaws.com",
"partition": "aws"
}
}
},
+ "instance_id": "i-0929128ff2f73a2f1",
+ "kernel_release": "5.3.0-1010-aws",
+ "local_hostname": "ip-172-31-81-43",
+ "machine": "x86_64",
+ "platform": "ec2",
+ "public_ssh_keys": [],
+ "python_version": "3.7.6",
+ "region": "us-east-1",
"sensitive_keys": [],
+ "subplatform": "metadata (http://169.254.169.254)",
+ "sys_info": {
+ "dist": [
+ "ubuntu",
+ "20.04",
+ "focal"
+ ],
+ "platform": "Linux-5.3.0-1010-aws-x86_64-with-Ubuntu-20.04-focal",
+ "python": "3.7.6",
+ "release": "5.3.0-1010-aws",
+ "system": "Linux",
+ "uname": [
+ "Linux",
+ "ip-172-31-81-43",
+ "5.3.0-1010-aws",
+ "#11-Ubuntu SMP Thu Jan 16 07:59:32 UTC 2020",
+ "x86_64",
+ "x86_64"
+ ],
+ "variant": "ubuntu"
+ },
+ "system_platform": "Linux-5.3.0-1010-aws-x86_64-with-Ubuntu-20.04-focal",
+ "userdata": "#cloud-config\nssh_import_id: [<my-launchpad-id>]\n...",
"v1": {
"_beta_keys": [
"subplatform"
],
- "availability-zone": "us-east-2b",
- "availability_zone": "us-east-2b",
+ "availability_zone": "us-east-1b",
"cloud_name": "aws",
- "instance_id": "i-075f088c72ad3271c",
- "local_hostname": "ip-10-41-41-95",
+ "distro": "ubuntu",
+ "distro_release": "focal",
+ "distro_version": "20.04",
+ "instance_id": "i-0929128ff2f73a2f1",
+ "kernel": "5.3.0-1010-aws",
+ "local_hostname": "ip-172-31-81-43",
+ "machine": "x86_64",
"platform": "ec2",
- "public_ssh_keys": [
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB cloud-init-integration"
- ],
- "region": "us-east-2",
- "subplatform": "metadata (http://169.254.169.254)"
- }
+ "public_ssh_keys": [],
+ "python": "3.7.6",
+ "region": "us-east-1",
+ "subplatform": "metadata (http://169.254.169.254)",
+ "system_platform": "Linux-5.3.0-1010-aws-x86_64-with-Ubuntu-20.04-focal",
+ "variant": "ubuntu"
+ },
+ "variant": "ubuntu",
+ "vendordata": ""
}
diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
index 1520ba9a..8eeadebf 100644
--- a/doc/rtd/topics/network-config.rst
+++ b/doc/rtd/topics/network-config.rst
@@ -25,17 +25,23 @@ For example, OpenStack may provide network config in the MetaData Service.
**System Config**
-A ``network:`` entry in /etc/cloud/cloud.cfg.d/* configuration files.
+A ``network:`` entry in ``/etc/cloud/cloud.cfg.d/*`` configuration files.
**Kernel Command Line**
-``ip=`` or ``network-config=<YAML config string>``
+``ip=`` or ``network-config=<Base64 encoded YAML config string>``
User-data cannot change an instance's network configuration. In the absence
of network configuration in any of the above sources, `Cloud-init`_ will
write out a network configuration that will issue a DHCP request on a "first"
network interface.
+.. note::
+
+ The network-config value is expected to be a Base64 encoded YAML string in
+ :ref:`network_config_v1` or :ref:`network_config_v2` format. Optionally it
+ can be compressed with ``gzip`` prior to Base64 encoding.
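+
+For example, a network config YAML file can be compressed, Base64 encoded
+and placed on the kernel command line with a small Python snippet (a
+minimal sketch; the ``net.yaml`` filename is illustrative)::
+
+    import base64
+    import gzip
+
+    with open('net.yaml', 'rb') as f:
+        encoded = base64.b64encode(gzip.compress(f.read())).decode()
+    print('network-config=' + encoded)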
+
Disabling Network Configuration
===============================
@@ -48,19 +54,19 @@ on other methods, such as embedded configuration or other customizations.
**Kernel Command Line**
-`Cloud-init`_ will check for a parameter ``network-config`` and the
-value is expected to be YAML string in the :ref:`network_config_v1` format.
-The YAML string may optionally be ``Base64`` encoded, and optionally
-compressed with ``gzip``.
+`Cloud-init`_ will additionally check for the parameter
+``network-config=disabled``, which will automatically disable any network
+configuration.
Example disabling kernel command line entry: ::
- network-config={config: disabled}
+ network-config=disabled
**cloud config**
-In the combined cloud-init configuration dictionary. ::
+In the combined cloud-init configuration dictionary, merged from
+``/etc/cloud/cloud.cfg`` and ``/etc/cloud/cloud.cfg.d/*``::
network:
config: disabled
@@ -191,7 +197,7 @@ supplying an updated configuration in cloud-config. ::
system_info:
network:
- renderers: ['netplan', 'eni', 'sysconfig', 'freebsd']
+ renderers: ['netplan', 'eni', 'sysconfig', 'freebsd', 'netbsd', 'openbsd']
Network Configuration Tools
diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst
index aee3d7fc..f03b5969 100644
--- a/doc/rtd/topics/tests.rst
+++ b/doc/rtd/topics/tests.rst
@@ -467,11 +467,11 @@ Set region in platforms.yaml
.. code-block:: yaml
azurecloud:
- enabled: true
- region: West US 2
- vm_size: Standard_DS1_v2
- storage_sku: standard_lrs
- tag: ci
+ enabled: true
+ region: West US 2
+ vm_size: Standard_DS1_v2
+ storage_sku: standard_lrs
+ tag: ci
Architecture
@@ -546,38 +546,38 @@ The following demonstrates merge behavior:
.. code-block:: yaml
defaults:
- list_item:
- - list_entry_1
- - list_entry_2
- int_item_1: 123
- int_item_2: 234
- dict_item:
- subkey_1: 1
- subkey_2: 2
- subkey_dict:
- subsubkey_1: a
- subsubkey_2: b
+ list_item:
+ - list_entry_1
+ - list_entry_2
+ int_item_1: 123
+ int_item_2: 234
+ dict_item:
+ subkey_1: 1
+ subkey_2: 2
+ subkey_dict:
+ subsubkey_1: a
+ subsubkey_2: b
overrides:
- list_item:
- - overridden_list_entry
- int_item_1: 0
- dict_item:
- subkey_2: false
- subkey_dict:
- subsubkey_2: 'new value'
+ list_item:
+ - overridden_list_entry
+ int_item_1: 0
+ dict_item:
+ subkey_2: false
+ subkey_dict:
+ subsubkey_2: 'new value'
result:
- list_item:
- - overridden_list_entry
- int_item_1: 0
- int_item_2: 234
- dict_item:
- subkey_1: 1
- subkey_2: false
- subkey_dict:
- subsubkey_1: a
- subsubkey_2: 'new value'
+ list_item:
+ - overridden_list_entry
+ int_item_1: 0
+ int_item_2: 234
+ dict_item:
+ subkey_1: 1
+ subkey_2: false
+ subkey_dict:
+ subsubkey_1: a
+ subsubkey_2: 'new value'
Image Config
diff --git a/integration-requirements.txt b/integration-requirements.txt
index 897d6110..44e45c1b 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -5,7 +5,6 @@
# the packages/pkg-deps.json file as well.
#
-unittest2
# ec2 backend
boto3==1.5.9
diff --git a/packages/bddeb b/packages/bddeb
index 95602a02..02ac2975 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -59,15 +59,9 @@ def run_helper(helper, args=None, strip=True):
return stdout
-def write_debian_folder(root, templ_data, is_python2, cloud_util_deps):
+def write_debian_folder(root, templ_data, cloud_util_deps):
"""Create a debian package directory with all rendered template files."""
print("Creating a debian/ folder in %r" % (root))
- if is_python2:
- pyver = "2"
- python = "python"
- else:
- pyver = "3"
- python = "python3"
deb_dir = util.abs_join(root, 'debian')
@@ -83,30 +77,23 @@ def write_debian_folder(root, templ_data, is_python2, cloud_util_deps):
# Write out the control file template
reqs_output = run_helper(
- 'read-dependencies',
- args=['--distro', 'debian', '--python-version', pyver])
+ 'read-dependencies', args=['--distro', 'debian'])
reqs = reqs_output.splitlines()
test_reqs = run_helper(
'read-dependencies',
['--requirements-file', 'test-requirements.txt',
- '--system-pkg-names', '--python-version', pyver]).splitlines()
+ '--system-pkg-names']).splitlines()
requires = ['cloud-utils | cloud-guest-utils'] if cloud_util_deps else []
# We consolidate all deps as Build-Depends as our package build runs all
# tests so we need all runtime dependencies anyway.
# NOTE: python package was moved to the front after debuild -S would fail with
# 'Please add apropriate interpreter' errors (as in debian bug 861132)
- requires.extend([python] + reqs + test_reqs)
+ requires.extend(['python3'] + reqs + test_reqs)
templater.render_to_file(util.abs_join(find_root(),
'packages', 'debian', 'control.in'),
util.abs_join(deb_dir, 'control'),
- params={'build_depends': ','.join(requires),
- 'python': python})
-
- templater.render_to_file(util.abs_join(find_root(),
- 'packages', 'debian', 'rules.in'),
- util.abs_join(deb_dir, 'rules'),
- params={'python': python, 'pyver': pyver})
+ params={'build_depends': ','.join(requires)})
def read_version():
@@ -177,6 +164,11 @@ def main():
# output like 0.7.6-1022-g36e92d3
ver_data = read_version()
+ if ver_data['is_release_branch_ci']:
+ # If we're performing CI for a new release branch, we don't yet
+ # have the tag required to generate version_long; use version
+ # instead.
+ ver_data['version_long'] = ver_data['version']
# This is really only a temporary archive
# since we will extract it then add in the debian
@@ -192,7 +184,9 @@ def main():
break
if path is None:
print("Creating a temp tarball using the 'make-tarball' helper")
- run_helper('make-tarball', ['--long', '--output=' + tarball_fp])
+ run_helper('make-tarball',
+ ['--version', ver_data['version_long'],
+ '--output=' + tarball_fp])
print("Extracting temporary tarball %r" % (tarball))
cmd = ['tar', '-xvzf', tarball_fp, '-C', tdir]
@@ -201,8 +195,7 @@ def main():
xdir = util.abs_join(tdir, "cloud-init-%s" % ver_data['version_long'])
templ_data.update(ver_data)
- write_debian_folder(xdir, templ_data, is_python2=args.python2,
- cloud_util_deps=args.cloud_utils)
+ write_debian_folder(xdir, templ_data, cloud_util_deps=args.cloud_utils)
print("Running 'debuild %s' in %r" % (' '.join(args.debuild_args),
xdir))
diff --git a/packages/brpm b/packages/brpm
index a154ef29..1be8804c 100755
--- a/packages/brpm
+++ b/packages/brpm
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import argparse
import glob
@@ -42,7 +42,7 @@ def run_helper(helper, args=None, strip=True):
return stdout
-def read_dependencies(distro, requirements_file='requirements.txt'):
+def read_dependencies(distro):
"""Returns the Python package depedencies from requirements.txt files.
@returns a tuple of (requirements, test_requirements)
diff --git a/packages/debian/control.in b/packages/debian/control.in
index e9ed64f3..72895b47 100644
--- a/packages/debian/control.in
+++ b/packages/debian/control.in
@@ -10,11 +10,10 @@ Standards-Version: 3.9.6
Package: cloud-init
Architecture: all
Depends: ${misc:Depends},
- ${${python}:Depends},
+ ${python3:Depends},
iproute2,
isc-dhcp-client
Recommends: eatmydata, sudo, software-properties-common, gdisk
-XB-Python-Version: ${python:Versions}
Description: Init scripts for cloud instances
Cloud instances need special scripts to run during initialisation
to retrieve and install ssh keys and to let the user run various scripts.
diff --git a/packages/debian/rules.in b/packages/debian/rules
index e542c7f1..d138deeb 100755
--- a/packages/debian/rules.in
+++ b/packages/debian/rules
@@ -1,12 +1,10 @@
-## template:basic
#!/usr/bin/make -f
INIT_SYSTEM ?= systemd
export PYBUILD_INSTALL_ARGS=--init-system=$(INIT_SYSTEM)
-PYVER ?= python${pyver}
DEB_VERSION := $(shell dpkg-parsechangelog --show-field=Version)
%:
- dh $@ --with $(PYVER),systemd --buildsystem pybuild
+ dh $@ --with python3,systemd --buildsystem pybuild
override_dh_install:
dh_install
@@ -19,7 +17,7 @@ override_dh_install:
override_dh_auto_test:
ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS)))
- http_proxy= make PYVER=python${pyver} check
+ http_proxy= make check
else
@echo check disabled by DEB_BUILD_OPTIONS=$(DEB_BUILD_OPTIONS)
endif
diff --git a/packages/pkg-deps.json b/packages/pkg-deps.json
index 72409dd8..80028396 100644
--- a/packages/pkg-deps.json
+++ b/packages/pkg-deps.json
@@ -6,52 +6,31 @@
"dh-systemd"
],
"renames" : {
- "pyyaml" : {
- "2" : "python-yaml",
- "3" : "python3-yaml"
- },
- "contextlib2" : {
- "2" : "python-contextlib2"
- },
- "pyserial" : {
- "2" : "python-serial",
- "3" : "python3-serial"
- }
+ "pyyaml" : "python3-yaml",
+ "pyserial" : "python3-serial"
},
"requires" : [
"procps"
]
},
+ "centos" : {
+ "build-requires" : [
+ "python3-devel"
+ ],
+ "requires" : [
+ "e2fsprogs",
+ "iproute",
+ "net-tools",
+ "procps",
+ "rsyslog",
+ "shadow-utils",
+ "sudo"
+ ]
+ },
"redhat" : {
"build-requires" : [
- "python-devel",
- "python-setuptools"
+ "python3-devel"
],
- "renames" : {
- "jinja2" : {
- "3" : "python34-jinja2"
- },
- "jsonschema" : {
- "3" : "python34-jsonschema"
- },
- "pyflakes" : {
- "2" : "pyflakes",
- "3" : "python34-pyflakes"
- },
- "pyyaml" : {
- "2" : "PyYAML",
- "3" : "python34-PyYAML"
- },
- "pyserial" : {
- "2" : "pyserial"
- },
- "requests" : {
- "3" : "python34-requests"
- },
- "six" : {
- "3" : "python34-six"
- }
- },
"requires" : [
"e2fsprogs",
"iproute",
@@ -64,9 +43,6 @@
},
"suse" : {
"renames" : {
- "pyyaml" : {
- "2" : "python-yaml"
- }
},
"build-requires" : [
"fdupes",
diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in
index 057a5784..4cff2c97 100644
--- a/packages/redhat/cloud-init.spec.in
+++ b/packages/redhat/cloud-init.spec.in
@@ -1,6 +1,4 @@
## template: jinja
-%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
-
%define use_systemd (0%{?fedora} && 0%{?fedora} >= 18) || (0%{?rhel} && 0%{?rhel} >= 7)
%if %{use_systemd}
@@ -94,11 +92,11 @@ ssh keys and to let the user run various scripts.
{% endfor %}
%build
-%{__python} setup.py build
+%{__python3} setup.py build
%install
-%{__python} setup.py install -O1 \
+%{__python3} setup.py install -O1 \
--skip-build --root $RPM_BUILD_ROOT \
--init-system=%{init_system}
@@ -109,7 +107,7 @@ cp -p tools/21-cloudinit.conf \
$RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d/21-cloudinit.conf
# Remove the tests
-rm -rf $RPM_BUILD_ROOT%{python_sitelib}/tests
+rm -rf $RPM_BUILD_ROOT%{python3_sitelib}/tests
# Required dirs...
mkdir -p $RPM_BUILD_ROOT/%{_sharedstatedir}/cloud
@@ -213,4 +211,4 @@ fi
%dir %{_sharedstatedir}/cloud
# Python code is here...
-%{python_sitelib}/*
+%{python3_sitelib}/*
diff --git a/requirements.txt b/requirements.txt
index dd10d85d..5817da3b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -32,6 +32,3 @@ jsonpatch
# For validating cloud-config sections per schema definitions
jsonschema
-
-# For Python 2/3 compatibility
-six
diff --git a/setup.py b/setup.py
index 01a67b95..cbacf48e 100755
--- a/setup.py
+++ b/setup.py
@@ -15,6 +15,7 @@ import os
import shutil
import sys
import tempfile
+import platform
import setuptools
from setuptools.command.install import install
@@ -33,23 +34,6 @@ def is_f(p):
def is_generator(p):
return '-generator' in p
-def tiny_p(cmd, capture=True):
- # Darn python 2.6 doesn't have check_output (argggg)
- stdout = subprocess.PIPE
- stderr = subprocess.PIPE
- if not capture:
- stdout = None
- stderr = None
- sp = subprocess.Popen(cmd, stdout=stdout,
- stderr=stderr, stdin=None,
- universal_newlines=True)
- (out, err) = sp.communicate()
- ret = sp.returncode
- if ret not in [0]:
- raise RuntimeError("Failed running %s [rc=%s] (%s, %s)" %
- (cmd, ret, out, err))
- return (out, err)
-
def pkg_config_read(library, var):
fallbacks = {
@@ -60,7 +44,7 @@ def pkg_config_read(library, var):
}
cmd = ['pkg-config', '--variable=%s' % var, library]
try:
- (path, err) = tiny_p(cmd)
+ path = subprocess.check_output(cmd).decode('utf-8')
path = path.strip()
except Exception:
path = fallbacks[library][var]
@@ -82,14 +66,14 @@ def in_virtualenv():
def get_version():
cmd = [sys.executable, 'tools/read-version']
- (ver, _e) = tiny_p(cmd)
- return str(ver).strip()
+ ver = subprocess.check_output(cmd)
+ return ver.decode('utf-8').strip()
def read_requires():
cmd = [sys.executable, 'tools/read-dependencies']
- (deps, _e) = tiny_p(cmd)
- return str(deps).splitlines()
+ deps = subprocess.check_output(cmd)
+ return deps.decode('utf-8').splitlines()
def render_tmpl(template, mode=None):
@@ -117,10 +101,11 @@ def render_tmpl(template, mode=None):
bname = os.path.basename(template).rstrip(tmpl_ext)
fpath = os.path.join(tmpd, bname)
if VARIANT:
- tiny_p([sys.executable, './tools/render-cloudcfg', '--variant',
- VARIANT, template, fpath])
+ subprocess.run([sys.executable, './tools/render-cloudcfg', '--variant',
+ VARIANT, template, fpath])
else:
- tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath])
+ subprocess.run(
+ [sys.executable, './tools/render-cloudcfg', template, fpath])
if mode:
os.chmod(fpath, mode)
# return path relative to setup.py
@@ -136,6 +121,7 @@ if '--distro' in sys.argv:
INITSYS_FILES = {
'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)],
'sysvinit_freebsd': [f for f in glob('sysvinit/freebsd/*') if is_f(f)],
+ 'sysvinit_netbsd': [f for f in glob('sysvinit/netbsd/*') if is_f(f)],
'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)],
'sysvinit_openrc': [f for f in glob('sysvinit/gentoo/*') if is_f(f)],
'sysvinit_suse': [f for f in glob('sysvinit/suse/*') if is_f(f)],
@@ -152,6 +138,7 @@ INITSYS_FILES = {
INITSYS_ROOTS = {
'sysvinit': 'etc/rc.d/init.d',
'sysvinit_freebsd': 'usr/local/etc/rc.d',
+ 'sysvinit_netbsd': 'usr/local/etc/rc.d',
'sysvinit_deb': 'etc/init.d',
'sysvinit_openrc': 'etc/init.d',
'sysvinit_suse': 'etc/init.d',
@@ -228,7 +215,7 @@ class InitsysInstallData(install):
if self.init_system and isinstance(self.init_system, str):
self.init_system = self.init_system.split(",")
- if len(self.init_system) == 0:
+ if len(self.init_system) == 0 and not platform.system().endswith('BSD'):
self.init_system = ['systemd']
bad = [f for f in self.init_system if f not in INITSYS_TYPES]
@@ -272,7 +259,7 @@ data_files = [
(USR + '/share/doc/cloud-init/examples/seed',
[f for f in glob('doc/examples/seed/*') if is_f(f)]),
]
-if os.uname()[0] != 'FreeBSD':
+if not platform.system().endswith('BSD'):
data_files.extend([
(ETC + '/NetworkManager/dispatcher.d/',
['tools/hook-network-manager']),
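
With Python 2.6 support gone, the hand-rolled tiny_p() helper is redundant: subprocess.check_output() already raises CalledProcessError on a non-zero exit. A minimal sketch of the replacement pattern used above:

    import subprocess
    import sys

    # check_output raises subprocess.CalledProcessError on failure,
    # so no manual returncode bookkeeping is needed
    ver = subprocess.check_output(
        [sys.executable, 'tools/read-version']).decode('utf-8').strip()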
diff --git a/sysvinit/freebsd/cloudconfig b/sysvinit/freebsd/cloudconfig
index e4064fa3..fb604f4d 100755
--- a/sysvinit/freebsd/cloudconfig
+++ b/sysvinit/freebsd/cloudconfig
@@ -22,4 +22,7 @@ cloudconfig_start()
}
load_rc_config $name
+
+: ${cloudconfig_enable="NO"}
+
run_rc_command "$1"
diff --git a/sysvinit/freebsd/cloudfinal b/sysvinit/freebsd/cloudfinal
index b6894c39..72047653 100755
--- a/sysvinit/freebsd/cloudfinal
+++ b/sysvinit/freebsd/cloudfinal
@@ -22,4 +22,7 @@ cloudfinal_start()
}
load_rc_config $name
+
+: ${cloudfinal_enable="NO"}
+
run_rc_command "$1"
diff --git a/sysvinit/freebsd/cloudinit b/sysvinit/freebsd/cloudinit
index 6bf7fa5b..aa5bd118 100755
--- a/sysvinit/freebsd/cloudinit
+++ b/sysvinit/freebsd/cloudinit
@@ -1,7 +1,7 @@
#!/bin/sh
# PROVIDE: cloudinit
-# REQUIRE: FILESYSTEMS NETWORKING cloudinitlocal devd
+# REQUIRE: FILESYSTEMS NETWORKING cloudinitlocal ldconfig devd
# BEFORE: cloudconfig cloudfinal
. /etc/rc.subr
@@ -22,4 +22,7 @@ cloudinit_start()
}
load_rc_config $name
+
+: ${cloudinit_enable="NO"}
+
run_rc_command "$1"
diff --git a/sysvinit/freebsd/cloudinitlocal b/sysvinit/freebsd/cloudinitlocal
index 7a034b3b..cb67b4a2 100755
--- a/sysvinit/freebsd/cloudinitlocal
+++ b/sysvinit/freebsd/cloudinitlocal
@@ -22,4 +22,7 @@ cloudlocal_start()
}
load_rc_config $name
+
+: ${cloudinitlocal_enable="NO"}
+
run_rc_command "$1"
diff --git a/sysvinit/netbsd/cloudconfig b/sysvinit/netbsd/cloudconfig
new file mode 100755
index 00000000..5cd7eb31
--- /dev/null
+++ b/sysvinit/netbsd/cloudconfig
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+# PROVIDE: cloudconfig
+# REQUIRE: cloudinit
+# BEFORE: sshd
+
+$_rc_subr_loaded . /etc/rc.subr
+
+name="cloudinit"
+start_cmd="start_cloud_init"
+start_cloud_init()
+{
+ /usr/pkg/bin/cloud-init modules --mode config
+}
+
+load_rc_config $name
+run_rc_command "$1"
diff --git a/sysvinit/netbsd/cloudfinal b/sysvinit/netbsd/cloudfinal
new file mode 100755
index 00000000..72f3e472
--- /dev/null
+++ b/sysvinit/netbsd/cloudfinal
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+# PROVIDE: cloudfinal
+# REQUIRE: LOGIN cloudconfig
+
+$_rc_subr_loaded . /etc/rc.subr
+
+name="cloudinit"
+start_cmd="start_cloud_init"
+start_cloud_init()
+{
+ /usr/pkg/bin/cloud-init modules --mode final
+}
+
+load_rc_config $name
+run_rc_command "$1"
diff --git a/sysvinit/netbsd/cloudinit b/sysvinit/netbsd/cloudinit
new file mode 100755
index 00000000..266afc2a
--- /dev/null
+++ b/sysvinit/netbsd/cloudinit
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+# PROVIDE: cloudinit
+# REQUIRE: cloudinitlocal
+
+$_rc_subr_loaded . /etc/rc.subr
+
+name="cloudinit"
+start_cmd="start_cloud_init"
+start_cloud_init()
+{
+ /usr/pkg/bin/cloud-init init
+}
+
+load_rc_config $name
+run_rc_command "$1"
diff --git a/sysvinit/netbsd/cloudinitlocal b/sysvinit/netbsd/cloudinitlocal
new file mode 100755
index 00000000..1f30e70b
--- /dev/null
+++ b/sysvinit/netbsd/cloudinitlocal
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+# PROVIDE: cloudinitlocal
+# REQUIRE: NETWORKING
+
+# After NETWORKING because we don't want staticroute to wipe
+# the route set by the DHCP client toward the meta-data server.
+$_rc_subr_loaded . /etc/rc.subr
+
+name="cloudinitlocal"
+start_cmd="start_cloud_init_local"
+start_cloud_init_local()
+{
+ /usr/pkg/bin/cloud-init init -l
+}
+
+load_rc_config $name
+run_rc_command "$1"
diff --git a/templates/hosts.suse.tmpl b/templates/hosts.suse.tmpl
index 8e664dbf..5d7953f0 100644
--- a/templates/hosts.suse.tmpl
+++ b/templates/hosts.suse.tmpl
@@ -13,7 +13,7 @@ you need to add the following to config:
# /etc/cloud/cloud.cfg or cloud-config from user-data
#
# The following lines are desirable for IPv4 capable hosts
-127.0.0.1 {{fqdn}} {{hostname}}
+127.0.1.1 {{fqdn}} {{hostname}}
127.0.0.1 localhost.localdomain localhost
127.0.0.1 localhost4.localdomain4 localhost4
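
Moving the FQDN from 127.0.0.1 to 127.0.1.1 follows the convention (familiar from Debian) of keeping the machine's own name distinct from the canonical localhost entries. With hypothetical values, a rendered hosts file would begin roughly:

    127.0.1.1 myhost.example.com myhost
    127.0.0.1 localhost.localdomain localhost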
diff --git a/templates/resolv.conf.tmpl b/templates/resolv.conf.tmpl
index bfae80db..f870be67 100644
--- a/templates/resolv.conf.tmpl
+++ b/templates/resolv.conf.tmpl
@@ -21,10 +21,18 @@ domain {{domain}}
sortlist {% for sort in sortlist %}{{sort}} {% endfor %}
{% endif %}
+{#
+ Flags and options are required to be on the
+ same line preceded by "options" keyword
+#}
{% if options or flags %}
-options {% for flag in flags %}{{flag}} {% endfor %}
-{% for key, value in options.items() -%}
- {{key}}:{{value}}
+options
+{%- for flag in flags %}
+ {{flag-}}
+{% endfor %}
+
+{%- for key, value in options.items()|sort %}
+ {{key}}:{{value-}}
{% endfor %}
{% endif %}
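
The reworked template leans on Jinja whitespace control ({%- ... %}) so every flag and key:value option lands on the single "options" line that resolv.conf requires. A simplified rendering check, assuming the jinja2 package and illustrative values:

    from jinja2 import Template

    tmpl = Template(
        'options'
        '{%- for flag in flags %} {{flag}}{% endfor %}'
        '{%- for key, value in options.items()|sort %}'
        ' {{key}}:{{value}}{% endfor %}')
    # prints: options rotate attempts:3 timeout:1
    print(tmpl.render(flags=['rotate'], options={'attempts': 3, 'timeout': 1}))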
diff --git a/test-requirements.txt b/test-requirements.txt
index d9d41b57..0a6a04d4 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,13 +1,7 @@
# Needed generally in tests
httpretty>=0.7.1
-mock
-nose
-unittest2
-coverage
-
-# Only needed if you want to know the test times
-# nose-timer
+pytest
+pytest-cov
# Only really needed on older versions of python
-contextlib2
setuptools
diff --git a/tests/cloud_tests/__init__.py b/tests/cloud_tests/__init__.py
index dd436989..6c632f99 100644
--- a/tests/cloud_tests/__init__.py
+++ b/tests/cloud_tests/__init__.py
@@ -22,7 +22,8 @@ def _initialize_logging():
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
- '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ '%(asctime)s - %(pathname)s:%(funcName)s:%(lineno)s '
+ '[%(levelname)s]: %(message)s')
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
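
The expanded format string stamps each record with its origin. A sketch, with illustrative output:

    import logging

    formatter = logging.Formatter(
        '%(asctime)s - %(pathname)s:%(funcName)s:%(lineno)s '
        '[%(levelname)s]: %(message)s')
    # produces lines like:
    # 2020-05-27 19:07:02,042 - tests/cloud_tests/util.py:emit_dots:130 [DEBUG]: .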
diff --git a/tests/cloud_tests/config.py b/tests/cloud_tests/config.py
index 8bd569fd..06536edc 100644
--- a/tests/cloud_tests/config.py
+++ b/tests/cloud_tests/config.py
@@ -114,7 +114,7 @@ def load_os_config(platform_name, os_name, require_enabled=False,
feature_conf = main_conf['features']
feature_groups = conf.get('feature_groups', [])
overrides = merge_config(get(conf, 'features'), feature_overrides)
- conf['arch'] = c_util.get_architecture()
+ conf['arch'] = c_util.get_dpkg_architecture()
conf['features'] = merge_feature_groups(
feature_conf, feature_groups, overrides)
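
The rename makes the source of the value explicit: the architecture comes from dpkg and is therefore Debian-flavoured ('amd64', not 'x86_64'). A minimal sketch of the query c_util.get_dpkg_architecture() performs, assuming a dpkg-based system:

    import subprocess

    arch = subprocess.check_output(
        ['dpkg', '--print-architecture']).decode('utf-8').strip()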
diff --git a/tests/cloud_tests/platforms/__init__.py b/tests/cloud_tests/platforms/__init__.py
index 6a410b84..e506baa0 100644
--- a/tests/cloud_tests/platforms/__init__.py
+++ b/tests/cloud_tests/platforms/__init__.py
@@ -6,6 +6,7 @@ from .ec2 import platform as ec2
from .lxd import platform as lxd
from .nocloudkvm import platform as nocloudkvm
from .azurecloud import platform as azurecloud
+from ..util import emit_dots_on_travis
PLATFORMS = {
'ec2': ec2.EC2Platform,
@@ -17,7 +18,8 @@ PLATFORMS = {
def get_image(platform, config):
"""Get image from platform object using os_name."""
- return platform.get_image(config)
+ with emit_dots_on_travis():
+ return platform.get_image(config)
def get_instance(snapshot, *args, **kwargs):
diff --git a/tests/cloud_tests/platforms/azurecloud/image.py b/tests/cloud_tests/platforms/azurecloud/image.py
index 96a946f3..aad2bca1 100644
--- a/tests/cloud_tests/platforms/azurecloud/image.py
+++ b/tests/cloud_tests/platforms/azurecloud/image.py
@@ -21,26 +21,26 @@ class AzureCloudImage(Image):
@param image_id: image id used to boot instance
"""
super(AzureCloudImage, self).__init__(platform, config)
- self.image_id = image_id
self._img_instance = None
+ self.image_id = image_id
@property
def _instance(self):
"""Internal use only, returns a running instance"""
- LOG.debug('creating instance')
if not self._img_instance:
self._img_instance = self.platform.create_instance(
self.properties, self.config, self.features,
self.image_id, user_data=None)
+ self._img_instance.start(wait=True, wait_for_cloud_init=True)
return self._img_instance
def destroy(self):
"""Delete the instance used to create a custom image."""
- LOG.debug('deleting VM that was used to create image')
if self._img_instance:
- LOG.debug('Deleting instance %s', self._img_instance.name)
+ LOG.debug('Deleting backing instance %s',
+ self._img_instance.vm_name)
delete_vm = self.platform.compute_client.virtual_machines.delete(
- self.platform.resource_group.name, self.image_id)
+ self.platform.resource_group.name, self._img_instance.vm_name)
delete_vm.wait()
super(AzureCloudImage, self).destroy()
@@ -48,7 +48,7 @@ class AzureCloudImage(Image):
def _execute(self, *args, **kwargs):
"""Execute command in image, modifying image."""
LOG.debug('executing commands on image')
- self._instance.start()
+ self._instance.start(wait=True)
return self._instance._execute(*args, **kwargs)
def push_file(self, local_path, remote_path):
@@ -72,21 +72,26 @@ class AzureCloudImage(Image):
Otherwise runs the clean script, deallocates, generalizes
and creates custom image from instance.
"""
- LOG.debug('creating image from VM')
+ LOG.debug('creating snapshot of image')
if not self._img_instance:
+ LOG.debug('No existing image, snapshotting base image')
return AzureCloudSnapshot(self.platform, self.properties,
self.config, self.features,
- self.image_id, delete_on_destroy=False)
+ self._instance.vm_name,
+ delete_on_destroy=False)
+ LOG.debug('creating snapshot from instance: %s', self._img_instance)
if self.config.get('boot_clean_script'):
self._img_instance.run_script(self.config.get('boot_clean_script'))
+ LOG.debug('deallocating instance %s', self._instance.vm_name)
deallocate = self.platform.compute_client.virtual_machines.deallocate(
- self.platform.resource_group.name, self.image_id)
+ self.platform.resource_group.name, self._instance.vm_name)
deallocate.wait()
+ LOG.debug('generalizing instance %s', self._instance.vm_name)
self.platform.compute_client.virtual_machines.generalize(
- self.platform.resource_group.name, self.image_id)
+ self.platform.resource_group.name, self._instance.vm_name)
image_params = {
"location": self.platform.location,
@@ -96,13 +101,16 @@ class AzureCloudImage(Image):
}
}
}
+ LOG.debug('updating resource group image %s', self._instance.vm_name)
self.platform.compute_client.images.create_or_update(
- self.platform.resource_group.name, self.image_id,
+ self.platform.resource_group.name, self._instance.vm_name,
image_params)
+ LOG.debug('destroying self')
self.destroy()
+ LOG.debug('snapshot complete')
return AzureCloudSnapshot(self.platform, self.properties, self.config,
- self.features, self.image_id)
+ self.features, self._instance.vm_name)
# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/azurecloud/instance.py b/tests/cloud_tests/platforms/azurecloud/instance.py
index 3d77a1a7..f1e28a96 100644
--- a/tests/cloud_tests/platforms/azurecloud/instance.py
+++ b/tests/cloud_tests/platforms/azurecloud/instance.py
@@ -41,6 +41,7 @@ class AzureCloudInstance(Instance):
self.ssh_ip = None
self.instance = None
self.image_id = image_id
+ self.vm_name = 'ci-azure-i-%s' % self.platform.tag
self.user_data = user_data
self.ssh_key_file = os.path.join(
platform.config['data_dir'], platform.config['private_key'])
@@ -74,16 +75,18 @@ class AzureCloudInstance(Instance):
self.image_id
)
image_exists = True
- LOG.debug('image found, launching instance')
+ LOG.debug('image found, launching instance, image_id=%s',
+ self.image_id)
except CloudError:
- LOG.debug(
- 'image not found, launching instance with base image')
+ LOG.debug(('image not found, launching instance with base image, '
+ 'image_id=%s'), self.image_id)
pass
vm_params = {
+ 'name': self.vm_name,
'location': self.platform.location,
'os_profile': {
- 'computer_name': 'CI',
+ 'computer_name': 'CI-%s' % self.platform.tag,
'admin_username': self.ssh_username,
"customData": self.user_data,
"linuxConfiguration": {
@@ -129,7 +132,9 @@ class AzureCloudInstance(Instance):
try:
self.instance = self.platform.compute_client.virtual_machines.\
create_or_update(self.platform.resource_group.name,
- self.image_id, vm_params)
+ self.vm_name, vm_params)
+ LOG.debug('creating instance %s from image_id=%s', self.vm_name,
+ self.image_id)
except CloudError:
raise RuntimeError('failed creating instance:\n{}'.format(
traceback.format_exc()))
diff --git a/tests/cloud_tests/platforms/azurecloud/platform.py b/tests/cloud_tests/platforms/azurecloud/platform.py
index 77f159eb..cb62a74b 100644
--- a/tests/cloud_tests/platforms/azurecloud/platform.py
+++ b/tests/cloud_tests/platforms/azurecloud/platform.py
@@ -74,8 +74,9 @@ class AzureCloudPlatform(Platform):
@param user_data: test user-data to pass to instance
@return_value: cloud_tests.instances instance
"""
- user_data = str(base64.b64encode(
- user_data.encode('utf-8')), 'utf-8')
+ if user_data is not None:
+ user_data = str(base64.b64encode(
+ user_data.encode('utf-8')), 'utf-8')
return AzureCloudInstance(self, properties, config, features,
image_id, user_data)
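
Azure's customData field must be base64, but the image-creation path legitimately passes user_data=None, hence the new guard. The pattern in isolation:

    import base64

    def encode_user_data(user_data):
        # image-setup boots pass None; only encode real user-data
        if user_data is None:
            return None
        return str(base64.b64encode(user_data.encode('utf-8')), 'utf-8')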
diff --git a/tests/cloud_tests/platforms/nocloudkvm/platform.py b/tests/cloud_tests/platforms/nocloudkvm/platform.py
index 85933463..2d1480f5 100644
--- a/tests/cloud_tests/platforms/nocloudkvm/platform.py
+++ b/tests/cloud_tests/platforms/nocloudkvm/platform.py
@@ -29,9 +29,13 @@ class NoCloudKVMPlatform(Platform):
"""
(url, path) = s_util.path_from_mirror_url(img_conf['mirror_url'], None)
- filter = filters.get_filters(['arch=%s' % c_util.get_architecture(),
- 'release=%s' % img_conf['release'],
- 'ftype=disk1.img'])
+ filter = filters.get_filters(
+ [
+ 'arch=%s' % c_util.get_dpkg_architecture(),
+ 'release=%s' % img_conf['release'],
+ 'ftype=disk1.img',
+ ]
+ )
mirror_config = {'filters': filter,
'keep_items': False,
'max_items': 1,
diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml
index 7ddc5b85..187f3ac3 100644
--- a/tests/cloud_tests/releases.yaml
+++ b/tests/cloud_tests/releases.yaml
@@ -30,8 +30,10 @@ default_release_config:
mirror_url: https://cloud-images.ubuntu.com/daily
mirror_dir: '/srv/citest/images'
keyring: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg
- # The OS version formatted as Major.Minor is used to compare releases
- version: null # Each release needs to define this, for example 16.04
+ # The OS version formatted as Major.Minor is used to compare releases.
+ # Each release needs to define this, for example "16.04". Quoting is
+ # necessary to ensure the version is treated as a string.
+ version: null
ec2:
# Choose from: [ebs, instance-store]
@@ -131,12 +133,28 @@ features:
releases:
# UBUNTU =================================================================
+ focal:
+ # EOL: Apr 2025
+ default:
+ enabled: true
+ release: focal
+ version: "20.04"
+ os: ubuntu
+ feature_groups:
+ - base
+ - debian_base
+ - ubuntu_specific
+ lxd:
+ sstreams_server: https://cloud-images.ubuntu.com/daily
+ alias: focal
+ setup_overrides: null
+ override_templates: false
eoan:
# EOL: Jul 2020
default:
enabled: true
release: eoan
- version: 19.10
+ version: "19.10"
os: ubuntu
feature_groups:
- base
@@ -152,7 +170,7 @@ releases:
default:
enabled: true
release: disco
- version: 19.04
+ version: "19.04"
os: ubuntu
feature_groups:
- base
@@ -168,7 +186,7 @@ releases:
default:
enabled: true
release: cosmic
- version: 18.10
+ version: "18.10"
os: ubuntu
feature_groups:
- base
@@ -184,7 +202,7 @@ releases:
default:
enabled: true
release: bionic
- version: 18.04
+ version: "18.04"
os: ubuntu
feature_groups:
- base
@@ -200,7 +218,7 @@ releases:
default:
enabled: true
release: artful
- version: 17.10
+ version: "17.10"
os: ubuntu
feature_groups:
- base
@@ -216,7 +234,7 @@ releases:
default:
enabled: true
release: xenial
- version: 16.04
+ version: "16.04"
os: ubuntu
feature_groups:
- base
@@ -232,7 +250,7 @@ releases:
default:
enabled: true
release: trusty
- version: 14.04
+ version: "14.04"
os: ubuntu
feature_groups:
- base
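
The quoting matters because YAML reads a bare Major.Minor as a float, silently dropping a trailing zero. A quick check, assuming PyYAML:

    import yaml

    yaml.safe_load('version: 18.10')    # -> {'version': 18.1}
    yaml.safe_load('version: "18.10"')  # -> {'version': '18.10'}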
diff --git a/tests/cloud_tests/setup_image.py b/tests/cloud_tests/setup_image.py
index a8aaba15..69e66e3f 100644
--- a/tests/cloud_tests/setup_image.py
+++ b/tests/cloud_tests/setup_image.py
@@ -229,7 +229,7 @@ def setup_image(args, image):
except Exception as e:
info = "N/A (%s)" % e
- LOG.info('setting up %s (%s)', image, info)
+ LOG.info('setting up image %s (info %s)', image, info)
res = stage.run_stage(
'set up for {}'.format(image), calls, continue_after_error=False)
return res
diff --git a/tests/cloud_tests/testcases/__init__.py b/tests/cloud_tests/testcases/__init__.py
index 6bb39f77..e8c371ca 100644
--- a/tests/cloud_tests/testcases/__init__.py
+++ b/tests/cloud_tests/testcases/__init__.py
@@ -4,7 +4,7 @@
import importlib
import inspect
-import unittest2
+import unittest
from cloudinit.util import read_conf
@@ -48,7 +48,7 @@ def get_test_class(test_name, test_data, test_conf):
def __str__(self):
return "%s (%s)" % (self._testMethodName,
- unittest2.util.strclass(self._realclass))
+ unittest.util.strclass(self._realclass))
@classmethod
def setUpClass(cls):
@@ -62,9 +62,9 @@ def get_suite(test_name, data, conf):
@return_value: a test suite
"""
- suite = unittest2.TestSuite()
+ suite = unittest.TestSuite()
suite.addTest(
- unittest2.defaultTestLoader.loadTestsFromTestCase(
+ unittest.defaultTestLoader.loadTestsFromTestCase(
get_test_class(test_name, data, conf)))
return suite
diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py
index fd12d87b..7b67f54e 100644
--- a/tests/cloud_tests/testcases/base.py
+++ b/tests/cloud_tests/testcases/base.py
@@ -5,15 +5,15 @@
import crypt
import json
import re
-import unittest2
+import unittest
from cloudinit import util as c_util
-SkipTest = unittest2.SkipTest
+SkipTest = unittest.SkipTest
-class CloudTestCase(unittest2.TestCase):
+class CloudTestCase(unittest.TestCase):
"""Base test class for verifiers."""
# data gets populated in get_suite.setUpClass
@@ -172,9 +172,7 @@ class CloudTestCase(unittest2.TestCase):
'Skipping instance-data.json test.'
' OS: %s not bionic or newer' % self.os_name)
instance_data = json.loads(out)
- self.assertItemsEqual(
- [],
- instance_data['base64_encoded_keys'])
+ self.assertCountEqual(['merged_cfg'], instance_data['sensitive_keys'])
ds = instance_data.get('ds', {})
v1_data = instance_data.get('v1', {})
metadata = ds.get('meta-data', {})
@@ -201,6 +199,23 @@ class CloudTestCase(unittest2.TestCase):
self.assertIn('i-', v1_data['instance_id'])
self.assertIn('ip-', v1_data['local_hostname'])
self.assertIsNotNone(v1_data['region'], 'expected ec2 region')
+ self.assertIsNotNone(
+ re.match(r'\d\.\d+\.\d+-\d+-aws', v1_data['kernel_release']))
+ self.assertEqual(
+ 'redacted for non-root user', instance_data['merged_cfg'])
+ self.assertEqual(self.os_cfg['os'], v1_data['variant'])
+ self.assertEqual(self.os_cfg['os'], v1_data['distro'])
+ self.assertEqual(
+ self.os_cfg['os'], instance_data["sys_info"]['dist'][0],
+ "Unexpected sys_info dist value")
+ self.assertEqual(self.os_name, v1_data['distro_release'])
+ self.assertEqual(
+ str(self.os_cfg['version']), v1_data['distro_version'])
+ self.assertEqual('x86_64', v1_data['machine'])
+ self.assertIsNotNone(
+ re.match(r'3.\d\.\d', v1_data['python_version']),
+ "unexpected python version: {ver}".format(
+ ver=v1_data["python_version"]))
def test_instance_data_json_lxd(self):
"""Validate instance-data.json content by lxd platform.
@@ -222,7 +237,7 @@ class CloudTestCase(unittest2.TestCase):
' OS: %s not bionic or newer' % self.os_name)
instance_data = json.loads(out)
v1_data = instance_data.get('v1', {})
- self.assertItemsEqual([], sorted(instance_data['base64_encoded_keys']))
+ self.assertCountEqual([], sorted(instance_data['base64_encoded_keys']))
self.assertEqual('unknown', v1_data['cloud_name'])
self.assertEqual('lxd', v1_data['platform'])
self.assertEqual(
@@ -237,6 +252,23 @@ class CloudTestCase(unittest2.TestCase):
self.assertIsNone(
v1_data['region'],
'found unexpected lxd region %s' % v1_data['region'])
+ self.assertIsNotNone(
+ re.match(r'\d\.\d+\.\d+-\d+', v1_data['kernel_release']))
+ self.assertEqual(
+ 'redacted for non-root user', instance_data['merged_cfg'])
+ self.assertEqual(self.os_cfg['os'], v1_data['variant'])
+ self.assertEqual(self.os_cfg['os'], v1_data['distro'])
+ self.assertEqual(
+ self.os_cfg['os'], instance_data["sys_info"]['dist'][0],
+ "Unexpected sys_info dist value")
+ self.assertEqual(self.os_name, v1_data['distro_release'])
+ self.assertEqual(
+ str(self.os_cfg['version']), v1_data['distro_version'])
+ self.assertEqual('x86_64', v1_data['machine'])
+ self.assertIsNotNone(
+ re.match(r'3.\d\.\d', v1_data['python_version']),
+ "unexpected python version: {ver}".format(
+ ver=v1_data["python_version"]))
def test_instance_data_json_kvm(self):
"""Validate instance-data.json content by nocloud-kvm platform.
@@ -259,7 +291,7 @@ class CloudTestCase(unittest2.TestCase):
' OS: %s not bionic or newer' % self.os_name)
instance_data = json.loads(out)
v1_data = instance_data.get('v1', {})
- self.assertItemsEqual([], instance_data['base64_encoded_keys'])
+ self.assertCountEqual([], instance_data['base64_encoded_keys'])
self.assertEqual('unknown', v1_data['cloud_name'])
self.assertEqual('nocloud', v1_data['platform'])
subplatform = v1_data['subplatform']
@@ -278,6 +310,23 @@ class CloudTestCase(unittest2.TestCase):
self.assertIsNone(
v1_data['region'],
'found unexpected lxd region %s' % v1_data['region'])
+ self.assertIsNotNone(
+ re.match(r'\d\.\d+\.\d+-\d+', v1_data['kernel_release']))
+ self.assertEqual(
+ 'redacted for non-root user', instance_data['merged_cfg'])
+ self.assertEqual(self.os_cfg['os'], v1_data['variant'])
+ self.assertEqual(self.os_cfg['os'], v1_data['distro'])
+ self.assertEqual(
+ self.os_cfg['os'], instance_data["sys_info"]['dist'][0],
+ "Unexpected sys_info dist value")
+ self.assertEqual(self.os_name, v1_data['distro_release'])
+ self.assertEqual(
+ str(self.os_cfg['version']), v1_data['distro_version'])
+ self.assertEqual('x86_64', v1_data['machine'])
+ self.assertIsNotNone(
+ re.match(r'3.\d\.\d', v1_data['python_version']),
+ "unexpected python version: {ver}".format(
+ ver=v1_data["python_version"]))
class PasswordListTest(CloudTestCase):
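
assertItemsEqual was the unittest2/Python 2 spelling; Python 3's unittest offers the same order-insensitive, multiplicity-aware comparison as assertCountEqual. For example:

    import unittest

    class Demo(unittest.TestCase):
        def test_order_insensitive(self):
            # passes: same elements with the same counts, in any order
            self.assertCountEqual([1, 2, 2], [2, 1, 2])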
diff --git a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml b/tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml
index 0bec305e..68ca95b5 100644
--- a/tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml
+++ b/tests/cloud_tests/testcases/examples/install_run_chef_recipes.yaml
@@ -8,43 +8,44 @@ cloud_config: |
#cloud-config
# Key from https://packages.chef.io/chef.asc
apt:
- source1:
- source: "deb http://packages.chef.io/repos/apt/stable $RELEASE main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: GnuPG v1.4.12 (Darwin)
- Comment: GPGTools - http://gpgtools.org
+ sources:
+ source1:
+ source: "deb http://packages.chef.io/repos/apt/stable $RELEASE main"
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: GnuPG v1.4.12 (Darwin)
+ Comment: GPGTools - http://gpgtools.org
- mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
- twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
- dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
- JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
- ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
- XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
- DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
- sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
- Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
- YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
- CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
- +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg
- PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK
- CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid
- AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd
- Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz
- SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK
- OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/
- Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY
- IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu
- twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8
- DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE
- WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS
- 0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA
- dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC
- MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD
- 74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K
- zA==
- =IxPr
- -----END PGP PUBLIC KEY BLOCK-----
+ mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
+ twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
+ dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
+ JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
+ ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
+ XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
+ DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
+ sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
+ Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
+ YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
+ CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
+ +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg
+ PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK
+ CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid
+ AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd
+ Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz
+ SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK
+ OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/
+ Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY
+ IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu
+ twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8
+ DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE
+ WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS
+ 0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA
+ dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC
+ MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD
+ 74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K
+ zA==
+ =IxPr
+ -----END PGP PUBLIC KEY BLOCK-----
chef:
diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.py b/tests/cloud_tests/testcases/modules/ntp_chrony.py
index 0f4c3d08..7d341773 100644
--- a/tests/cloud_tests/testcases/modules/ntp_chrony.py
+++ b/tests/cloud_tests/testcases/modules/ntp_chrony.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""cloud-init Integration Test Verify Script."""
-import unittest2
+import unittest
from tests.cloud_tests.testcases import base
@@ -13,7 +13,7 @@ class TestNtpChrony(base.CloudTestCase):
"""Skip this suite of tests on lxd and artful or older."""
if self.platform == 'lxd':
if self.is_distro('ubuntu') and self.os_version_cmp('artful') <= 0:
- raise unittest2.SkipTest(
+ raise unittest.SkipTest(
'No support for chrony on containers <= artful.'
' LP: #1589780')
return super(TestNtpChrony, self).setUp()
diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py
index 06f7d865..e65771b1 100644
--- a/tests/cloud_tests/util.py
+++ b/tests/cloud_tests/util.py
@@ -5,6 +5,7 @@
import base64
import copy
import glob
+import multiprocessing
import os
import random
import shlex
@@ -12,7 +13,9 @@ import shutil
import string
import subprocess
import tempfile
+import time
import yaml
+from contextlib import contextmanager
from cloudinit import util as c_util
from tests.cloud_tests import LOG
@@ -118,6 +121,36 @@ def current_verbosity():
return max(min(3 - int(LOG.level / 10), 2), 0)
+@contextmanager
+def emit_dots_on_travis():
+ """
+ A context manager that emits a dot every 10 seconds if running on Travis.
+
+ Travis will kill jobs that don't emit output for a certain amount of time.
+ This context manager spins up a background process which will emit a dot to
+ stdout every 10 seconds to avoid being killed.
+
+ It should be used selectively, wrapping only operations that are known to
+ take a long time.
+ """
+ if os.environ.get('TRAVIS') != "true":
+ # If we aren't on Travis, don't do anything.
+ yield
+ return
+
+ def emit_dots():
+ while True:
+ print(".")
+ time.sleep(10)
+
+ dot_process = multiprocessing.Process(target=emit_dots)
+ dot_process.start()
+ try:
+ yield
+ finally:
+ dot_process.terminate()
+
+
def is_writable_dir(path):
"""Make sure dir is writable.
diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py
index 7018f4d5..0295af40 100644
--- a/tests/cloud_tests/verify.py
+++ b/tests/cloud_tests/verify.py
@@ -3,7 +3,7 @@
"""Verify test results."""
import os
-import unittest2
+import unittest
from tests.cloud_tests import (config, LOG, util, testcases)
@@ -18,7 +18,7 @@ def verify_data(data_dir, platform, os_name, tests):
@return_value: {<test_name>: {passed: True/False, failures: []}}
"""
base_dir = os.sep.join((data_dir, platform, os_name))
- runner = unittest2.TextTestRunner(verbosity=util.current_verbosity())
+ runner = unittest.TextTestRunner(verbosity=util.current_verbosity())
res = {}
for test_name in tests:
LOG.debug('verifying test data for %s', test_name)
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index b92ffc79..9045e743 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -109,7 +109,7 @@ class TestJinjaTemplatePartHandler(CiTestCase):
cloudconfig_handler = CloudConfigPartHandler(self.paths)
h = JinjaTemplatePartHandler(
self.paths, sub_handlers=[script_handler, cloudconfig_handler])
- self.assertItemsEqual(
+ self.assertCountEqual(
['text/cloud-config', 'text/cloud-config-jsonp',
'text/x-shellscript'],
h.sub_handlers)
@@ -120,7 +120,7 @@ class TestJinjaTemplatePartHandler(CiTestCase):
cloudconfig_handler = CloudConfigPartHandler(self.paths)
h = JinjaTemplatePartHandler(
self.paths, sub_handlers=[script_handler, cloudconfig_handler])
- self.assertItemsEqual(
+ self.assertCountEqual(
['text/cloud-config', 'text/cloud-config-jsonp',
'text/x-shellscript'],
h.sub_handlers)
@@ -302,7 +302,7 @@ class TestConvertJinjaInstanceData(CiTestCase):
expected_data.update({'v1key1': 'v1.1', 'v2key1': 'v2.1'})
converted_data = convert_jinja_instance_data(data=data)
- self.assertItemsEqual(
+ self.assertCountEqual(
['ds', 'v1', 'v2', 'v1key1', 'v2key1'], converted_data.keys())
self.assertEqual(
expected_data,
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index d283f136..43d996b9 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -1,8 +1,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from collections import namedtuple
import os
-import six
+import io
+from collections import namedtuple
from cloudinit.cmd import main as cli
from cloudinit.tests import helpers as test_helpers
@@ -18,7 +18,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
def setUp(self):
super(TestCLI, self).setUp()
- self.stderr = six.StringIO()
+ self.stderr = io.StringIO()
self.patchStdoutAndStderr(stderr=self.stderr)
def _call_main(self, sysv_args=None):
@@ -147,7 +147,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
def test_conditional_subcommands_from_entry_point_sys_argv(self):
"""Subcommands from entry-point are properly parsed from sys.argv."""
- stdout = six.StringIO()
+ stdout = io.StringIO()
self.patchStdoutAndStderr(stdout=stdout)
expected_errors = [
@@ -178,7 +178,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
def test_collect_logs_subcommand_parser(self):
"""The subcommand cloud-init collect-logs calls the subparser."""
# Provide -h param to collect-logs to avoid having to mock behavior.
- stdout = six.StringIO()
+ stdout = io.StringIO()
self.patchStdoutAndStderr(stdout=stdout)
self._call_main(['cloud-init', 'collect-logs', '-h'])
self.assertIn('usage: cloud-init collect-log', stdout.getvalue())
@@ -186,7 +186,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
def test_clean_subcommand_parser(self):
"""The subcommand cloud-init clean calls the subparser."""
# Provide -h param to clean to avoid having to mock behavior.
- stdout = six.StringIO()
+ stdout = io.StringIO()
self.patchStdoutAndStderr(stdout=stdout)
self._call_main(['cloud-init', 'clean', '-h'])
self.assertIn('usage: cloud-init clean', stdout.getvalue())
@@ -194,7 +194,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
def test_status_subcommand_parser(self):
"""The subcommand cloud-init status calls the subparser."""
# Provide -h param to clean to avoid having to mock behavior.
- stdout = six.StringIO()
+ stdout = io.StringIO()
self.patchStdoutAndStderr(stdout=stdout)
self._call_main(['cloud-init', 'status', '-h'])
self.assertIn('usage: cloud-init status', stdout.getvalue())
@@ -214,14 +214,14 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
self.assertEqual(1, exit_code)
# Known whitebox output from schema subcommand
self.assertEqual(
- 'Expected either --config-file argument or --doc\n',
+ 'Expected either --config-file argument or --docs\n',
self.stderr.getvalue())
def test_wb_devel_schema_subcommand_doc_content(self):
"""Validate that doc content is sane from known examples."""
- stdout = six.StringIO()
+ stdout = io.StringIO()
self.patchStdoutAndStderr(stdout=stdout)
- self._call_main(['cloud-init', 'devel', 'schema', '--doc'])
+ self._call_main(['cloud-init', 'devel', 'schema', '--docs', 'all'])
expected_doc_sections = [
'**Supported distros:** all',
'**Supported distros:** centos, debian, fedora',
diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/test_cs_util.py
index 2a1095b9..bfd07ecf 100644
--- a/tests/unittests/test_cs_util.py
+++ b/tests/unittests/test_cs_util.py
@@ -1,7 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from __future__ import print_function
-
from cloudinit.tests import helpers as test_helpers
from cloudinit.cs_utils import Cepko
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index e55feb22..a4261609 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -5,13 +5,8 @@
import gzip
import logging
import os
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-
-from six import BytesIO, StringIO
+from io import BytesIO, StringIO
+from unittest import mock
from email import encoders
from email.mime.application import MIMEApplication
@@ -218,6 +213,40 @@ c: d
self.assertEqual(1, len(cc))
self.assertEqual('c', cc['a'])
+ def test_cloud_config_as_x_shell_script(self):
+ blob_cc = '''
+#cloud-config
+a: b
+c: d
+'''
+ message_cc = MIMEBase("text", "x-shellscript")
+ message_cc.set_payload(blob_cc)
+
+ blob_jp = '''
+#cloud-config-jsonp
+[
+ { "op": "replace", "path": "/a", "value": "c" },
+ { "op": "remove", "path": "/c" }
+]
+'''
+
+ message_jp = MIMEBase('text', "cloud-config-jsonp")
+ message_jp.set_payload(blob_jp)
+
+ message = MIMEMultipart()
+ message.attach(message_cc)
+ message.attach(message_jp)
+
+ self.reRoot()
+ ci = stages.Init()
+ ci.datasource = FakeDataSource(str(message))
+ ci.fetch()
+ ci.consume_data()
+ cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
+ cc = util.load_yaml(cc_contents)
+ self.assertEqual(1, len(cc))
+ self.assertEqual('c', cc['a'])
+
def test_vendor_user_yaml_cloud_config(self):
vendor_blob = '''
#cloud-config
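
The new test above covers a handler quirk: a MIME part labeled text/x-shellscript whose payload opens with #cloud-config appears to be routed by payload sniffing rather than by the declared type. A sketch of building such user-data:

    from email.mime.base import MIMEBase
    from email.mime.multipart import MIMEMultipart

    part = MIMEBase('text', 'x-shellscript')
    # the payload declares itself cloud-config despite the MIME type
    part.set_payload('#cloud-config\na: b\n')
    message = MIMEMultipart()
    message.attach(part)
    user_data = str(message)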
diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py
index e9213ca1..1e66fcdb 100644
--- a/tests/unittests/test_datasource/test_aliyun.py
+++ b/tests/unittests/test_datasource/test_aliyun.py
@@ -2,8 +2,8 @@
import functools
import httpretty
-import mock
import os
+from unittest import mock
from cloudinit import helpers
from cloudinit.sources import DataSourceAliYun as ay
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index a809fd87..2f8561cb 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -383,8 +383,6 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):
class TestAzureDataSource(CiTestCase):
- with_logs = True
-
def setUp(self):
super(TestAzureDataSource, self).setUp()
self.tmp = self.tmp_dir()
@@ -530,14 +528,14 @@ scbus-1 on xpt0 bus 0
def tags_exists(x, y):
for tag in x.keys():
- self.assertIn(tag, y)
+ assert tag in y
for tag in y.keys():
- self.assertIn(tag, x)
+ assert tag in x
def tags_equal(x, y):
for x_val in x.values():
y_val = y.get(x_val.tag)
- self.assertEqual(x_val.text, y_val.text)
+ assert x_val.text == y_val.text
old_cnt = create_tag_index(oxml)
new_cnt = create_tag_index(nxml)
@@ -651,7 +649,7 @@ scbus-1 on xpt0 bus 0
crawled_metadata = dsrc.crawl_metadata()
- self.assertItemsEqual(
+ self.assertCountEqual(
crawled_metadata.keys(),
['cfg', 'files', 'metadata', 'userdata_raw'])
self.assertEqual(crawled_metadata['cfg'], expected_cfg)
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index 007df09f..6344b0bb 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
-import unittest2
+import unittest
from textwrap import dedent
from cloudinit.sources.helpers import azure as azure_helper
@@ -332,7 +332,7 @@ class TestOpenSSLManagerActions(CiTestCase):
path = 'tests/data/azure'
return os.path.join(path, name)
- @unittest2.skip("todo move to cloud_test")
+ @unittest.skip("todo move to cloud_test")
def test_pubkey_extract(self):
cert = load_file(self._data_file('pubkey_extract_cert'))
good_key = load_file(self._data_file('pubkey_extract_ssh_key'))
@@ -344,7 +344,7 @@ class TestOpenSSLManagerActions(CiTestCase):
fingerprint = sslmgr._get_fingerprint_from_cert(cert)
self.assertEqual(good_fingerprint, fingerprint)
- @unittest2.skip("todo move to cloud_test")
+ @unittest.skip("todo move to cloud_test")
@mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml')
def test_parse_certificates(self, mock_decrypt_certs):
"""Azure control plane puts private keys as well as certificates
diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py
index d62d542b..7aa3b1d1 100644
--- a/tests/unittests/test_datasource/test_cloudsigma.py
+++ b/tests/unittests/test_datasource/test_cloudsigma.py
@@ -3,6 +3,7 @@
import copy
from cloudinit.cs_utils import Cepko
+from cloudinit import distros
from cloudinit import helpers
from cloudinit import sources
from cloudinit.sources import DataSourceCloudSigma
@@ -47,8 +48,11 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase):
self.paths = helpers.Paths({'run_dir': self.tmp_dir()})
self.add_patch(DS_PATH + '.is_running_in_cloudsigma',
"m_is_container", return_value=True)
+
+ distro_cls = distros.fetch("ubuntu")
+ distro = distro_cls("ubuntu", cfg={}, paths=self.paths)
self.datasource = DataSourceCloudSigma.DataSourceCloudSigma(
- "", "", paths=self.paths)
+ sys_cfg={}, distro=distro, paths=self.paths)
self.datasource.cepko = CepkoMock(SERVER_CONTEXT)
def test_get_hostname(self):
diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py
index 34a089f2..d413d1cd 100644
--- a/tests/unittests/test_datasource/test_ec2.py
+++ b/tests/unittests/test_datasource/test_ec2.py
@@ -3,7 +3,8 @@
import copy
import httpretty
import json
-import mock
+import requests
+from unittest import mock
from cloudinit import helpers
from cloudinit.sources import DataSourceEc2 as ec2
@@ -37,6 +38,8 @@ DYNAMIC_METADATA = {
# python3 -c 'import json
# from cloudinit.ec2_utils import get_instance_metadata as gm
# print(json.dumps(gm("2016-09-02"), indent=1, sort_keys=True))'
+# Note that the MAC addresses have been modified to sort in the opposite order
+# to the device-number attribute, to test LP: #1876312
DEFAULT_METADATA = {
"ami-id": "ami-8b92b4ee",
"ami-launch-index": "0",
@@ -76,7 +79,7 @@ DEFAULT_METADATA = {
"vpc-ipv4-cidr-blocks": "172.31.0.0/16",
"vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56"
},
- "06:17:04:d7:26:0A": {
+ "06:17:04:d7:26:08": {
"device-number": "1", # Only IPv4 local config
"interface-id": "eni-e44ef49f",
"ipv4-associations": {"": "172.3.3.16"},
@@ -84,7 +87,7 @@ DEFAULT_METADATA = {
"local-hostname": ("ip-172-3-3-16.us-east-2."
"compute.internal"),
"local-ipv4s": "172.3.3.16",
- "mac": "06:17:04:d7:26:0A",
+ "mac": "06:17:04:d7:26:08",
"owner-id": "950047163771",
"public-hostname": ("ec2-172-3-3-16.us-east-2."
"compute.amazonaws.com"),
@@ -112,6 +115,122 @@ DEFAULT_METADATA = {
"services": {"domain": "amazonaws.com", "partition": "aws"},
}
+# collected from api version 2018-09-24/ with
+# python3 -c 'import json
+# from cloudinit.ec2_utils import get_instance_metadata as gm
+# print(json.dumps(gm("2018-09-24"), indent=1, sort_keys=True))'
+
+NIC1_MD_IPV4_IPV6_MULTI_IP = {
+ "device-number": "0",
+ "interface-id": "eni-0d6335689899ce9cc",
+ "ipv4-associations": {
+ "18.218.219.181": "172.31.44.13"
+ },
+ "ipv6s": [
+ "2600:1f16:292:100:c187:593c:4349:136",
+ "2600:1f16:292:100:f153:12a3:c37c:11f9",
+ "2600:1f16:292:100:f152:2222:3333:4444"
+ ],
+ "local-hostname": ("ip-172-31-44-13.us-east-2."
+ "compute.internal"),
+ "local-ipv4s": [
+ "172.31.44.13",
+ "172.31.45.70"
+ ],
+ "mac": "0a:07:84:3d:6e:38",
+ "owner-id": "329910648901",
+ "public-hostname": ("ec2-18-218-219-181.us-east-2."
+ "compute.amazonaws.com"),
+ "public-ipv4s": "18.218.219.181",
+ "security-group-ids": "sg-0c387755222ba8d2e",
+ "security-groups": "launch-wizard-4",
+ "subnet-id": "subnet-9d7ba0d1",
+ "subnet-ipv4-cidr-block": "172.31.32.0/20",
+ "subnet_ipv6_cidr_blocks": "2600:1f16:292:100::/64",
+ "vpc-id": "vpc-a07f62c8",
+ "vpc-ipv4-cidr-block": "172.31.0.0/16",
+ "vpc-ipv4-cidr-blocks": "172.31.0.0/16",
+ "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56"
+}
+
+NIC2_MD = {
+ "device-number": "1",
+ "interface-id": "eni-043cdce36ded5e79f",
+ "local-hostname": "ip-172-31-47-221.us-east-2.compute.internal",
+ "local-ipv4s": "172.31.47.221",
+ "mac": "0a:75:69:92:e2:16",
+ "owner-id": "329910648901",
+ "security-group-ids": "sg-0d68fef37d8cc9b77",
+ "security-groups": "launch-wizard-17",
+ "subnet-id": "subnet-9d7ba0d1",
+ "subnet-ipv4-cidr-block": "172.31.32.0/20",
+ "vpc-id": "vpc-a07f62c8",
+ "vpc-ipv4-cidr-block": "172.31.0.0/16",
+ "vpc-ipv4-cidr-blocks": "172.31.0.0/16"
+}
+
+SECONDARY_IP_METADATA_2018_09_24 = {
+ "ami-id": "ami-0986c2ac728528ac2",
+ "ami-launch-index": "0",
+ "ami-manifest-path": "(unknown)",
+ "block-device-mapping": {
+ "ami": "/dev/sda1",
+ "root": "/dev/sda1"
+ },
+ "events": {
+ "maintenance": {
+ "history": "[]",
+ "scheduled": "[]"
+ }
+ },
+ "hostname": "ip-172-31-44-13.us-east-2.compute.internal",
+ "identity-credentials": {
+ "ec2": {
+ "info": {
+ "AccountId": "329910648901",
+ "Code": "Success",
+ "LastUpdated": "2019-07-06T14:22:56Z"
+ }
+ }
+ },
+ "instance-action": "none",
+ "instance-id": "i-069e01e8cc43732f8",
+ "instance-type": "t2.micro",
+ "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal",
+ "local-ipv4": "172.31.44.13",
+ "mac": "0a:07:84:3d:6e:38",
+ "metrics": {
+ "vhostmd": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
+ },
+ "network": {
+ "interfaces": {
+ "macs": {
+ "0a:07:84:3d:6e:38": NIC1_MD_IPV4_IPV6_MULTI_IP,
+ }
+ }
+ },
+ "placement": {
+ "availability-zone": "us-east-2c"
+ },
+ "profile": "default-hvm",
+ "public-hostname": (
+ "ec2-18-218-219-181.us-east-2.compute.amazonaws.com"),
+ "public-ipv4": "18.218.219.181",
+ "public-keys": {
+ "yourkeyname,e": [
+ "ssh-rsa AAAAW...DZ yourkeyname"
+ ]
+ },
+ "reservation-id": "r-09b4917135cdd33be",
+ "security-groups": "launch-wizard-4",
+ "services": {
+ "domain": "amazonaws.com",
+ "partition": "aws"
+ }
+}
+
+M_PATH_NET = 'cloudinit.sources.DataSourceEc2.net.'
+
def _register_ssh_keys(rfunc, base_url, keys_data):
"""handle ssh key inconsistencies.
@@ -200,6 +319,7 @@ def register_mock_metaserver(base_url, data):
class TestEc2(test_helpers.HttprettyTestCase):
with_logs = True
+ maxDiff = None
valid_platform_data = {
'uuid': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412',
@@ -265,30 +385,23 @@ class TestEc2(test_helpers.HttprettyTestCase):
register_mock_metaserver(instance_id_url, None)
return ds
- def test_network_config_property_returns_version_1_network_data(self):
- """network_config property returns network version 1 for metadata.
-
- Only one device is configured even when multiple exist in metadata.
- """
+ def test_network_config_property_returns_version_2_network_data(self):
+ """network_config property returns network version 2 for metadata"""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
md={'md': DEFAULT_METADATA})
- find_fallback_path = (
- 'cloudinit.sources.DataSourceEc2.net.find_fallback_nic')
+ find_fallback_path = M_PATH_NET + 'find_fallback_nic'
with mock.patch(find_fallback_path) as m_find_fallback:
m_find_fallback.return_value = 'eth9'
ds.get_data()
mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA
- expected = {'version': 1, 'config': [
- {'mac_address': '06:17:04:d7:26:09', 'name': 'eth9',
- 'subnets': [{'type': 'dhcp4'}, {'type': 'dhcp6'}],
- 'type': 'physical'}]}
- patch_path = (
- 'cloudinit.sources.DataSourceEc2.net.get_interfaces_by_mac')
- get_interface_mac_path = (
- 'cloudinit.sources.DataSourceEc2.net.get_interface_mac')
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': '06:17:04:d7:26:09'}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp6': True}}}
+ patch_path = M_PATH_NET + 'get_interfaces_by_mac'
+ get_interface_mac_path = M_PATH_NET + 'get_interface_mac'
with mock.patch(patch_path) as m_get_interfaces_by_mac:
with mock.patch(find_fallback_path) as m_find_fallback:
with mock.patch(get_interface_mac_path) as m_get_mac:
@@ -297,30 +410,59 @@ class TestEc2(test_helpers.HttprettyTestCase):
m_get_mac.return_value = mac1
self.assertEqual(expected, ds.network_config)
- def test_network_config_property_set_dhcp4_on_private_ipv4(self):
- """network_config property configures dhcp4 on private ipv4 nics.
+ def test_network_config_property_set_dhcp4(self):
+ """network_config property configures dhcp4 on nics with local-ipv4s.
- Only one device is configured even when multiple exist in metadata.
+ Only one device is configured based on get_interfaces_by_mac even when
+ multiple MACs exist in metadata.
"""
ds = self._setup_ds(
platform_data=self.valid_platform_data,
sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
md={'md': DEFAULT_METADATA})
- find_fallback_path = (
- 'cloudinit.sources.DataSourceEc2.net.find_fallback_nic')
+ find_fallback_path = M_PATH_NET + 'find_fallback_nic'
with mock.patch(find_fallback_path) as m_find_fallback:
m_find_fallback.return_value = 'eth9'
ds.get_data()
- mac1 = '06:17:04:d7:26:0A' # IPv4 only in DEFAULT_METADATA
- expected = {'version': 1, 'config': [
- {'mac_address': '06:17:04:d7:26:0A', 'name': 'eth9',
- 'subnets': [{'type': 'dhcp4'}],
- 'type': 'physical'}]}
- patch_path = (
- 'cloudinit.sources.DataSourceEc2.net.get_interfaces_by_mac')
- get_interface_mac_path = (
- 'cloudinit.sources.DataSourceEc2.net.get_interface_mac')
+ mac1 = '06:17:04:d7:26:08' # IPv4 only in DEFAULT_METADATA
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': mac1.lower()}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp6': False}}}
+ patch_path = M_PATH_NET + 'get_interfaces_by_mac'
+ get_interface_mac_path = M_PATH_NET + 'get_interface_mac'
+ with mock.patch(patch_path) as m_get_interfaces_by_mac:
+ with mock.patch(find_fallback_path) as m_find_fallback:
+ with mock.patch(get_interface_mac_path) as m_get_mac:
+ m_get_interfaces_by_mac.return_value = {mac1: 'eth9'}
+ m_find_fallback.return_value = 'eth9'
+ m_get_mac.return_value = mac1
+ self.assertEqual(expected, ds.network_config)
+
+ def test_network_config_property_secondary_private_ips(self):
+ """network_config property configures any secondary ipv4 addresses.
+
+ Only one device is configured based on get_interfaces_by_mac even when
+ multiple MACs exist in metadata.
+ """
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={'datasource': {'Ec2': {'strict_id': True}}},
+ md={'md': SECONDARY_IP_METADATA_2018_09_24})
+ find_fallback_path = M_PATH_NET + 'find_fallback_nic'
+ with mock.patch(find_fallback_path) as m_find_fallback:
+ m_find_fallback.return_value = 'eth9'
+ ds.get_data()
+
+ mac1 = '0a:07:84:3d:6e:38' # 1 secondary IPv4 and 2 secondary IPv6
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': mac1}, 'set-name': 'eth9',
+ 'addresses': ['172.31.45.70/20',
+ '2600:1f16:292:100:f152:2222:3333:4444/128',
+ '2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
+ 'dhcp4': True, 'dhcp6': True}}}
+ patch_path = M_PATH_NET + 'get_interfaces_by_mac'
+ get_interface_mac_path = M_PATH_NET + 'get_interface_mac'
with mock.patch(patch_path) as m_get_interfaces_by_mac:
with mock.patch(find_fallback_path) as m_find_fallback:
with mock.patch(get_interface_mac_path) as m_get_mac:
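
The expected dictionaries above are netplan-style version 2 network config. Dumped as YAML (assuming PyYAML), the secondary-IP case would render roughly as:

    import yaml

    config = {'version': 2, 'ethernets': {'eth9': {
        'match': {'macaddress': '0a:07:84:3d:6e:38'}, 'set-name': 'eth9',
        'addresses': ['172.31.45.70/20',
                      '2600:1f16:292:100:f152:2222:3333:4444/128',
                      '2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
        'dhcp4': True, 'dhcp6': True}}}
    print(yaml.dump(config, default_flow_style=False))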
@@ -356,21 +498,18 @@ class TestEc2(test_helpers.HttprettyTestCase):
register_mock_metaserver(
'http://169.254.169.254/2009-04-04/meta-data/', DEFAULT_METADATA)
mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA
- get_interface_mac_path = (
- 'cloudinit.sources.DataSourceEc2.net.get_interface_mac')
+ get_interface_mac_path = M_PATH_NET + 'get_interfaces_by_mac'
ds.fallback_nic = 'eth9'
- with mock.patch(get_interface_mac_path) as m_get_interface_mac:
- m_get_interface_mac.return_value = mac1
+ with mock.patch(get_interface_mac_path) as m_get_interfaces_by_mac:
+ m_get_interfaces_by_mac.return_value = {mac1: 'eth9'}
nc = ds.network_config # Will re-crawl network metadata
self.assertIsNotNone(nc)
self.assertIn(
'Refreshing stale metadata from prior to upgrade',
self.logs.getvalue())
- expected = {'version': 1, 'config': [
- {'mac_address': '06:17:04:d7:26:09',
- 'name': 'eth9',
- 'subnets': [{'type': 'dhcp4'}, {'type': 'dhcp6'}],
- 'type': 'physical'}]}
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': mac1}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp6': True}}}
self.assertEqual(expected, ds.network_config)
def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self):
@@ -429,6 +568,69 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.assertTrue(ds.get_data())
self.assertFalse(ds.is_classic_instance())
+ def test_aws_inaccessible_imds_service_fails_with_retries(self):
+ """Inaccessibility of http://169.254.169.254 are retried."""
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
+ md=None)
+
+ conn_error = requests.exceptions.ConnectionError(
+ '[Errno 113] no route to host')
+
+ mock_success = mock.MagicMock(contents=b'fakesuccess')
+ mock_success.ok.return_value = True
+
+ with mock.patch('cloudinit.url_helper.readurl') as m_readurl:
+ m_readurl.side_effect = (conn_error, conn_error, mock_success)
+ with mock.patch('cloudinit.url_helper.time.sleep'):
+ self.assertTrue(ds.wait_for_metadata_service())
+
+ # All three requests are to /latest/api/token
+ self.assertEqual(3, len(m_readurl.call_args_list))
+ for readurl_call in m_readurl.call_args_list:
+ self.assertIn('latest/api/token', readurl_call[0][0])
+
+ def test_aws_token_403_fails_without_retries(self):
+ """Verify that 403s fetching AWS tokens are not retried."""
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
+ md=None)
+ token_url = self.data_url('latest', data_item='api/token')
+ httpretty.register_uri(httpretty.PUT, token_url, body={}, status=403)
+ self.assertFalse(ds.get_data())
+ # Just one /latest/api/token request
+ logs = self.logs.getvalue()
+ failed_put_log = '"PUT /latest/api/token HTTP/1.1" 403 0'
+ expected_logs = [
+ 'WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is'
+ ' disabled. Aborting.',
+ "WARNING: IMDS's HTTP endpoint is probably disabled",
+ failed_put_log
+ ]
+ for log in expected_logs:
+ self.assertIn(log, logs)
+ self.assertEqual(
+ 1, len([l for l in logs.splitlines() if failed_put_log in l]))
+
+ def test_aws_token_redacted(self):
+ """Verify that aws tokens are redacted when logged."""
+ ds = self._setup_ds(
+ platform_data=self.valid_platform_data,
+ sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
+ md={'md': DEFAULT_METADATA})
+ self.assertTrue(ds.get_data())
+ all_logs = self.logs.getvalue().splitlines()
+ REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'"
+ REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'"
+ logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log]
+ logs_with_redacted = [log for log in all_logs if REDACT_TOK in log]
+ logs_with_token = [log for log in all_logs if 'API-TOKEN' in log]
+ self.assertEqual(1, len(logs_with_redacted_ttl))
+ self.assertEqual(81, len(logs_with_redacted))
+ self.assertEqual(0, len(logs_with_token))
+
@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
def test_valid_platform_with_strict_true(self, m_dhcp):
"""Valid platform data should return true with strict_id true."""
@@ -547,23 +749,64 @@ class TestEc2(test_helpers.HttprettyTestCase):
self.assertIn('Crawl of metadata service took', self.logs.getvalue())
+class TestGetSecondaryAddresses(test_helpers.CiTestCase):
+
+ mac = '06:17:04:d7:26:ff'
+ with_logs = True
+
+ def test_md_with_no_secondary_addresses(self):
+ """Empty list is returned when nic metadata contains no secondary ip"""
+ self.assertEqual([], ec2.get_secondary_addresses(NIC2_MD, self.mac))
+
+ def test_md_with_secondary_v4_and_v6_addresses(self):
+ """All secondary addresses are returned from nic metadata"""
+ self.assertEqual(
+ ['172.31.45.70/20', '2600:1f16:292:100:f152:2222:3333:4444/128',
+ '2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
+ ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac))
+
+ def test_invalid_ipv4_ipv6_cidr_metadata_logged_with_defaults(self):
+ """Any invalid subnet-ipv(4|6)-cidr-block values use defaults"""
+ invalid_cidr_md = copy.deepcopy(NIC1_MD_IPV4_IPV6_MULTI_IP)
+ invalid_cidr_md['subnet-ipv4-cidr-block'] = "something-unexpected"
+ invalid_cidr_md['subnet-ipv6-cidr-block'] = "not/sure/what/this/is"
+ self.assertEqual(
+ ['172.31.45.70/24', '2600:1f16:292:100:f152:2222:3333:4444/128',
+ '2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
+ ec2.get_secondary_addresses(invalid_cidr_md, self.mac))
+ expected_logs = [
+ "WARNING: Could not parse subnet-ipv4-cidr-block"
+ " something-unexpected for mac 06:17:04:d7:26:ff."
+ " ipv4 network config prefix defaults to /24",
+ "WARNING: Could not parse subnet-ipv6-cidr-block"
+ " not/sure/what/this/is for mac 06:17:04:d7:26:ff."
+ " ipv6 network config prefix defaults to /128"
+ ]
+ logs = self.logs.getvalue()
+ for log in expected_logs:
+ self.assertIn(log, logs)
+
+
class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase):
def setUp(self):
super(TestConvertEc2MetadataNetworkConfig, self).setUp()
self.mac1 = '06:17:04:d7:26:09'
+ interface_dict = copy.deepcopy(
+ DEFAULT_METADATA['network']['interfaces']['macs'][self.mac1])
+ # These tests are written assuming the base interface doesn't have IPv6
+ interface_dict.pop('ipv6s')
self.network_metadata = {
- 'interfaces': {'macs': {
- self.mac1: {'public-ipv4s': '172.31.2.16'}}}}
+ 'interfaces': {'macs': {self.mac1: interface_dict}}}
def test_convert_ec2_metadata_network_config_skips_absent_macs(self):
"""Any mac absent from metadata is skipped by network config."""
        macs_to_nics = {self.mac1: 'eth9', 'DE:AD:BE:EF:FF:FF': 'virtualnic2'}
# DE:AD:BE:EF:FF:FF represented by OS but not in metadata
- expected = {'version': 1, 'config': [
- {'mac_address': self.mac1, 'type': 'physical',
- 'name': 'eth9', 'subnets': [{'type': 'dhcp4'}]}]}
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp6': False}}}
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
@@ -577,15 +820,15 @@ class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase):
network_metadata_ipv6['interfaces']['macs'][self.mac1])
nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
nic1_metadata.pop('public-ipv4s')
- expected = {'version': 1, 'config': [
- {'mac_address': self.mac1, 'type': 'physical',
- 'name': 'eth9', 'subnets': [{'type': 'dhcp6'}]}]}
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp6': True}}}
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
network_metadata_ipv6, macs_to_nics))
- def test_convert_ec2_metadata_network_config_handles_local_dhcp4(self):
+ def test_convert_ec2_metadata_network_config_local_only_dhcp4(self):
"""Config dhcp4 when there are no public addresses in public-ipv4s."""
macs_to_nics = {self.mac1: 'eth9'}
network_metadata_ipv6 = copy.deepcopy(self.network_metadata)
@@ -593,9 +836,9 @@ class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase):
network_metadata_ipv6['interfaces']['macs'][self.mac1])
nic1_metadata['local-ipv4s'] = '172.3.3.15'
nic1_metadata.pop('public-ipv4s')
- expected = {'version': 1, 'config': [
- {'mac_address': self.mac1, 'type': 'physical',
- 'name': 'eth9', 'subnets': [{'type': 'dhcp4'}]}]}
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp6': False}}}
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
@@ -610,16 +853,16 @@ class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase):
nic1_metadata['public-ipv4s'] = ''
# When no ipv4 or ipv6 content but fallback_nic set, set dhcp4 config.
- expected = {'version': 1, 'config': [
- {'mac_address': self.mac1, 'type': 'physical',
- 'name': 'eth9', 'subnets': [{'type': 'dhcp4'}]}]}
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp6': False}}}
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
network_metadata_ipv6, macs_to_nics, fallback_nic='eth9'))
def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self):
- """When dhcp6 is public and dhcp4 is set to local enable both."""
+ """When ipv6s and local-ipv4s are non-empty, enable dhcp6 and dhcp4."""
macs_to_nics = {self.mac1: 'eth9'}
network_metadata_both = copy.deepcopy(self.network_metadata)
nic1_metadata = (
@@ -627,10 +870,35 @@ class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase):
nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
nic1_metadata.pop('public-ipv4s')
nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc
- expected = {'version': 1, 'config': [
- {'mac_address': self.mac1, 'type': 'physical',
- 'name': 'eth9',
- 'subnets': [{'type': 'dhcp4'}, {'type': 'dhcp6'}]}]}
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp6': True}}}
+ self.assertEqual(
+ expected,
+ ec2.convert_ec2_metadata_network_config(
+ network_metadata_both, macs_to_nics))
+
+ def test_convert_ec2_metadata_network_config_handles_multiple_nics(self):
+ """DHCP route-metric increases on secondary NICs for IPv4 and IPv6."""
+ mac2 = '06:17:04:d7:26:08'
+ macs_to_nics = {self.mac1: 'eth9', mac2: 'eth10'}
+ network_metadata_both = copy.deepcopy(self.network_metadata)
+ # Add 2nd nic info
+ network_metadata_both['interfaces']['macs'][mac2] = NIC2_MD
+ nic1_metadata = (
+ network_metadata_both['interfaces']['macs'][self.mac1])
+ nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
+ nic1_metadata.pop('public-ipv4s') # No public-ipv4 IPs in cfg
+ nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc
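+        # Route-metric scheme assumed by `expected` below: the primary NIC
+        # gets metric 100 and each subsequent NIC gets +100, so routes via
+        # the primary interface win by default.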
+ expected = {'version': 2, 'ethernets': {
+ 'eth9': {
+ 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100},
+ 'dhcp6': True, 'dhcp6-overrides': {'route-metric': 100}},
+ 'eth10': {
+ 'match': {'macaddress': mac2}, 'set-name': 'eth10',
+ 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 200},
+ 'dhcp6': False}}}
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
@@ -643,10 +911,9 @@ class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase):
nic1_metadata = (
network_metadata_both['interfaces']['macs'][self.mac1])
nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64'
- expected = {'version': 1, 'config': [
- {'mac_address': self.mac1, 'type': 'physical',
- 'name': 'eth9',
- 'subnets': [{'type': 'dhcp4'}, {'type': 'dhcp6'}]}]}
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': self.mac1}, 'set-name': 'eth9',
+ 'dhcp4': True, 'dhcp6': True}}}
self.assertEqual(
expected,
ec2.convert_ec2_metadata_network_config(
@@ -654,12 +921,10 @@ class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase):
def test_convert_ec2_metadata_gets_macs_from_get_interfaces_by_mac(self):
"""Convert Ec2 Metadata calls get_interfaces_by_mac by default."""
- expected = {'version': 1, 'config': [
- {'mac_address': self.mac1, 'type': 'physical',
- 'name': 'eth9',
- 'subnets': [{'type': 'dhcp4'}]}]}
- patch_path = (
- 'cloudinit.sources.DataSourceEc2.net.get_interfaces_by_mac')
+ expected = {'version': 2, 'ethernets': {'eth9': {
+ 'match': {'macaddress': self.mac1},
+ 'set-name': 'eth9', 'dhcp4': True, 'dhcp6': False}}}
+ patch_path = M_PATH_NET + 'get_interfaces_by_mac'
with mock.patch(patch_path) as m_get_interfaces_by_mac:
m_get_interfaces_by_mac.return_value = {self.mac1: 'eth9'}
self.assertEqual(
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 67744d32..4afbccff 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -7,11 +7,11 @@
import datetime
import httpretty
import json
-import mock
import re
+from unittest import mock
+from urllib.parse import urlparse
from base64 import b64encode, b64decode
-from six.moves.urllib_parse import urlparse
from cloudinit import distros
from cloudinit import helpers
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index c84d067e..41b6c27b 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -1,11 +1,11 @@
# This file is part of cloud-init. See LICENSE file for license information.
from copy import copy
-import mock
import os
import shutil
import tempfile
import yaml
+from unittest import mock
from cloudinit.sources import DataSourceMAAS
from cloudinit import url_helper
@@ -158,7 +158,6 @@ class TestMAASDataSource(CiTestCase):
@mock.patch("cloudinit.sources.DataSourceMAAS.url_helper.OauthUrlHelper")
class TestGetOauthHelper(CiTestCase):
- with_logs = True
base_cfg = {'consumer_key': 'FAKE_CONSUMER_KEY',
'token_key': 'FAKE_TOKEN_KEY',
'token_secret': 'FAKE_TOKEN_SECRET',
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
index 18bea0b9..2e6b53ff 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/test_datasource/test_nocloud.py
@@ -288,8 +288,23 @@ class TestNoCloudDataSource(CiTestCase):
self.mocks.enter_context(
mock.patch.object(util, 'is_FreeBSD', return_value=True))
+ def _mfind_devs_with_freebsd(
+ criteria=None, oformat='device',
+ tag=None, no_cache=False, path=None):
+ if not criteria:
+ return ["/dev/msdosfs/foo", "/dev/iso9660/foo"]
+ if criteria.startswith("LABEL="):
+ return ["/dev/msdosfs/foo", "/dev/iso9660/foo"]
+ elif criteria == "TYPE=vfat":
+ return ["/dev/msdosfs/foo"]
+ elif criteria == "TYPE=iso9660":
+ return ["/dev/iso9660/foo"]
+ return []
+
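+        # The stub above appears to mirror FreeBSD's glabel layout, where
+        # filesystem labels surface under /dev/msdosfs/ and /dev/iso9660/,
+        # letting _get_devices filter devices by filesystem type.
+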
self.mocks.enter_context(
- mock.patch.object(os.path, 'exists', return_value=True))
+ mock.patch.object(
+ util, 'find_devs_with_freebsd',
+ side_effect=_mfind_devs_with_freebsd))
dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths)
ret = dsrc._get_devices('foo')
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
index bb399f6d..de896a9e 100644
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ b/tests/unittests/test_datasource/test_opennebula.py
@@ -355,6 +355,7 @@ class TestOpenNebulaDataSource(CiTestCase):
util.find_devs_with = orig_find_devs_with
+@mock.patch(DS_PATH + '.net.get_interfaces_by_mac', mock.Mock(return_value={}))
class TestOpenNebulaNetwork(unittest.TestCase):
system_nics = ('eth0', 'ens3')
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
index a731f1ed..b534457c 100644
--- a/tests/unittests/test_datasource/test_openstack.py
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -8,12 +8,11 @@ import copy
import httpretty as hp
import json
import re
+from io import StringIO
+from urllib.parse import urlparse
from cloudinit.tests import helpers as test_helpers
-from six.moves.urllib.parse import urlparse
-from six import StringIO, text_type
-
from cloudinit import helpers
from cloudinit import settings
from cloudinit.sources import BrokenMetadata, convert_vendordata, UNSET
@@ -511,6 +510,24 @@ class TestDetectOpenStack(test_helpers.CiTestCase):
'Expected detect_openstack == True on OpenTelekomCloud')
@test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
+ def test_detect_openstack_sapccloud_chassis_asset_tag(self, m_dmi,
+ m_is_x86):
+ """Return True on OpenStack reporting SAP CCloud VM asset-tag."""
+ m_is_x86.return_value = True
+
+ def fake_dmi_read(dmi_key):
+ if dmi_key == 'system-product-name':
+ return 'VMware Virtual Platform' # SAP CCloud uses VMware
+ if dmi_key == 'chassis-asset-tag':
+ return 'SAP CCloud VM'
+ assert False, 'Unexpected dmi read of %s' % dmi_key
+
+ m_dmi.side_effect = fake_dmi_read
+ self.assertTrue(
+ ds.detect_openstack(),
+ 'Expected detect_openstack == True on SAP CCloud VM')
+
+ @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data')
def test_detect_openstack_oraclecloud_chassis_asset_tag(self, m_dmi,
m_is_x86):
"""Return True on OpenStack reporting Oracle cloud asset-tag."""
@@ -569,8 +586,7 @@ class TestMetadataReader(test_helpers.HttprettyTestCase):
'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'}
def register(self, path, body=None, status=200):
- content = (body if not isinstance(body, text_type)
- else body.encode('utf-8'))
+ content = body if not isinstance(body, str) else body.encode('utf-8')
hp.register_uri(
hp.GET, self.burl + "openstack" + path, status=status,
body=content)
diff --git a/tests/unittests/test_datasource/test_rbx.py b/tests/unittests/test_datasource/test_rbx.py
index aabf1f18..553af62e 100644
--- a/tests/unittests/test_datasource/test_rbx.py
+++ b/tests/unittests/test_datasource/test_rbx.py
@@ -4,6 +4,7 @@ from cloudinit import helpers
from cloudinit import distros
from cloudinit.sources import DataSourceRbxCloud as ds
from cloudinit.tests.helpers import mock, CiTestCase, populate_dir
+from cloudinit import util
DS_PATH = "cloudinit.sources.DataSourceRbxCloud"
@@ -199,6 +200,35 @@ class TestRbxDataSource(CiTestCase):
m_subp.call_args_list
)
+ @mock.patch(
+ DS_PATH + '.util.subp',
+ side_effect=util.ProcessExecutionError()
+ )
+ def test_continue_on_arping_error(self, m_subp):
+ """Continue when command error"""
+ items = [
+ {
+ 'destination': '172.17.0.2',
+ 'source': '172.16.6.104'
+ },
+ {
+ 'destination': '172.17.0.2',
+ 'source': '172.16.6.104',
+ },
+ ]
+ ds.gratuitous_arp(items, self._fetch_distro('ubuntu'))
+ self.assertEqual([
+ mock.call([
+ 'arping', '-c', '2', '-S',
+ '172.16.6.104', '172.17.0.2'
+ ]),
+ mock.call([
+ 'arping', '-c', '2', '-S',
+ '172.16.6.104', '172.17.0.2'
+ ])
+ ], m_subp.call_args_list
+ )
+
def populate_cloud_metadata(path, data):
populate_dir(path, {'cloud.json': json.dumps(data)})
diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py
index f96bf0a2..1b4dd0ad 100644
--- a/tests/unittests/test_datasource/test_scaleway.py
+++ b/tests/unittests/test_datasource/test_scaleway.py
@@ -7,6 +7,7 @@ import requests
from cloudinit import helpers
from cloudinit import settings
+from cloudinit import sources
from cloudinit.sources import DataSourceScaleway
from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase
@@ -403,3 +404,51 @@ class TestDataSourceScaleway(HttprettyTestCase):
netcfg = self.datasource.network_config
self.assertEqual(netcfg, '0xdeadbeef')
+
+ @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
+ @mock.patch('cloudinit.util.get_cmdline')
+ def test_network_config_unset(self, m_get_cmdline, fallback_nic):
+ """
+ _network_config will be set to sources.UNSET after the first boot.
+        Make sure it behaves correctly.
+ """
+ m_get_cmdline.return_value = 'scaleway'
+ fallback_nic.return_value = 'ens2'
+ self.datasource.metadata['ipv6'] = None
+ self.datasource._network_config = sources.UNSET
+
+ resp = {'version': 1,
+ 'config': [{
+ 'type': 'physical',
+ 'name': 'ens2',
+ 'subnets': [{'type': 'dhcp4'}]}]
+ }
+
+ netcfg = self.datasource.network_config
+ self.assertEqual(netcfg, resp)
+
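+    # Distinction exercised here and in the next test: sources.UNSET is the
+    # "not yet computed" sentinel, while a cached None is treated as invalid
+    # and triggers recomputation plus a warning.
+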
+ @mock.patch('cloudinit.sources.DataSourceScaleway.LOG.warning')
+ @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic')
+ @mock.patch('cloudinit.util.get_cmdline')
+ def test_network_config_cached_none(self, m_get_cmdline, fallback_nic,
+ logwarning):
+ """
+        network_config() should return config data when cached data is None
+        rather than sources.UNSET.
+ """
+ m_get_cmdline.return_value = 'scaleway'
+ fallback_nic.return_value = 'ens2'
+ self.datasource.metadata['ipv6'] = None
+ self.datasource._network_config = None
+
+ resp = {'version': 1,
+ 'config': [{
+ 'type': 'physical',
+ 'name': 'ens2',
+ 'subnets': [{'type': 'dhcp4'}]}]
+ }
+
+ netcfg = self.datasource.network_config
+ self.assertEqual(netcfg, resp)
+ logwarning.assert_called_with('Found None as cached _network_config. '
+ 'Resetting to %s', sources.UNSET)
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index d5b1c29c..f1ab1d7a 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -12,8 +12,6 @@ order to validate return responses.
'''
-from __future__ import print_function
-
from binascii import crc32
import json
import multiprocessing
@@ -22,7 +20,7 @@ import os.path
import re
import signal
import stat
-import unittest2
+import unittest
import uuid
from cloudinit import serial
@@ -33,8 +31,6 @@ from cloudinit.sources.DataSourceSmartOS import (
identify_file)
from cloudinit.event import EventType
-import six
-
from cloudinit import helpers as c_helpers
from cloudinit.util import (
b64e, subp, ProcessExecutionError, which, write_file)
@@ -798,7 +794,7 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase):
return self.serial.write.call_args[0][0]
def test_get_metadata_writes_bytes(self):
- self.assertIsInstance(self._get_written_line(), six.binary_type)
+ self.assertIsInstance(self._get_written_line(), bytes)
def test_get_metadata_line_starts_with_v2(self):
foo = self._get_written_line()
@@ -1097,11 +1093,11 @@ class TestNetworkConversion(CiTestCase):
self.assertEqual(expected, found)
-@unittest2.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM,
- "Only supported on KVM and bhyve guests under SmartOS")
-@unittest2.skipUnless(os.access(SERIAL_DEVICE, os.W_OK),
- "Requires write access to " + SERIAL_DEVICE)
-@unittest2.skipUnless(HAS_PYSERIAL is True, "pyserial not available")
+@unittest.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM,
+ "Only supported on KVM and bhyve guests under SmartOS")
+@unittest.skipUnless(os.access(SERIAL_DEVICE, os.W_OK),
+ "Requires write access to " + SERIAL_DEVICE)
+@unittest.skipUnless(HAS_PYSERIAL is True, "pyserial not available")
class TestSerialConcurrency(CiTestCase):
"""
This class tests locking on an actual serial port, and as such can only
diff --git a/tests/unittests/test_distros/test_bsd_utils.py b/tests/unittests/test_distros/test_bsd_utils.py
new file mode 100644
index 00000000..b38e4af5
--- /dev/null
+++ b/tests/unittests/test_distros/test_bsd_utils.py
@@ -0,0 +1,66 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import cloudinit.distros.bsd_utils as bsd_utils
+
+from cloudinit.tests.helpers import (CiTestCase, ExitStack, mock)
+
+RC_FILE = """
+if something; then
+ do something here
+fi
+hostname={hostname}
+"""
+
+
+class TestBsdUtils(CiTestCase):
+
+ def setUp(self):
+ super().setUp()
+ patches = ExitStack()
+ self.addCleanup(patches.close)
+
+ self.load_file = patches.enter_context(
+ mock.patch.object(bsd_utils.util, 'load_file'))
+
+ self.write_file = patches.enter_context(
+ mock.patch.object(bsd_utils.util, 'write_file'))
+
+ def test_get_rc_config_value(self):
+ self.load_file.return_value = 'hostname=foo\n'
+ self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
+ self.load_file.assert_called_with('/etc/rc.conf')
+
+ self.load_file.return_value = 'hostname=foo'
+ self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
+
+ self.load_file.return_value = 'hostname="foo"'
+ self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
+
+ self.load_file.return_value = "hostname='foo'"
+ self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo')
+
+ self.load_file.return_value = 'hostname=\'foo"'
+ self.assertEqual(bsd_utils.get_rc_config_value('hostname'), "'foo\"")
+
+ self.load_file.return_value = ''
+ self.assertEqual(bsd_utils.get_rc_config_value('hostname'), None)
+
+ self.load_file.return_value = RC_FILE.format(hostname='foo')
+ self.assertEqual(bsd_utils.get_rc_config_value('hostname'), "foo")
+
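+    # Unquoting rule the cases above assume: a value wrapped in a matching
+    # pair of single or double quotes is unwrapped once; mismatched quotes
+    # are returned verbatim. A sketch (hypothetical, not bsd_utils' code):
+    #
+    #   def _unquote(value):
+    #       if len(value) > 1 and value[0] == value[-1] \
+    #               and value[0] in ('"', "'"):
+    #           return value[1:-1]
+    #       return value
+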
+    def test_set_rc_config_value_unchanged(self):
+        # Setting a key to the value it already has must not rewrite
+        # /etc/rc.conf, so write_file is never invoked.
+        self.load_file.return_value = RC_FILE.format(hostname='foo')
+        bsd_utils.set_rc_config_value('hostname', 'foo')
+        self.write_file.assert_not_called()
+
+ def test_set_rc_config_value(self):
+ bsd_utils.set_rc_config_value('hostname', 'foo')
+ self.write_file.assert_called_with('/etc/rc.conf', 'hostname=foo\n')
+
+ self.load_file.return_value = RC_FILE.format(hostname='foo')
+ bsd_utils.set_rc_config_value('hostname', 'bar')
+ self.write_file.assert_called_with(
+ '/etc/rc.conf',
+ RC_FILE.format(hostname='bar'))
diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py
index 7e0da4f2..02b334e3 100644
--- a/tests/unittests/test_distros/test_generic.py
+++ b/tests/unittests/test_distros/test_generic.py
@@ -8,11 +8,7 @@ from cloudinit.tests import helpers
import os
import shutil
import tempfile
-
-try:
- from unittest import mock
-except ImportError:
- import mock
+from unittest import mock
unknown_arch_info = {
'arches': ['default'],
diff --git a/tests/unittests/test_distros/test_netbsd.py b/tests/unittests/test_distros/test_netbsd.py
new file mode 100644
index 00000000..11a68d2a
--- /dev/null
+++ b/tests/unittests/test_distros/test_netbsd.py
@@ -0,0 +1,17 @@
+import cloudinit.distros.netbsd
+
+import pytest
+import unittest.mock as mock
+
+
+@pytest.mark.parametrize('with_pkgin', (True, False))
+@mock.patch("cloudinit.distros.netbsd.os")
+def test_init(m_os, with_pkgin):
+ m_os.path.exists.return_value = with_pkgin
+ cfg = {}
+
+ distro = cloudinit.distros.netbsd.NetBSD("netbsd", cfg, None)
+ expectation = ['pkgin', '-y', 'full-upgrade'] if with_pkgin else None
+ assert distro.pkg_cmd_upgrade_prefix == expectation
+ assert [mock.call('/usr/pkg/bin/pkgin')] == m_os.path.exists.call_args_list
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index aeaadaa0..ccf66161 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -2,13 +2,9 @@
import copy
import os
-from six import StringIO
+from io import StringIO
from textwrap import dedent
-
-try:
- from unittest import mock
-except ImportError:
- import mock
+from unittest import mock
from cloudinit import distros
from cloudinit.distros.parsers.sys_conf import SysConf
@@ -489,7 +485,6 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
NETMASK=255.255.255.0
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""),
@@ -498,7 +493,6 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
DEVICE=eth1
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""),
@@ -517,13 +511,11 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
BOOTPROTO=none
DEFROUTE=yes
DEVICE=eth0
- IPADDR6=2607:f0d0:1002:0011::2/64
IPV6ADDR=2607:f0d0:1002:0011::2/64
IPV6INIT=yes
IPV6_DEFAULTGW=2607:f0d0:1002:0011::1
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""),
@@ -532,7 +524,6 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase):
DEVICE=eth1
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""),
@@ -577,26 +568,14 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase):
"""Opensuse uses apply_network_config and renders sysconfig"""
expected_cfgs = {
self.ifcfg_path('eth0'): dedent("""\
- BOOTPROTO=none
- DEFROUTE=yes
- DEVICE=eth0
- GATEWAY=192.168.1.254
+ BOOTPROTO=static
IPADDR=192.168.1.5
NETMASK=255.255.255.0
- NM_CONTROLLED=no
- ONBOOT=yes
STARTMODE=auto
- TYPE=Ethernet
- USERCTL=no
"""),
self.ifcfg_path('eth1'): dedent("""\
- BOOTPROTO=dhcp
- DEVICE=eth1
- NM_CONTROLLED=no
- ONBOOT=yes
+ BOOTPROTO=dhcp4
STARTMODE=auto
- TYPE=Ethernet
- USERCTL=no
"""),
}
self._apply_and_verify(self.distro.apply_network_config,
@@ -607,27 +586,13 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase):
"""Opensuse uses apply_network_config and renders sysconfig w/ipv6"""
expected_cfgs = {
self.ifcfg_path('eth0'): dedent("""\
- BOOTPROTO=none
- DEFROUTE=yes
- DEVICE=eth0
+ BOOTPROTO=static
IPADDR6=2607:f0d0:1002:0011::2/64
- IPV6ADDR=2607:f0d0:1002:0011::2/64
- IPV6INIT=yes
- IPV6_DEFAULTGW=2607:f0d0:1002:0011::1
- NM_CONTROLLED=no
- ONBOOT=yes
STARTMODE=auto
- TYPE=Ethernet
- USERCTL=no
"""),
self.ifcfg_path('eth1'): dedent("""\
- BOOTPROTO=dhcp
- DEVICE=eth1
- NM_CONTROLLED=no
- ONBOOT=yes
+ BOOTPROTO=dhcp4
STARTMODE=auto
- TYPE=Ethernet
- USERCTL=no
"""),
}
self._apply_and_verify(self.distro.apply_network_config,
diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py
index fa4b6cfe..a6faf0ef 100644
--- a/tests/unittests/test_distros/test_user_data_normalize.py
+++ b/tests/unittests/test_distros/test_user_data_normalize.py
@@ -1,12 +1,13 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from unittest import mock
+
from cloudinit import distros
from cloudinit.distros import ug_util
from cloudinit import helpers
from cloudinit import settings
from cloudinit.tests.helpers import TestCase
-import mock
bcfg = {
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 36d7fbbf..f0e96b44 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -447,6 +447,10 @@ class TestDsIdentify(DsIdentifyBase):
"""Open Telecom identification."""
self._test_ds_found('OpenStack-OpenTelekom')
+ def test_openstack_sap_ccloud(self):
+ """SAP Converged Cloud identification"""
+ self._test_ds_found('OpenStack-SAPCCloud')
+
def test_openstack_asset_tag_nova(self):
"""OpenStack identification via asset tag OpenStack Nova."""
self._test_ds_found('OpenStack-AssetTag-Nova')
@@ -834,6 +838,12 @@ VALID_CFG = {
'files': {P_CHASSIS_ASSET_TAG: 'OpenTelekomCloud\n'},
'mocks': [MOCK_VIRT_IS_XEN],
},
+ 'OpenStack-SAPCCloud': {
+ # SAP CCloud hosts use OpenStack on VMware
+ 'ds': 'OpenStack',
+ 'files': {P_CHASSIS_ASSET_TAG: 'SAP CCloud VM\n'},
+ 'mocks': [MOCK_VIRT_IS_VMWARE],
+ },
'OpenStack-AssetTag-Nova': {
# VMware vSphere can't modify product-name, LP: #1669875
'ds': 'OpenStack',
diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/test_filters/test_launch_index.py
index e1a5d2c8..1492361e 100644
--- a/tests/unittests/test_filters/test_launch_index.py
+++ b/tests/unittests/test_filters/test_launch_index.py
@@ -1,11 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
import copy
+from itertools import filterfalse
from cloudinit.tests import helpers
-from six.moves import filterfalse
-
from cloudinit.filters import launch_index
from cloudinit import user_data as ud
from cloudinit import util
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
index 23bd6e10..69009a44 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py
@@ -7,11 +7,7 @@ import logging
import os
import shutil
import tempfile
-
-try:
- from unittest import mock
-except ImportError:
- import mock
+from unittest import mock
from cloudinit import cloud
from cloudinit import distros
@@ -78,7 +74,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
get_rel = rpatcher.start()
get_rel.return_value = {'codename': "fakerelease"}
self.addCleanup(rpatcher.stop)
- apatcher = mock.patch("cloudinit.util.get_architecture")
+ apatcher = mock.patch("cloudinit.util.get_dpkg_architecture")
get_arch = apatcher.start()
get_arch.return_value = 'amd64'
self.addCleanup(apatcher.stop)
diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py
index f7608c28..0aa3d51a 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py
@@ -7,12 +7,8 @@ import logging
import os
import shutil
import tempfile
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-from mock import call
+from unittest import mock
+from unittest.mock import call
from cloudinit import cloud
from cloudinit import distros
@@ -106,7 +102,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
get_rel = rpatcher.start()
get_rel.return_value = {'codename': "fakerel"}
self.addCleanup(rpatcher.stop)
- apatcher = mock.patch("cloudinit.util.get_architecture")
+ apatcher = mock.patch("cloudinit.util.get_dpkg_architecture")
get_arch = apatcher.start()
get_arch.return_value = 'amd64'
self.addCleanup(apatcher.stop)
diff --git a/tests/unittests/test_handler/test_handler_apt_source_v1.py b/tests/unittests/test_handler/test_handler_apt_source_v1.py
index a3132fbd..866752ef 100644
--- a/tests/unittests/test_handler/test_handler_apt_source_v1.py
+++ b/tests/unittests/test_handler/test_handler_apt_source_v1.py
@@ -9,12 +9,8 @@ import os
import re
import shutil
import tempfile
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-from mock import call
+from unittest import mock
+from unittest.mock import call
from cloudinit.config import cc_apt_configure
from cloudinit import gpg
@@ -77,7 +73,7 @@ class TestAptSourceConfig(TestCase):
get_rel = rpatcher.start()
get_rel.return_value = {'codename': self.release}
self.addCleanup(rpatcher.stop)
- apatcher = mock.patch("cloudinit.util.get_architecture")
+ apatcher = mock.patch("cloudinit.util.get_dpkg_architecture")
get_arch = apatcher.start()
get_arch.return_value = 'amd64'
self.addCleanup(apatcher.stop)
diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py
index 2f21b6dc..4762dbef 100644
--- a/tests/unittests/test_handler/test_handler_apt_source_v3.py
+++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py
@@ -11,13 +11,8 @@ import shutil
import socket
import tempfile
-from unittest import TestCase
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-from mock import call
+from unittest import TestCase, mock
+from unittest.mock import call
from cloudinit import cloud
from cloudinit import distros
@@ -453,14 +448,14 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
self.assertFalse(os.path.isfile(self.aptlistfile2))
self.assertFalse(os.path.isfile(self.aptlistfile3))
- @mock.patch("cloudinit.config.cc_apt_configure.util.get_architecture")
- def test_apt_v3_list_rename(self, m_get_architecture):
+ @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture")
+ def test_apt_v3_list_rename(self, m_get_dpkg_architecture):
"""test_apt_v3_list_rename - Test find mirror and apt list renaming"""
pre = "/var/lib/apt/lists"
# filenames are archive dependent
arch = 's390x'
- m_get_architecture.return_value = arch
+ m_get_dpkg_architecture.return_value = arch
component = "ubuntu-ports"
archive = "ports.ubuntu.com"
@@ -487,16 +482,17 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
with mock.patch.object(os, 'rename') as mockren:
with mock.patch.object(glob, 'glob',
return_value=[fromfn]):
- cc_apt_configure.rename_apt_lists(mirrors, TARGET)
+ cc_apt_configure.rename_apt_lists(mirrors, TARGET, arch)
mockren.assert_any_call(fromfn, tofn)
- @mock.patch("cloudinit.config.cc_apt_configure.util.get_architecture")
- def test_apt_v3_list_rename_non_slash(self, m_get_architecture):
+ @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture")
+ def test_apt_v3_list_rename_non_slash(self, m_get_dpkg_architecture):
target = os.path.join(self.tmp, "rename_non_slash")
apt_lists_d = os.path.join(target, "./" + cc_apt_configure.APT_LISTS)
- m_get_architecture.return_value = 'amd64'
+ arch = 'amd64'
+ m_get_dpkg_architecture.return_value = arch
mirror_path = "some/random/path/"
primary = "http://test.ubuntu.com/" + mirror_path
@@ -532,7 +528,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
fpath = os.path.join(apt_lists_d, opre + suff)
util.write_file(fpath, content=fpath)
- cc_apt_configure.rename_apt_lists(mirrors, target)
+ cc_apt_configure.rename_apt_lists(mirrors, target, arch)
found = sorted(os.listdir(apt_lists_d))
self.assertEqual(expected, found)
@@ -625,10 +621,12 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
self.assertEqual(mirrors['SECURITY'],
smir)
- @mock.patch("cloudinit.config.cc_apt_configure.util.get_architecture")
- def test_apt_v3_get_def_mir_non_intel_no_arch(self, m_get_architecture):
+ @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture")
+ def test_apt_v3_get_def_mir_non_intel_no_arch(
+ self, m_get_dpkg_architecture
+ ):
arch = 'ppc64el'
- m_get_architecture.return_value = arch
+ m_get_dpkg_architecture.return_value = arch
expected = {'PRIMARY': 'http://ports.ubuntu.com/ubuntu-ports',
'SECURITY': 'http://ports.ubuntu.com/ubuntu-ports'}
self.assertEqual(expected, cc_apt_configure.get_default_mirrors())
@@ -672,7 +670,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
"security": [{'arches': ["default"],
"search": ["sfailme", smir]}]}
- with mock.patch.object(cc_apt_configure, 'search_for_mirror',
+ with mock.patch.object(cc_apt_configure.util, 'search_for_mirror',
side_effect=[pmir, smir]) as mocksearch:
mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None,
'amd64')
@@ -711,7 +709,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
mockgm.assert_has_calls(calls)
# should not be called, since primary is specified
- with mock.patch.object(cc_apt_configure,
+ with mock.patch.object(cc_apt_configure.util,
'search_for_mirror') as mockse:
mirrors = cc_apt_configure.find_apt_mirror_info(cfg, None, arch)
mockse.assert_not_called()
@@ -976,7 +974,7 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
mocksdns.assert_has_calls(calls)
# first return is for the non-dns call before
- with mock.patch.object(cc_apt_configure, 'search_for_mirror',
+ with mock.patch.object(cc_apt_configure.util, 'search_for_mirror',
side_effect=[None, pmir, None, smir]) as mockse:
mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py
index 06e14db0..286ef771 100644
--- a/tests/unittests/test_handler/test_handler_ca_certs.py
+++ b/tests/unittests/test_handler/test_handler_ca_certs.py
@@ -11,15 +11,8 @@ import logging
import shutil
import tempfile
import unittest
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-try:
- from contextlib import ExitStack
-except ImportError:
- from contextlib2 import ExitStack
+from contextlib import ExitStack
+from unittest import mock
class TestNoConfig(unittest.TestCase):
diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py
index f4311268..513c18b5 100644
--- a/tests/unittests/test_handler/test_handler_chef.py
+++ b/tests/unittests/test_handler/test_handler_chef.py
@@ -4,7 +4,6 @@ import httpretty
import json
import logging
import os
-import six
from cloudinit import cloud
from cloudinit.config import cc_chef
@@ -66,18 +65,18 @@ class TestInstallChefOmnibus(HttprettyTestCase):
cc_chef.install_chef_from_omnibus()
expected_kwargs = {'retries': cc_chef.OMNIBUS_URL_RETRIES,
'url': cc_chef.OMNIBUS_URL}
- self.assertItemsEqual(expected_kwargs, m_rdurl.call_args_list[0][1])
+ self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[0][1])
cc_chef.install_chef_from_omnibus(retries=10)
expected_kwargs = {'retries': 10,
'url': cc_chef.OMNIBUS_URL}
- self.assertItemsEqual(expected_kwargs, m_rdurl.call_args_list[1][1])
+ self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[1][1])
expected_subp_kwargs = {
'args': ['-v', '2.0'],
'basename': 'chef-omnibus-install',
'blob': m_rdurl.return_value.contents,
'capture': False
}
- self.assertItemsEqual(
+ self.assertCountEqual(
expected_subp_kwargs,
m_subp_blob.call_args_list[0][1])
@@ -98,7 +97,7 @@ class TestInstallChefOmnibus(HttprettyTestCase):
'blob': response,
'capture': False
}
- self.assertItemsEqual(expected_kwargs, called_kwargs)
+ self.assertCountEqual(expected_kwargs, called_kwargs)
class TestChef(FilesystemMockingTestCase):
@@ -178,7 +177,7 @@ class TestChef(FilesystemMockingTestCase):
continue
# the value from the cfg overrides that in the default
val = cfg['chef'].get(k, v)
- if isinstance(val, six.string_types):
+ if isinstance(val, str):
self.assertIn(val, c)
c = util.load_file(cc_chef.CHEF_FB_PATH)
self.assertEqual({}, json.loads(c))
diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py
index 5afcacaf..0e51f17a 100644
--- a/tests/unittests/test_handler/test_handler_disk_setup.py
+++ b/tests/unittests/test_handler/test_handler_disk_setup.py
@@ -222,4 +222,22 @@ class TestMkfsCommandHandling(CiTestCase):
'-L', 'without_cmd', '-F', 'are', 'added'],
shell=False)
+ @mock.patch('cloudinit.config.cc_disk_setup.util.which')
+ def test_mkswap(self, m_which, subp, *args):
+ """mkfs observes extra_opts and overwrite settings when cmd is not
+ present."""
+ m_which.side_effect = iter([None, '/sbin/mkswap'])
+ cc_disk_setup.mkfs({
+ 'filesystem': 'swap',
+ 'device': '/dev/xdb1',
+ 'label': 'swap',
+ 'overwrite': True,
+ })
+
+ self.assertEqual([mock.call('mkfs.swap'), mock.call('mkswap')],
+ m_which.call_args_list)
+ subp.assert_called_once_with(
+ ['/sbin/mkswap', '/dev/xdb1', '-L', 'swap', '-f'], shell=False)
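+        # Lookup order asserted above: mkfs() asks which() for
+        # 'mkfs.<fstype>' first and falls back to the bare tool name,
+        # running the first path that exists.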
+
+#
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_etc_hosts.py b/tests/unittests/test_handler/test_handler_etc_hosts.py
index d854afcb..e3778b11 100644
--- a/tests/unittests/test_handler/test_handler_etc_hosts.py
+++ b/tests/unittests/test_handler/test_handler_etc_hosts.py
@@ -44,8 +44,8 @@ class TestHostsFile(t_help.FilesystemMockingTestCase):
self.patchUtils(self.tmp)
cc_update_etc_hosts.handle('test', cfg, cc, LOG, [])
contents = util.load_file('%s/etc/hosts' % self.tmp)
- if '127.0.0.1\tcloud-init.test.us\tcloud-init' not in contents:
- self.assertIsNone('No entry for 127.0.0.1 in etc/hosts')
+ if '127.0.1.1\tcloud-init.test.us\tcloud-init' not in contents:
+ self.assertIsNone('No entry for 127.0.1.1 in etc/hosts')
if '192.168.1.1\tblah.blah.us\tblah' not in contents:
self.assertIsNone('Default etc/hosts content modified')
@@ -64,7 +64,7 @@ class TestHostsFile(t_help.FilesystemMockingTestCase):
self.patchUtils(self.tmp)
cc_update_etc_hosts.handle('test', cfg, cc, LOG, [])
contents = util.load_file('%s/etc/hosts' % self.tmp)
- if '127.0.0.1 cloud-init.test.us cloud-init' not in contents:
- self.assertIsNone('No entry for 127.0.0.1 in etc/hosts')
+ if '127.0.1.1 cloud-init.test.us cloud-init' not in contents:
+ self.assertIsNone('No entry for 127.0.1.1 in etc/hosts')
if '::1 cloud-init.test.us cloud-init' not in contents:
            self.assertIsNone('No entry for ::1 in etc/hosts')
diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py
index 1f39ebe7..501bcca5 100644
--- a/tests/unittests/test_handler/test_handler_growpart.py
+++ b/tests/unittests/test_handler/test_handler_growpart.py
@@ -11,15 +11,8 @@ import logging
import os
import re
import unittest
-
-try:
- from unittest import mock
-except ImportError:
- import mock
-try:
- from contextlib import ExitStack
-except ImportError:
- from contextlib2 import ExitStack
+from contextlib import ExitStack
+from unittest import mock
# growpart:
# mode: auto # off, on, auto, 'growpart'
diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py
index e29a06f9..407aa6c4 100644
--- a/tests/unittests/test_handler/test_handler_locale.py
+++ b/tests/unittests/test_handler/test_handler_locale.py
@@ -17,21 +17,18 @@ from cloudinit.tests import helpers as t_help
from configobj import ConfigObj
-from six import BytesIO
-
import logging
-import mock
import os
import shutil
import tempfile
+from io import BytesIO
+from unittest import mock
LOG = logging.getLogger(__name__)
class TestLocale(t_help.FilesystemMockingTestCase):
- with_logs = True
-
def setUp(self):
super(TestLocale, self).setUp()
self.new_root = tempfile.mkdtemp()
diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
index b63db616..40b521e5 100644
--- a/tests/unittests/test_handler/test_handler_lxd.py
+++ b/tests/unittests/test_handler/test_handler_lxd.py
@@ -5,10 +5,7 @@ from cloudinit.sources import DataSourceNoCloud
from cloudinit import (distros, helpers, cloud)
from cloudinit.tests import helpers as t_help
-try:
- from unittest import mock
-except ImportError:
- import mock
+from unittest import mock
class TestLxd(t_help.CiTestCase):
diff --git a/tests/unittests/test_handler/test_handler_mcollective.py b/tests/unittests/test_handler/test_handler_mcollective.py
index 7eec7352..c013a538 100644
--- a/tests/unittests/test_handler/test_handler_mcollective.py
+++ b/tests/unittests/test_handler/test_handler_mcollective.py
@@ -10,8 +10,8 @@ import configobj
import logging
import os
import shutil
-from six import BytesIO
import tempfile
+from io import BytesIO
LOG = logging.getLogger(__name__)
diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py
index 0fb160be..35e72bd1 100644
--- a/tests/unittests/test_handler/test_handler_mounts.py
+++ b/tests/unittests/test_handler/test_handler_mounts.py
@@ -1,16 +1,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os.path
+from unittest import mock
from cloudinit.config import cc_mounts
from cloudinit.tests import helpers as test_helpers
-try:
- from unittest import mock
-except ImportError:
- import mock
-
class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
@@ -131,6 +127,12 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
cc_mounts.sanitize_devname(
'ephemeral0.1', lambda x: disk_path, mock.Mock()))
+ def test_network_device_returns_network_device(self):
+ disk_path = 'netdevice:/path'
+ self.assertEqual(
+ disk_path,
+ cc_mounts.sanitize_devname(disk_path, None, mock.Mock()))
+
class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
@@ -181,6 +183,18 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase):
return dev
+ def test_swap_integrity(self):
+        '''Ensure the swap file is created correctly and can be enabled
+        with swapon, covering the corner case of:
+        kernel: swapon: swapfile has holes'''
+
+ fstab = '/swap.img swap swap defaults 0 0\n'
+
+ with open(cc_mounts.FSTAB_PATH, 'w') as fd:
+ fd.write(fstab)
+ cc = {'swap': ['filename: /swap.img', 'size: 512', 'maxsize: 512']}
+ cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, [])
+
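+    # Background for the corner case above: a sparse swap file contains
+    # unallocated holes, which the kernel's swapon rejects with the quoted
+    # message; the handler is expected to write the file fully allocated.
+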
def test_fstab_no_swap_device(self):
'''Ensure that cloud-init adds a discovered swap partition
to /etc/fstab.'''
diff --git a/tests/unittests/test_handler/test_handler_puppet.py b/tests/unittests/test_handler/test_handler_puppet.py
index 1494177d..04aa7d03 100644
--- a/tests/unittests/test_handler/test_handler_puppet.py
+++ b/tests/unittests/test_handler/test_handler_puppet.py
@@ -16,8 +16,6 @@ LOG = logging.getLogger(__name__)
@mock.patch('cloudinit.config.cc_puppet.os')
class TestAutostartPuppet(CiTestCase):
- with_logs = True
-
def test_wb_autostart_puppet_updates_puppet_default(self, m_os, m_util):
"""Update /etc/default/puppet to autostart if it exists."""
diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py
index f60dedc2..abecc53b 100644
--- a/tests/unittests/test_handler/test_handler_seed_random.py
+++ b/tests/unittests/test_handler/test_handler_seed_random.py
@@ -12,8 +12,7 @@ from cloudinit.config import cc_seed_random
import gzip
import tempfile
-
-from six import BytesIO
+from io import BytesIO
from cloudinit import cloud
from cloudinit import distros
diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py
index d09ec23a..58abf51a 100644
--- a/tests/unittests/test_handler/test_handler_set_hostname.py
+++ b/tests/unittests/test_handler/test_handler_set_hostname.py
@@ -13,8 +13,8 @@ from configobj import ConfigObj
import logging
import os
import shutil
-from six import BytesIO
import tempfile
+from io import BytesIO
LOG = logging.getLogger(__name__)
diff --git a/tests/unittests/test_handler/test_handler_spacewalk.py b/tests/unittests/test_handler/test_handler_spacewalk.py
index ddbf4a79..410e6f77 100644
--- a/tests/unittests/test_handler/test_handler_spacewalk.py
+++ b/tests/unittests/test_handler/test_handler_spacewalk.py
@@ -6,11 +6,7 @@ from cloudinit import util
from cloudinit.tests import helpers
import logging
-
-try:
- from unittest import mock
-except ImportError:
- import mock
+from unittest import mock
LOG = logging.getLogger(__name__)
diff --git a/tests/unittests/test_handler/test_handler_timezone.py b/tests/unittests/test_handler/test_handler_timezone.py
index 27eedded..50c45363 100644
--- a/tests/unittests/test_handler/test_handler_timezone.py
+++ b/tests/unittests/test_handler/test_handler_timezone.py
@@ -18,8 +18,8 @@ from cloudinit.tests import helpers as t_help
from configobj import ConfigObj
import logging
import shutil
-from six import BytesIO
import tempfile
+from io import BytesIO
LOG = logging.getLogger(__name__)
diff --git a/tests/unittests/test_handler/test_handler_write_files.py b/tests/unittests/test_handler/test_handler_write_files.py
index bc8756ca..727681d3 100644
--- a/tests/unittests/test_handler/test_handler_write_files.py
+++ b/tests/unittests/test_handler/test_handler_write_files.py
@@ -1,17 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.config.cc_write_files import write_files, decode_perms
-from cloudinit import log as logging
-from cloudinit import util
-
-from cloudinit.tests.helpers import CiTestCase, FilesystemMockingTestCase
-
import base64
+import copy
import gzip
+import io
import shutil
-import six
import tempfile
+from cloudinit.config.cc_write_files import (
+ handle, decode_perms, write_files)
+from cloudinit import log as logging
+from cloudinit import util
+
+from cloudinit.tests.helpers import (
+ CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema)
+
LOG = logging.getLogger(__name__)
YAML_TEXT = """
@@ -37,13 +40,90 @@ YAML_CONTENT_EXPECTED = {
'/tmp/message': "hi mom line 1\nhi mom line 2\n",
}
+VALID_SCHEMA = {
+ 'write_files': [
+ {'append': False, 'content': 'a', 'encoding': 'gzip', 'owner': 'jeff',
+ 'path': '/some', 'permissions': '0777'}
+ ]
+}
+
+INVALID_SCHEMA = { # Dropped required path key
+ 'write_files': [
+ {'append': False, 'content': 'a', 'encoding': 'gzip', 'owner': 'jeff',
+ 'permissions': '0777'}
+ ]
+}
+
+
+@skipUnlessJsonSchema()
+@mock.patch('cloudinit.config.cc_write_files.write_files')
+class TestWriteFilesSchema(CiTestCase):
+
+ with_logs = True
+
+ def test_schema_validation_warns_missing_path(self, m_write_files):
+ """The only required file item property is 'path'."""
+ cc = self.tmp_cloud('ubuntu')
+ valid_config = {'write_files': [{'path': '/some/path'}]}
+ handle('cc_write_file', valid_config, cc, LOG, [])
+ self.assertNotIn('Invalid config:', self.logs.getvalue())
+ handle('cc_write_file', INVALID_SCHEMA, cc, LOG, [])
+ self.assertIn('Invalid config:', self.logs.getvalue())
+ self.assertIn("'path' is a required property", self.logs.getvalue())
+
+ def test_schema_validation_warns_non_string_type_for_files(
+ self, m_write_files):
+ """Schema validation warns of non-string values for each file item."""
+ cc = self.tmp_cloud('ubuntu')
+ for key in VALID_SCHEMA['write_files'][0].keys():
+ if key == 'append':
+ key_type = 'boolean'
+ else:
+ key_type = 'string'
+ invalid_config = copy.deepcopy(VALID_SCHEMA)
+ invalid_config['write_files'][0][key] = 1
+ handle('cc_write_file', invalid_config, cc, LOG, [])
+ self.assertIn(
+ mock.call('cc_write_file', invalid_config['write_files']),
+ m_write_files.call_args_list)
+ self.assertIn(
+ 'write_files.0.%s: 1 is not of type \'%s\'' % (key, key_type),
+ self.logs.getvalue())
+ self.assertIn('Invalid config:', self.logs.getvalue())
+
+    def test_schema_validation_warns_on_additional_undefined_properties(
+ self, m_write_files):
+ """Schema validation warns on additional undefined file properties."""
+ cc = self.tmp_cloud('ubuntu')
+ invalid_config = copy.deepcopy(VALID_SCHEMA)
+ invalid_config['write_files'][0]['bogus'] = 'value'
+ handle('cc_write_file', invalid_config, cc, LOG, [])
+ self.assertIn(
+ "Invalid config:\nwrite_files.0: Additional properties"
+ " are not allowed ('bogus' was unexpected)",
+ self.logs.getvalue())
+
class TestWriteFiles(FilesystemMockingTestCase):
+
+ with_logs = True
+
def setUp(self):
super(TestWriteFiles, self).setUp()
self.tmp = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp)
+ @skipUnlessJsonSchema()
+ def test_handler_schema_validation_warns_non_array_type(self):
+ """Schema validation warns of non-array value."""
+ invalid_config = {'write_files': 1}
+ cc = self.tmp_cloud('ubuntu')
+ with self.assertRaises(TypeError):
+ handle('cc_write_file', invalid_config, cc, LOG, [])
+ self.assertIn(
+ 'Invalid config:\nwrite_files: 1 is not of type \'array\'',
+ self.logs.getvalue())
+
def test_simple(self):
self.patchUtils(self.tmp)
expected = "hello world\n"
@@ -138,7 +218,7 @@ class TestDecodePerms(CiTestCase):
def _gzip_bytes(data):
- buf = six.BytesIO()
+ buf = io.BytesIO()
fp = None
try:
fp = gzip.GzipFile(fileobj=buf, mode="wb")
diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py
index b90a3af3..7c61bbf9 100644
--- a/tests/unittests/test_handler/test_handler_yum_add_repo.py
+++ b/tests/unittests/test_handler/test_handler_yum_add_repo.py
@@ -1,15 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.config import cc_yum_add_repo
-from cloudinit import util
-
-from cloudinit.tests import helpers
-
+import configparser
import logging
import shutil
-from six import StringIO
import tempfile
+from cloudinit import util
+from cloudinit.config import cc_yum_add_repo
+from cloudinit.tests import helpers
+
LOG = logging.getLogger(__name__)
@@ -54,7 +53,8 @@ class TestConfig(helpers.FilesystemMockingTestCase):
self.patchUtils(self.tmp)
cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
contents = util.load_file("/etc/yum.repos.d/epel_testing.repo")
- parser = self.parse_and_read(StringIO(contents))
+ parser = configparser.ConfigParser()
+ parser.read_string(contents)
expected = {
'epel_testing': {
'name': 'Extra Packages for Enterprise Linux 5 - Testing',
@@ -90,7 +90,8 @@ class TestConfig(helpers.FilesystemMockingTestCase):
self.patchUtils(self.tmp)
cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
contents = util.load_file("/etc/yum.repos.d/puppetlabs_products.repo")
- parser = self.parse_and_read(StringIO(contents))
+ parser = configparser.ConfigParser()
+ parser.read_string(contents)
expected = {
'puppetlabs_products': {
'name': 'Puppet Labs Products El 6 - $basearch',
diff --git a/tests/unittests/test_handler/test_handler_zypper_add_repo.py b/tests/unittests/test_handler/test_handler_zypper_add_repo.py
index 72ab6c08..0fb1de1a 100644
--- a/tests/unittests/test_handler/test_handler_zypper_add_repo.py
+++ b/tests/unittests/test_handler/test_handler_zypper_add_repo.py
@@ -1,17 +1,15 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import configparser
import glob
+import logging
import os
-from cloudinit.config import cc_zypper_add_repo
from cloudinit import util
-
+from cloudinit.config import cc_zypper_add_repo
from cloudinit.tests import helpers
from cloudinit.tests.helpers import mock
-import logging
-from six import StringIO
-
LOG = logging.getLogger(__name__)
@@ -66,7 +64,8 @@ class TestConfig(helpers.FilesystemMockingTestCase):
root_d = self.tmp_dir()
cc_zypper_add_repo._write_repos(cfg['repos'], root_d)
contents = util.load_file("%s/testing-foo.repo" % root_d)
- parser = self.parse_and_read(StringIO(contents))
+ parser = configparser.ConfigParser()
+ parser.read_string(contents)
expected = {
'testing-foo': {
'name': 'test-foo',
diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py
index e69a47a9..e19d13b8 100644
--- a/tests/unittests/test_handler/test_schema.py
+++ b/tests/unittests/test_handler/test_schema.py
@@ -1,5 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
-
+import cloudinit
from cloudinit.config.schema import (
CLOUD_CONFIG_HEADER, SchemaValidationError, annotated_cloudconfig_file,
get_schema_doc, get_schema, validate_cloudconfig_file,
@@ -10,7 +10,9 @@ from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema
from copy import copy
import os
-from six import StringIO
+import pytest
+from io import StringIO
+from pathlib import Path
from textwrap import dedent
from yaml import safe_load
@@ -20,15 +22,18 @@ class GetSchemaTest(CiTestCase):
def test_get_schema_coalesces_known_schema(self):
"""Every cloudconfig module with schema is listed in allOf keyword."""
schema = get_schema()
- self.assertItemsEqual(
+ self.assertCountEqual(
[
+ 'cc_apt_configure',
'cc_bootcmd',
+ 'cc_locale',
'cc_ntp',
'cc_resizefs',
'cc_runcmd',
'cc_snap',
'cc_ubuntu_advantage',
'cc_ubuntu_drivers',
+ 'cc_write_files',
'cc_zypper_add_repo'
],
[subschema['id'] for subschema in schema['allOf']])
@@ -38,7 +43,7 @@ class GetSchemaTest(CiTestCase):
schema['$schema'])
# FULL_SCHEMA is updated by the get_schema call
from cloudinit.config.schema import FULL_SCHEMA
- self.assertItemsEqual(['id', '$schema', 'allOf'], FULL_SCHEMA.keys())
+ self.assertCountEqual(['id', '$schema', 'allOf'], FULL_SCHEMA.keys())
def test_get_schema_returns_global_when_set(self):
"""When FULL_SCHEMA global is already set, get_schema returns it."""
@@ -110,6 +115,23 @@ class ValidateCloudConfigSchemaTest(CiTestCase):
str(context_mgr.exception))
+class TestCloudConfigExamples:
+ schema = get_schema()
+ params = [
+ (schema["id"], example)
+ for schema in schema["allOf"] for example in schema["examples"]]
+
+ @pytest.mark.parametrize("schema_id,example", params)
+ @skipUnlessJsonSchema()
+ def test_validateconfig_schema_of_example(self, schema_id, example):
+ """ For a given example in a config module we test if it is valid
+ according to the unified schema of all config modules
+ """
+ config_load = safe_load(example)
+ validate_cloudconfig_schema(
+ config_load, self.schema, strict=True)
+
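+# Note on the pattern above: `params` is built at class-definition time, so
+# pytest's parametrize expands every (schema id, example) pair into its own
+# collected test case and one failing example does not mask the rest.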
+
class ValidateCloudConfigFileTest(CiTestCase):
"""Tests for validate_cloudconfig_file."""
@@ -268,6 +290,41 @@ class GetSchemaDocTest(CiTestCase):
"""),
get_schema_doc(full_schema))
+ def test_get_schema_doc_properly_parse_description(self):
+ """get_schema_doc description properly formatted"""
+ full_schema = copy(self.required_schema)
+ full_schema.update(
+ {'properties': {
+ 'p1': {
+ 'type': 'string',
+ 'description': dedent("""\
+ This item
+ has the
+ following options:
+
+ - option1
+ - option2
+ - option3
+
+ The default value is
+ option1""")
+ }
+ }}
+ )
+
+ self.assertIn(
+ dedent("""
+ **Config schema**:
+ **p1:** (string) This item has the following options:
+
+ - option1
+ - option2
+ - option3
+
+ The default value is option1
+ """),
+ get_schema_doc(full_schema))
+
def test_get_schema_doc_raises_key_errors(self):
"""get_schema_doc raises KeyErrors on missing keys."""
for key in self.required_schema:
@@ -345,34 +402,30 @@ class MainTest(CiTestCase):
def test_main_missing_args(self):
"""Main exits non-zero and reports an error on missing parameters."""
- with mock.patch('sys.exit', side_effect=self.sys_exit):
- with mock.patch('sys.argv', ['mycmd']):
- with mock.patch('sys.stderr', new_callable=StringIO) as \
- m_stderr:
- with self.assertRaises(SystemExit) as context_manager:
- main()
+ with mock.patch('sys.argv', ['mycmd']):
+ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+ with self.assertRaises(SystemExit) as context_manager:
+ main()
self.assertEqual(1, context_manager.exception.code)
self.assertEqual(
- 'Expected either --config-file argument or --doc\n',
+ 'Expected either --config-file argument or --docs\n',
m_stderr.getvalue())
def test_main_absent_config_file(self):
"""Main exits non-zero when config file is absent."""
myargs = ['mycmd', '--annotate', '--config-file', 'NOT_A_FILE']
- with mock.patch('sys.exit', side_effect=self.sys_exit):
- with mock.patch('sys.argv', myargs):
- with mock.patch('sys.stderr', new_callable=StringIO) as \
- m_stderr:
- with self.assertRaises(SystemExit) as context_manager:
- main()
+ with mock.patch('sys.argv', myargs):
+ with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
+ with self.assertRaises(SystemExit) as context_manager:
+ main()
self.assertEqual(1, context_manager.exception.code)
self.assertEqual(
'Configfile NOT_A_FILE does not exist\n',
m_stderr.getvalue())
def test_main_prints_docs(self):
- """When --doc parameter is provided, main generates documentation."""
- myargs = ['mycmd', '--doc']
+ """When --docs parameter is provided, main generates documentation."""
+ myargs = ['mycmd', '--docs', 'all']
with mock.patch('sys.argv', myargs):
with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
self.assertEqual(0, main(), 'Expected 0 exit code')
@@ -430,4 +483,23 @@ class CloudTestsIntegrationTest(CiTestCase):
if errors:
raise AssertionError(', '.join(errors))
+
+def _get_schema_doc_examples():
+ examples_dir = Path(
+ cloudinit.__file__).parent.parent / 'doc' / 'examples'
+ assert examples_dir.is_dir()
+
+ all_text_files = (f for f in examples_dir.glob('cloud-config*.txt')
+ if not f.name.startswith('cloud-config-archive'))
+ return all_text_files
+
+
+class TestSchemaDocExamples:
+ schema = get_schema()
+
+ @pytest.mark.parametrize("example_path", _get_schema_doc_examples())
+ @skipUnlessJsonSchema()
+ def test_schema_doc_examples(self, example_path):
+ validate_cloudconfig_file(str(example_path), self.schema)
+
# vi: ts=4 expandtab syntax=python
diff --git a/tests/unittests/test_log.py b/tests/unittests/test_log.py
index cd6296d6..e069a487 100644
--- a/tests/unittests/test_log.py
+++ b/tests/unittests/test_log.py
@@ -2,14 +2,15 @@
"""Tests for cloudinit.log """
-from cloudinit.analyze.dump import CLOUD_INIT_ASCTIME_FMT
-from cloudinit import log as ci_logging
-from cloudinit.tests.helpers import CiTestCase
import datetime
+import io
import logging
-import six
import time
+from cloudinit import log as ci_logging
+from cloudinit.analyze.dump import CLOUD_INIT_ASCTIME_FMT
+from cloudinit.tests.helpers import CiTestCase
+
class TestCloudInitLogger(CiTestCase):
@@ -18,7 +19,7 @@ class TestCloudInitLogger(CiTestCase):
# of sys.stderr, we'll plug in a StringIO() object so we can see
# what gets logged
logging.Formatter.converter = time.gmtime
- self.ci_logs = six.StringIO()
+ self.ci_logs = io.StringIO()
self.ci_root = logging.getLogger()
console = logging.StreamHandler(self.ci_logs)
console.setFormatter(logging.Formatter(ci_logging.DEF_CON_FORMAT))
diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py
index 3a5072c7..10871bcf 100644
--- a/tests/unittests/test_merging.py
+++ b/tests/unittests/test_merging.py
@@ -13,13 +13,11 @@ import glob
import os
import random
import re
-import six
import string
SOURCE_PAT = "source*.*yaml"
EXPECTED_PAT = "expected%s.yaml"
-TYPES = [dict, str, list, tuple, None]
-TYPES.extend(six.integer_types)
+TYPES = [dict, str, list, tuple, None, int]
def _old_mergedict(src, cand):
@@ -85,7 +83,7 @@ def _make_dict(current_depth, max_depth, rand):
pass
if t in [tuple]:
base = tuple(base)
- elif t in six.integer_types:
+ elif t in [int]:
base = rand.randint(0, 2 ** 8)
elif t in [str]:
base = _random_str(rand)
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index 01119e0a..e075a64c 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -24,6 +24,7 @@ import re
import textwrap
from yaml.serializer import Serializer
+import pytest
DHCP_CONTENT_1 = """
DEVICE='eth0'
@@ -81,7 +82,7 @@ DHCP6_EXPECTED_1 = {
STATIC_CONTENT_1 = """
DEVICE='eth1'
-PROTO='static'
+PROTO='none'
IPV4ADDR='10.0.0.2'
IPV4BROADCAST='10.0.0.255'
IPV4NETMASK='255.255.255.0'
@@ -489,18 +490,11 @@ OS_SAMPLES = [
"""
# Created by cloud-init on instance boot automatically, do not edit.
#
-BOOTPROTO=none
-DEFROUTE=yes
-DEVICE=eth0
-GATEWAY=172.19.3.254
-HWADDR=fa:16:3e:ed:9a:59
+BOOTPROTO=static
IPADDR=172.19.1.34
+LLADDR=fa:16:3e:ed:9a:59
NETMASK=255.255.252.0
-NM_CONTROLLED=no
-ONBOOT=yes
STARTMODE=auto
-TYPE=Ethernet
-USERCTL=no
""".lstrip()),
('etc/resolv.conf',
"""
@@ -532,7 +526,6 @@ IPADDR=172.19.1.34
NETMASK=255.255.252.0
NM_CONTROLLED=no
ONBOOT=yes
-STARTMODE=auto
TYPE=Ethernet
USERCTL=no
""".lstrip()),
@@ -591,20 +584,13 @@ dns = none
"""
# Created by cloud-init on instance boot automatically, do not edit.
#
-BOOTPROTO=none
-DEFROUTE=yes
-DEVICE=eth0
-GATEWAY=172.19.3.254
-HWADDR=fa:16:3e:ed:9a:59
+BOOTPROTO=static
IPADDR=172.19.1.34
IPADDR1=10.0.0.10
+LLADDR=fa:16:3e:ed:9a:59
NETMASK=255.255.252.0
NETMASK1=255.255.255.0
-NM_CONTROLLED=no
-ONBOOT=yes
STARTMODE=auto
-TYPE=Ethernet
-USERCTL=no
""".lstrip()),
('etc/resolv.conf',
"""
@@ -638,7 +624,6 @@ NETMASK=255.255.252.0
NETMASK1=255.255.255.0
NM_CONTROLLED=no
ONBOOT=yes
-STARTMODE=auto
TYPE=Ethernet
USERCTL=no
""".lstrip()),
@@ -717,25 +702,14 @@ dns = none
"""
# Created by cloud-init on instance boot automatically, do not edit.
#
-BOOTPROTO=none
-DEFROUTE=yes
-DEVICE=eth0
-GATEWAY=172.19.3.254
-HWADDR=fa:16:3e:ed:9a:59
+BOOTPROTO=static
IPADDR=172.19.1.34
IPADDR6=2001:DB8::10/64
-IPADDR6_0=2001:DB9::10/64
+IPADDR6_1=2001:DB9::10/64
IPADDR6_2=2001:DB10::10/64
-IPV6ADDR=2001:DB8::10/64
-IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64"
-IPV6INIT=yes
-IPV6_DEFAULTGW=2001:DB8::1
+LLADDR=fa:16:3e:ed:9a:59
NETMASK=255.255.252.0
-NM_CONTROLLED=no
-ONBOOT=yes
STARTMODE=auto
-TYPE=Ethernet
-USERCTL=no
""".lstrip()),
('etc/resolv.conf',
"""
@@ -764,9 +738,6 @@ DEVICE=eth0
GATEWAY=172.19.3.254
HWADDR=fa:16:3e:ed:9a:59
IPADDR=172.19.1.34
-IPADDR6=2001:DB8::10/64
-IPADDR6_0=2001:DB9::10/64
-IPADDR6_2=2001:DB10::10/64
IPV6ADDR=2001:DB8::10/64
IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64"
IPV6INIT=yes
@@ -774,7 +745,6 @@ IPV6_DEFAULTGW=2001:DB8::1
NETMASK=255.255.252.0
NM_CONTROLLED=no
ONBOOT=yes
-STARTMODE=auto
TYPE=Ethernet
USERCTL=no
""".lstrip()),
@@ -884,14 +854,25 @@ NETWORK_CONFIGS = {
via: 65.61.151.37
set-name: eth99
""").rstrip(' '),
- 'expected_sysconfig': {
+ 'expected_sysconfig_opensuse': {
+ 'ifcfg-eth1': textwrap.dedent("""\
+ BOOTPROTO=static
+ LLADDR=cf:d6:af:48:e8:80
+ STARTMODE=auto"""),
+ 'ifcfg-eth99': textwrap.dedent("""\
+ BOOTPROTO=dhcp4
+ LLADDR=c0:d6:9f:2c:e8:80
+ IPADDR=192.168.21.3
+ NETMASK=255.255.255.0
+ STARTMODE=auto"""),
+ },
+ 'expected_sysconfig_rhel': {
'ifcfg-eth1': textwrap.dedent("""\
BOOTPROTO=none
DEVICE=eth1
HWADDR=cf:d6:af:48:e8:80
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no"""),
'ifcfg-eth99': textwrap.dedent("""\
@@ -909,7 +890,6 @@ NETWORK_CONFIGS = {
METRIC=10000
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no"""),
},
@@ -963,6 +943,12 @@ NETWORK_CONFIGS = {
dhcp4: true
dhcp6: true
""").rstrip(' '),
+ 'expected_sysconfig_opensuse': {
+ 'ifcfg-iface0': textwrap.dedent("""\
+ BOOTPROTO=dhcp
+ DHCLIENT6_MODE=managed
+ STARTMODE=auto""")
+ },
'yaml': textwrap.dedent("""\
version: 1
config:
@@ -1013,18 +999,26 @@ NETWORK_CONFIGS = {
address: 2001:1::1/64
mtu: 1500
""").rstrip(' '),
- 'expected_sysconfig': {
+ 'expected_sysconfig_opensuse': {
+ 'ifcfg-iface0': textwrap.dedent("""\
+ BOOTPROTO=static
+ IPADDR=192.168.14.2
+ IPADDR6=2001:1::1/64
+ NETMASK=255.255.255.0
+ STARTMODE=auto
+ MTU=9000
+ """),
+ },
+ 'expected_sysconfig_rhel': {
'ifcfg-iface0': textwrap.dedent("""\
BOOTPROTO=none
DEVICE=iface0
IPADDR=192.168.14.2
- IPADDR6=2001:1::1/64
IPV6ADDR=2001:1::1/64
IPV6INIT=yes
NETMASK=255.255.255.0
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
MTU=9000
@@ -1032,6 +1026,23 @@ NETWORK_CONFIGS = {
"""),
},
},
+ 'v6_and_v4': {
+ 'expected_sysconfig_opensuse': {
+ 'ifcfg-iface0': textwrap.dedent("""\
+ BOOTPROTO=dhcp
+ DHCLIENT6_MODE=managed
+ STARTMODE=auto""")
+ },
+ 'yaml': textwrap.dedent("""\
+ version: 1
+ config:
+ - type: 'physical'
+ name: 'iface0'
+ subnets:
+ - type: dhcp6
+ - type: dhcp4
+ """).rstrip(' '),
+ },
'dhcpv6_only': {
'expected_eni': textwrap.dedent("""\
auto lo
@@ -1055,7 +1066,14 @@ NETWORK_CONFIGS = {
subnets:
- {'type': 'dhcp6'}
""").rstrip(' '),
- 'expected_sysconfig': {
+ 'expected_sysconfig_opensuse': {
+ 'ifcfg-iface0': textwrap.dedent("""\
+ BOOTPROTO=dhcp6
+ DHCLIENT6_MODE=managed
+ STARTMODE=auto
+ """),
+ },
+ 'expected_sysconfig_rhel': {
'ifcfg-iface0': textwrap.dedent("""\
BOOTPROTO=none
DEVICE=iface0
@@ -1064,7 +1082,6 @@ NETWORK_CONFIGS = {
DEVICE=iface0
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""),
@@ -1103,7 +1120,14 @@ NETWORK_CONFIGS = {
dhcp6: true
accept-ra: true
""").rstrip(' '),
- 'expected_sysconfig': {
+ 'expected_sysconfig_opensuse': {
+ 'ifcfg-iface0': textwrap.dedent("""\
+ BOOTPROTO=dhcp6
+ DHCLIENT6_MODE=managed
+ STARTMODE=auto
+ """),
+ },
+ 'expected_sysconfig_rhel': {
'ifcfg-iface0': textwrap.dedent("""\
BOOTPROTO=none
DEVICE=iface0
@@ -1113,7 +1137,6 @@ NETWORK_CONFIGS = {
DEVICE=iface0
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""),
@@ -1152,7 +1175,14 @@ NETWORK_CONFIGS = {
dhcp6: true
accept-ra: false
""").rstrip(' '),
- 'expected_sysconfig': {
+ 'expected_sysconfig_opensuse': {
+ 'ifcfg-iface0': textwrap.dedent("""\
+ BOOTPROTO=dhcp6
+ DHCLIENT6_MODE=managed
+ STARTMODE=auto
+ """),
+ },
+ 'expected_sysconfig_rhel': {
'ifcfg-iface0': textwrap.dedent("""\
BOOTPROTO=none
DEVICE=iface0
@@ -1162,7 +1192,6 @@ NETWORK_CONFIGS = {
DEVICE=iface0
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""),
@@ -1192,7 +1221,14 @@ NETWORK_CONFIGS = {
subnets:
- {'type': 'ipv6_slaac'}
""").rstrip(' '),
- 'expected_sysconfig': {
+ 'expected_sysconfig_opensuse': {
+ 'ifcfg-iface0': textwrap.dedent("""\
+ BOOTPROTO=dhcp6
+ DHCLIENT6_MODE=info
+ STARTMODE=auto
+ """),
+ },
+ 'expected_sysconfig_rhel': {
'ifcfg-iface0': textwrap.dedent("""\
BOOTPROTO=none
DEVICE=iface0
@@ -1201,7 +1237,6 @@ NETWORK_CONFIGS = {
DEVICE=iface0
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""),
@@ -1231,7 +1266,14 @@ NETWORK_CONFIGS = {
subnets:
- {'type': 'ipv6_dhcpv6-stateless'}
""").rstrip(' '),
- 'expected_sysconfig': {
+ 'expected_sysconfig_opensuse': {
+ 'ifcfg-iface0': textwrap.dedent("""\
+ BOOTPROTO=dhcp6
+ DHCLIENT6_MODE=info
+ STARTMODE=auto
+ """),
+ },
+ 'expected_sysconfig_rhel': {
'ifcfg-iface0': textwrap.dedent("""\
BOOTPROTO=none
DEVICE=iface0
@@ -1242,7 +1284,6 @@ NETWORK_CONFIGS = {
DEVICE=iface0
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""),
@@ -1273,7 +1314,14 @@ NETWORK_CONFIGS = {
- {'type': 'ipv6_dhcpv6-stateful'}
accept-ra: true
""").rstrip(' '),
- 'expected_sysconfig': {
+ 'expected_sysconfig_opensuse': {
+ 'ifcfg-iface0': textwrap.dedent("""\
+ BOOTPROTO=dhcp6
+ DHCLIENT6_MODE=managed
+ STARTMODE=auto
+ """),
+ },
+ 'expected_sysconfig_rhel': {
'ifcfg-iface0': textwrap.dedent("""\
BOOTPROTO=none
DEVICE=iface0
@@ -1283,7 +1331,6 @@ NETWORK_CONFIGS = {
DEVICE=iface0
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""),
@@ -1478,7 +1525,80 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
- sacchromyces.maas
- brettanomyces.maas
""").rstrip(' '),
- 'expected_sysconfig': {
+ 'expected_sysconfig_opensuse': {
+ 'ifcfg-bond0': textwrap.dedent("""\
+ BONDING_MASTER=yes
+ BONDING_OPTS="mode=active-backup """
+ """xmit_hash_policy=layer3+4 """
+ """miimon=100"
+ BONDING_SLAVE_0=eth1
+ BONDING_SLAVE_1=eth2
+ BOOTPROTO=dhcp6
+ DHCLIENT6_MODE=managed
+ LLADDR=aa:bb:cc:dd:ee:ff
+ STARTMODE=auto"""),
+ 'ifcfg-bond0.200': textwrap.dedent("""\
+ BOOTPROTO=dhcp4
+ ETHERDEVICE=bond0
+ STARTMODE=auto
+ VLAN_ID=200"""),
+ 'ifcfg-br0': textwrap.dedent("""\
+ BRIDGE_AGEINGTIME=250
+ BOOTPROTO=static
+ IPADDR=192.168.14.2
+ IPADDR6=2001:1::1/64
+ LLADDRESS=bb:bb:bb:bb:bb:aa
+ NETMASK=255.255.255.0
+ BRIDGE_PRIORITY=22
+ BRIDGE_PORTS='eth3 eth4'
+ STARTMODE=auto
+ BRIDGE_STP=off"""),
+ 'ifcfg-eth0': textwrap.dedent("""\
+ BOOTPROTO=static
+ LLADDR=c0:d6:9f:2c:e8:80
+ STARTMODE=auto"""),
+ 'ifcfg-eth0.101': textwrap.dedent("""\
+ BOOTPROTO=static
+ IPADDR=192.168.0.2
+ IPADDR1=192.168.2.10
+ MTU=1500
+ NETMASK=255.255.255.0
+ NETMASK1=255.255.255.0
+ ETHERDEVICE=eth0
+ STARTMODE=auto
+ VLAN_ID=101"""),
+ 'ifcfg-eth1': textwrap.dedent("""\
+ BOOTPROTO=none
+ LLADDR=aa:d6:9f:2c:e8:80
+ STARTMODE=hotplug"""),
+ 'ifcfg-eth2': textwrap.dedent("""\
+ BOOTPROTO=none
+ LLADDR=c0:bb:9f:2c:e8:80
+ STARTMODE=hotplug"""),
+ 'ifcfg-eth3': textwrap.dedent("""\
+ BOOTPROTO=static
+ BRIDGE=yes
+ LLADDR=66:bb:9f:2c:e8:80
+ STARTMODE=auto"""),
+ 'ifcfg-eth4': textwrap.dedent("""\
+ BOOTPROTO=static
+ BRIDGE=yes
+ LLADDR=98:bb:9f:2c:e8:80
+ STARTMODE=auto"""),
+ 'ifcfg-eth5': textwrap.dedent("""\
+ BOOTPROTO=dhcp
+ LLADDR=98:bb:9f:2c:e8:8a
+ STARTMODE=manual"""),
+ 'ifcfg-ib0': textwrap.dedent("""\
+ BOOTPROTO=static
+ LLADDR=a0:00:02:20:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:15:e2:c1
+ IPADDR=192.168.200.7
+ MTU=9000
+ NETMASK=255.255.255.0
+ STARTMODE=auto
+ TYPE=InfiniBand"""),
+ },
+ 'expected_sysconfig_rhel': {
'ifcfg-bond0': textwrap.dedent("""\
BONDING_MASTER=yes
BONDING_OPTS="mode=active-backup """
@@ -1493,7 +1613,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
MACADDR=aa:bb:cc:dd:ee:ff
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Bond
USERCTL=no"""),
'ifcfg-bond0.200': textwrap.dedent("""\
@@ -1503,7 +1622,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
NM_CONTROLLED=no
ONBOOT=yes
PHYSDEV=bond0
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
VLAN=yes"""),
@@ -1513,7 +1631,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
DEFROUTE=yes
DEVICE=br0
IPADDR=192.168.14.2
- IPADDR6=2001:1::1/64
IPV6ADDR=2001:1::1/64
IPV6INIT=yes
IPV6_DEFAULTGW=2001:4800:78ff:1b::1
@@ -1522,7 +1639,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
NM_CONTROLLED=no
ONBOOT=yes
PRIO=22
- STARTMODE=auto
STP=no
TYPE=Bridge
USERCTL=no"""),
@@ -1532,7 +1648,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
HWADDR=c0:d6:9f:2c:e8:80
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no"""),
'ifcfg-eth0.101': textwrap.dedent("""\
@@ -1551,7 +1666,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
NM_CONTROLLED=no
ONBOOT=yes
PHYSDEV=eth0
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
VLAN=yes"""),
@@ -1562,7 +1676,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
MASTER=bond0
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
SLAVE=yes
TYPE=Ethernet
USERCTL=no"""),
@@ -1573,7 +1686,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
MASTER=bond0
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
SLAVE=yes
TYPE=Ethernet
USERCTL=no"""),
@@ -1584,7 +1696,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
HWADDR=66:bb:9f:2c:e8:80
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no"""),
'ifcfg-eth4': textwrap.dedent("""\
@@ -1594,7 +1705,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
HWADDR=98:bb:9f:2c:e8:80
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no"""),
'ifcfg-eth5': textwrap.dedent("""\
@@ -1604,7 +1714,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
HWADDR=98:bb:9f:2c:e8:8a
NM_CONTROLLED=no
ONBOOT=no
- STARTMODE=manual
TYPE=Ethernet
USERCTL=no"""),
'ifcfg-ib0': textwrap.dedent("""\
@@ -1616,7 +1725,6 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true
NETMASK=255.255.255.0
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=InfiniBand
USERCTL=no"""),
},
@@ -2012,58 +2120,29 @@ iface bond0 inet6 static
"""fail_over_mac=active """
"""primary=bond0s0 """
"""primary_reselect=always"
- BONDING_SLAVE0=bond0s0
- BONDING_SLAVE1=bond0s1
- BOOTPROTO=none
- DEFROUTE=yes
- DEVICE=bond0
- GATEWAY=192.168.0.1
- MACADDR=aa:bb:cc:dd:e8:ff
+ BONDING_SLAVE_0=bond0s0
+ BONDING_SLAVE_1=bond0s1
+ BOOTPROTO=static
+ LLADDR=aa:bb:cc:dd:e8:ff
IPADDR=192.168.0.2
IPADDR1=192.168.1.2
IPADDR6=2001:1::1/92
- IPV6ADDR=2001:1::1/92
- IPV6INIT=yes
MTU=9000
NETMASK=255.255.255.0
NETMASK1=255.255.255.0
- NM_CONTROLLED=no
- ONBOOT=yes
STARTMODE=auto
- TYPE=Bond
- USERCTL=no
"""),
'ifcfg-bond0s0': textwrap.dedent("""\
BOOTPROTO=none
- DEVICE=bond0s0
- HWADDR=aa:bb:cc:dd:e8:00
- MASTER=bond0
- NM_CONTROLLED=no
- ONBOOT=yes
- SLAVE=yes
- STARTMODE=auto
- TYPE=Ethernet
- USERCTL=no
- """),
- 'ifroute-bond0': textwrap.dedent("""\
- ADDRESS0=10.1.3.0
- GATEWAY0=192.168.0.3
- NETMASK0=255.255.255.0
+ LLADDR=aa:bb:cc:dd:e8:00
+ STARTMODE=hotplug
"""),
'ifcfg-bond0s1': textwrap.dedent("""\
BOOTPROTO=none
- DEVICE=bond0s1
- HWADDR=aa:bb:cc:dd:e8:01
- MASTER=bond0
- NM_CONTROLLED=no
- ONBOOT=yes
- SLAVE=yes
- STARTMODE=auto
- TYPE=Ethernet
- USERCTL=no
+ LLADDR=aa:bb:cc:dd:e8:01
+ STARTMODE=hotplug
"""),
},
-
'expected_sysconfig_rhel': {
'ifcfg-bond0': textwrap.dedent("""\
BONDING_MASTER=yes
@@ -2082,7 +2161,6 @@ iface bond0 inet6 static
MACADDR=aa:bb:cc:dd:e8:ff
IPADDR=192.168.0.2
IPADDR1=192.168.1.2
- IPADDR6=2001:1::1/92
IPV6ADDR=2001:1::1/92
IPV6INIT=yes
MTU=9000
@@ -2090,7 +2168,6 @@ iface bond0 inet6 static
NETMASK1=255.255.255.0
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Bond
USERCTL=no
"""),
@@ -2102,7 +2179,6 @@ iface bond0 inet6 static
NM_CONTROLLED=no
ONBOOT=yes
SLAVE=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""),
@@ -2125,7 +2201,6 @@ iface bond0 inet6 static
NM_CONTROLLED=no
ONBOOT=yes
SLAVE=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""),
@@ -2156,14 +2231,32 @@ iface bond0 inet6 static
netmask: '::'
network: '::'
"""),
- 'expected_sysconfig': {
+ 'expected_sysconfig_opensuse': {
+ # TODO(RJS): proper BOOTPROTO setting unknown; ask Marius
+ 'ifcfg-en0': textwrap.dedent("""\
+ BOOTPROTO=static
+ LLADDR=aa:bb:cc:dd:e8:00
+ STARTMODE=auto"""),
+ 'ifcfg-en0.99': textwrap.dedent("""\
+ BOOTPROTO=static
+ IPADDR=192.168.2.2
+ IPADDR1=192.168.1.2
+ IPADDR6=2001:1::bbbb/96
+ MTU=2222
+ NETMASK=255.255.255.0
+ NETMASK1=255.255.255.0
+ STARTMODE=auto
+ ETHERDEVICE=en0
+ VLAN_ID=99
+ """),
+ },
+ 'expected_sysconfig_rhel': {
'ifcfg-en0': textwrap.dedent("""\
BOOTPROTO=none
DEVICE=en0
HWADDR=aa:bb:cc:dd:e8:00
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no"""),
'ifcfg-en0.99': textwrap.dedent("""\
@@ -2173,7 +2266,6 @@ iface bond0 inet6 static
GATEWAY=192.168.1.1
IPADDR=192.168.2.2
IPADDR1=192.168.1.2
- IPADDR6=2001:1::bbbb/96
IPV6ADDR=2001:1::bbbb/96
IPV6INIT=yes
IPV6_DEFAULTGW=2001:1::1
@@ -2183,7 +2275,6 @@ iface bond0 inet6 static
NM_CONTROLLED=no
ONBOOT=yes
PHYSDEV=en0
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
VLAN=yes"""),
@@ -2216,7 +2307,32 @@ iface bond0 inet6 static
subnets:
- type: static
address: 192.168.2.2/24"""),
- 'expected_sysconfig': {
+ 'expected_sysconfig_opensuse': {
+ 'ifcfg-br0': textwrap.dedent("""\
+ BOOTPROTO=static
+ IPADDR=192.168.2.2
+ NETMASK=255.255.255.0
+ STARTMODE=auto
+ BRIDGE_STP=off
+ BRIDGE_PRIORITY=22
+ BRIDGE_PORTS='eth0 eth1'
+ """),
+ 'ifcfg-eth0': textwrap.dedent("""\
+ BOOTPROTO=static
+ BRIDGE=yes
+ LLADDR=52:54:00:12:34:00
+ IPADDR6=2001:1::100/96
+ STARTMODE=auto
+ """),
+ 'ifcfg-eth1': textwrap.dedent("""\
+ BOOTPROTO=static
+ BRIDGE=yes
+ LLADDR=52:54:00:12:34:01
+ IPADDR6=2001:1::101/96
+ STARTMODE=auto
+ """),
+ },
+ 'expected_sysconfig_rhel': {
'ifcfg-br0': textwrap.dedent("""\
BOOTPROTO=none
DEVICE=br0
@@ -2225,7 +2341,6 @@ iface bond0 inet6 static
NM_CONTROLLED=no
ONBOOT=yes
PRIO=22
- STARTMODE=auto
STP=no
TYPE=Bridge
USERCTL=no
@@ -2235,12 +2350,10 @@ iface bond0 inet6 static
BRIDGE=br0
DEVICE=eth0
HWADDR=52:54:00:12:34:00
- IPADDR6=2001:1::100/96
IPV6ADDR=2001:1::100/96
IPV6INIT=yes
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""),
@@ -2249,12 +2362,10 @@ iface bond0 inet6 static
BRIDGE=br0
DEVICE=eth1
HWADDR=52:54:00:12:34:01
- IPADDR6=2001:1::101/96
IPV6ADDR=2001:1::101/96
IPV6INIT=yes
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""),
@@ -2320,7 +2431,27 @@ iface bond0 inet6 static
macaddress: 52:54:00:12:34:ff
set-name: eth2
"""),
- 'expected_sysconfig': {
+ 'expected_sysconfig_opensuse': {
+ 'ifcfg-eth0': textwrap.dedent("""\
+ BOOTPROTO=static
+ LLADDR=52:54:00:12:34:00
+ IPADDR=192.168.1.2
+ NETMASK=255.255.255.0
+ STARTMODE=manual
+ """),
+ 'ifcfg-eth1': textwrap.dedent("""\
+ BOOTPROTO=static
+ LLADDR=52:54:00:12:34:aa
+ MTU=1480
+ STARTMODE=auto
+ """),
+ 'ifcfg-eth2': textwrap.dedent("""\
+ BOOTPROTO=static
+ LLADDR=52:54:00:12:34:ff
+ STARTMODE=manual
+ """),
+ },
+ 'expected_sysconfig_rhel': {
'ifcfg-eth0': textwrap.dedent("""\
BOOTPROTO=none
DEVICE=eth0
@@ -2329,7 +2460,6 @@ iface bond0 inet6 static
NETMASK=255.255.255.0
NM_CONTROLLED=no
ONBOOT=no
- STARTMODE=manual
TYPE=Ethernet
USERCTL=no
"""),
@@ -2340,7 +2470,6 @@ iface bond0 inet6 static
MTU=1480
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""),
@@ -2350,7 +2479,6 @@ iface bond0 inet6 static
HWADDR=52:54:00:12:34:ff
NM_CONTROLLED=no
ONBOOT=no
- STARTMODE=manual
TYPE=Ethernet
USERCTL=no
"""),
@@ -2681,7 +2809,7 @@ class TestRhelSysConfigRendering(CiTestCase):
header = ('# Created by cloud-init on instance boot automatically, '
'do not edit.\n#\n')
- expected_name = 'expected_sysconfig'
+ expected_name = 'expected_sysconfig_rhel'
def _get_renderer(self):
distro_cls = distros.fetch('rhel')
@@ -2768,7 +2896,6 @@ DEVICE=eth1000
HWADDR=07-1c-c6-75-a4-be
NM_CONTROLLED=no
ONBOOT=yes
-STARTMODE=auto
TYPE=Ethernet
USERCTL=no
""".lstrip()
@@ -2890,7 +3017,6 @@ IPADDR=10.0.2.15
NETMASK=255.255.255.0
NM_CONTROLLED=no
ONBOOT=yes
-STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""
@@ -2922,7 +3048,6 @@ MTU=1500
NETMASK=255.255.240.0
NM_CONTROLLED=no
ONBOOT=yes
-STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""
@@ -2937,7 +3062,6 @@ HWADDR=fa:16:3e:b1:ca:29
MTU=9000
NM_CONTROLLED=no
ONBOOT=yes
-STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""
@@ -2963,7 +3087,6 @@ BOOTPROTO=dhcp
DEVICE=eth0
NM_CONTROLLED=no
ONBOOT=yes
-STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""
@@ -2972,10 +3095,9 @@ USERCTL=no
self.assertEqual(resolvconf_content, found['/etc/resolv.conf'])
def test_bond_config(self):
- expected_name = 'expected_sysconfig_rhel'
entry = NETWORK_CONFIGS['bond']
found = self._render_and_read(network_config=yaml.load(entry['yaml']))
- self._compare_files_to_expected(entry[expected_name], found)
+ self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
def test_vlan_config(self):
@@ -3163,14 +3285,12 @@ USERCTL=no
GATEWAY=192.168.42.1
HWADDR=52:54:00:ab:cd:ef
IPADDR=192.168.42.100
- IPADDR6=2001:db8::100/32
IPV6ADDR=2001:db8::100/32
IPV6INIT=yes
IPV6_DEFAULTGW=2001:db8::1
NETMASK=255.255.255.0
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""),
@@ -3196,7 +3316,6 @@ USERCTL=no
DEVICE=eno1
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""),
@@ -3209,7 +3328,6 @@ USERCTL=no
NM_CONTROLLED=no
ONBOOT=yes
PHYSDEV=eno1
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
VLAN=yes
@@ -3240,7 +3358,6 @@ USERCTL=no
NETMASK=255.255.255.192
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Bond
USERCTL=no
"""),
@@ -3252,7 +3369,6 @@ USERCTL=no
NM_CONTROLLED=no
ONBOOT=yes
SLAVE=yes
- STARTMODE=auto
TYPE=Bond
USERCTL=no
"""),
@@ -3264,7 +3380,6 @@ USERCTL=no
NM_CONTROLLED=no
ONBOOT=yes
SLAVE=yes
- STARTMODE=auto
TYPE=Bond
USERCTL=no
""")
@@ -3288,7 +3403,6 @@ USERCTL=no
METRIC=100
NM_CONTROLLED=no
ONBOOT=yes
- STARTMODE=auto
TYPE=Ethernet
USERCTL=no
"""),
@@ -3311,7 +3425,7 @@ class TestOpenSuseSysConfigRendering(CiTestCase):
header = ('# Created by cloud-init on instance boot automatically, '
'do not edit.\n#\n')
- expected_name = 'expected_sysconfig'
+ expected_name = 'expected_sysconfig_opensuse'
def _get_renderer(self):
distro_cls = distros.fetch('opensuse')
@@ -3383,92 +3497,89 @@ class TestOpenSuseSysConfigRendering(CiTestCase):
expected_content = """
# Created by cloud-init on instance boot automatically, do not edit.
#
-BOOTPROTO=dhcp
-DEVICE=eth1000
-HWADDR=07-1c-c6-75-a4-be
-NM_CONTROLLED=no
-ONBOOT=yes
+BOOTPROTO=dhcp4
+LLADDR=07-1c-c6-75-a4-be
STARTMODE=auto
-TYPE=Ethernet
-USERCTL=no
""".lstrip()
self.assertEqual(expected_content, content)
- def test_multiple_ipv4_default_gateways(self):
- """ValueError is raised when duplicate ipv4 gateways exist."""
- net_json = {
- "services": [{"type": "dns", "address": "172.19.0.12"}],
- "networks": [{
- "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
- "type": "ipv4", "netmask": "255.255.252.0",
- "link": "tap1a81968a-79",
- "routes": [{
- "netmask": "0.0.0.0",
- "network": "0.0.0.0",
- "gateway": "172.19.3.254",
- }, {
- "netmask": "0.0.0.0", # A second default gateway
- "network": "0.0.0.0",
- "gateway": "172.20.3.254",
- }],
- "ip_address": "172.19.1.34", "id": "network0"
- }],
- "links": [
- {
- "ethernet_mac_address": "fa:16:3e:ed:9a:59",
- "mtu": None, "type": "bridge", "id":
- "tap1a81968a-79",
- "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
- },
- ],
- }
- macs = {'fa:16:3e:ed:9a:59': 'eth0'}
- render_dir = self.tmp_dir()
- network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
- renderer = self._get_renderer()
- with self.assertRaises(ValueError):
- renderer.render_network_state(ns, target=render_dir)
- self.assertEqual([], os.listdir(render_dir))
-
- def test_multiple_ipv6_default_gateways(self):
- """ValueError is raised when duplicate ipv6 gateways exist."""
- net_json = {
- "services": [{"type": "dns", "address": "172.19.0.12"}],
- "networks": [{
- "network_id": "public-ipv6",
- "type": "ipv6", "netmask": "",
- "link": "tap1a81968a-79",
- "routes": [{
- "gateway": "2001:DB8::1",
- "netmask": "::",
- "network": "::"
- }, {
- "gateway": "2001:DB9::1",
- "netmask": "::",
- "network": "::"
- }],
- "ip_address": "2001:DB8::10", "id": "network1"
- }],
- "links": [
- {
- "ethernet_mac_address": "fa:16:3e:ed:9a:59",
- "mtu": None, "type": "bridge", "id":
- "tap1a81968a-79",
- "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
- },
- ],
- }
- macs = {'fa:16:3e:ed:9a:59': 'eth0'}
- render_dir = self.tmp_dir()
- network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
- ns = network_state.parse_net_config_data(network_cfg,
- skip_broken=False)
- renderer = self._get_renderer()
- with self.assertRaises(ValueError):
- renderer.render_network_state(ns, target=render_dir)
- self.assertEqual([], os.listdir(render_dir))
+ # TODO(rjschwei): re-enable test once route writing is implemented
+ # for SUSE distros
+# def test_multiple_ipv4_default_gateways(self):
+# """ValueError is raised when duplicate ipv4 gateways exist."""
+# net_json = {
+# "services": [{"type": "dns", "address": "172.19.0.12"}],
+# "networks": [{
+# "network_id": "dacd568d-5be6-4786-91fe-750c374b78b4",
+# "type": "ipv4", "netmask": "255.255.252.0",
+# "link": "tap1a81968a-79",
+# "routes": [{
+# "netmask": "0.0.0.0",
+# "network": "0.0.0.0",
+# "gateway": "172.19.3.254",
+# }, {
+# "netmask": "0.0.0.0", # A second default gateway
+# "network": "0.0.0.0",
+# "gateway": "172.20.3.254",
+# }],
+# "ip_address": "172.19.1.34", "id": "network0"
+# }],
+# "links": [
+# {
+# "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+# "mtu": None, "type": "bridge", "id":
+# "tap1a81968a-79",
+# "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+# },
+# ],
+# }
+# macs = {'fa:16:3e:ed:9a:59': 'eth0'}
+# render_dir = self.tmp_dir()
+# network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
+# ns = network_state.parse_net_config_data(network_cfg,
+# skip_broken=False)
+# renderer = self._get_renderer()
+# with self.assertRaises(ValueError):
+# renderer.render_network_state(ns, target=render_dir)
+# self.assertEqual([], os.listdir(render_dir))
+#
+# def test_multiple_ipv6_default_gateways(self):
+# """ValueError is raised when duplicate ipv6 gateways exist."""
+# net_json = {
+# "services": [{"type": "dns", "address": "172.19.0.12"}],
+# "networks": [{
+# "network_id": "public-ipv6",
+# "type": "ipv6", "netmask": "",
+# "link": "tap1a81968a-79",
+# "routes": [{
+# "gateway": "2001:DB8::1",
+# "netmask": "::",
+# "network": "::"
+# }, {
+# "gateway": "2001:DB9::1",
+# "netmask": "::",
+# "network": "::"
+# }],
+# "ip_address": "2001:DB8::10", "id": "network1"
+# }],
+# "links": [
+# {
+# "ethernet_mac_address": "fa:16:3e:ed:9a:59",
+# "mtu": None, "type": "bridge", "id":
+# "tap1a81968a-79",
+# "vif_id": "1a81968a-797a-400f-8a80-567f997eb93f"
+# },
+# ],
+# }
+# macs = {'fa:16:3e:ed:9a:59': 'eth0'}
+# render_dir = self.tmp_dir()
+# network_cfg = openstack.convert_net_json(net_json, known_macs=macs)
+# ns = network_state.parse_net_config_data(network_cfg,
+# skip_broken=False)
+# renderer = self._get_renderer()
+# with self.assertRaises(ValueError):
+# renderer.render_network_state(ns, target=render_dir)
+# self.assertEqual([], os.listdir(render_dir))
def test_openstack_rendering_samples(self):
for os_sample in OS_SAMPLES:
@@ -3501,18 +3612,11 @@ USERCTL=no
expected = """\
# Created by cloud-init on instance boot automatically, do not edit.
#
-BOOTPROTO=none
-DEFROUTE=yes
-DEVICE=interface0
-GATEWAY=10.0.2.2
-HWADDR=52:54:00:12:34:00
+BOOTPROTO=static
IPADDR=10.0.2.15
+LLADDR=52:54:00:12:34:00
NETMASK=255.255.255.0
-NM_CONTROLLED=no
-ONBOOT=yes
STARTMODE=auto
-TYPE=Ethernet
-USERCTL=no
"""
self.assertEqual(expected, found[nspath + 'ifcfg-interface0'])
# The configuration has no nameserver information make sure we
@@ -3537,12 +3641,7 @@ USERCTL=no
# Created by cloud-init on instance boot automatically, do not edit.
#
BOOTPROTO=dhcp
-DEVICE=eth0
-NM_CONTROLLED=no
-ONBOOT=yes
STARTMODE=auto
-TYPE=Ethernet
-USERCTL=no
"""
self.assertEqual(expected, found[nspath + 'ifcfg-eth0'])
# a dhcp only config should not modify resolv.conf
@@ -3613,6 +3712,30 @@ USERCTL=no
self._compare_files_to_expected(entry[self.expected_name], found)
self._assert_headers(found)
+ def test_simple_render_ipv6_slaac(self):
+ entry = NETWORK_CONFIGS['ipv6_slaac']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self._assert_headers(found)
+
+ def test_dhcpv6_stateless_config(self):
+ entry = NETWORK_CONFIGS['dhcpv6_stateless']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self._assert_headers(found)
+
+ def test_render_v4_and_v6(self):
+ entry = NETWORK_CONFIGS['v4_and_v6']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self._assert_headers(found)
+
+ def test_render_v6_and_v4(self):
+ entry = NETWORK_CONFIGS['v6_and_v4']
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry[self.expected_name], found)
+ self._assert_headers(found)
+
class TestEniNetRendering(CiTestCase):
@@ -3895,6 +4018,8 @@ class TestEniNetworkStateToEni(CiTestCase):
class TestCmdlineConfigParsing(CiTestCase):
+ with_logs = True
+
simple_cfg = {
'config': [{"type": "physical", "name": "eth0",
"mac_address": "c0:d6:9f:2c:e8:80",
@@ -3944,6 +4069,21 @@ class TestCmdlineConfigParsing(CiTestCase):
found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
self.assertEqual(found, self.simple_cfg)
+ def test_cmdline_with_net_config_disabled(self):
+ raw_cmdline = 'ro network-config=disabled root=foo'
+ found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
+ self.assertEqual(found, {'config': 'disabled'})
+
+ def test_cmdline_with_net_config_unencoded_logs_error(self):
+ """network-config cannot be unencoded besides 'disabled'."""
+ raw_cmdline = 'ro network-config={config:disabled} root=foo'
+ found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline)
+ self.assertIsNone(found)
+ expected_log = (
+ 'ERROR: Expected base64 encoded kernel commandline parameter'
+ ' network-config. Ignoring network-config={config:disabled}.')
+ self.assertIn(expected_log, self.logs.getvalue())
+
def test_cmdline_with_b64_gz(self):
data = _gzip_data(json.dumps(self.simple_cfg).encode())
encoded_text = base64.b64encode(data).decode()
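
A minimal sketch of producing a kernel commandline value these parsers accept, mirroring the _gzip_data plus base64 flow in the test above (using stdlib gzip in place of the test helper is an assumption):

    import base64
    import gzip
    import json

    cfg = {"config": [{"type": "physical", "name": "eth0"}]}
    # Anything other than the literal value 'disabled' must be base64
    # encoded; gzip compression beforehand is optional.
    blob = base64.b64encode(gzip.compress(json.dumps(cfg).encode())).decode()
    raw_cmdline = "ro network-config=%s root=foo" % blob
    # cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline) returns cfg
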
@@ -4532,6 +4672,51 @@ class TestEniRoundTrip(CiTestCase):
files['/etc/network/interfaces'].splitlines())
+class TestRenderersSelect:
+
+ @pytest.mark.parametrize(
+ 'renderer_selected,netplan,eni,nm,scfg,sys', (
+ # -netplan -ifupdown -nm -scfg -sys raises error
+ (net.RendererNotFoundError, False, False, False, False, False),
+ # -netplan +ifupdown -nm -scfg -sys selects eni
+ ('eni', False, True, False, False, False),
+ # +netplan +ifupdown -nm -scfg -sys selects eni
+ ('eni', True, True, False, False, False),
+ # +netplan -ifupdown -nm -scfg -sys selects netplan
+ ('netplan', True, False, False, False, False),
+ # Ubuntu with Network-Manager installed
+ # +netplan -ifupdown +nm -scfg -sys selects netplan
+ ('netplan', True, False, True, False, False),
+ # Centos/OpenSuse with Network-Manager installed selects sysconfig
+ # -netplan -ifupdown +nm -scfg +sys selects sysconfig
+ ('sysconfig', False, False, True, False, True),
+ ),
+ )
+ @mock.patch("cloudinit.net.renderers.netplan.available")
+ @mock.patch("cloudinit.net.renderers.sysconfig.available")
+ @mock.patch("cloudinit.net.renderers.sysconfig.available_sysconfig")
+ @mock.patch("cloudinit.net.renderers.sysconfig.available_nm")
+ @mock.patch("cloudinit.net.renderers.eni.available")
+ def test_valid_renderer_from_defaults_depending_on_availability(
+ self, m_eni_avail, m_nm_avail, m_scfg_avail, m_sys_avail,
+ m_netplan_avail, renderer_selected, netplan, eni, nm, scfg, sys
+ ):
+ """Assert proper renderer per DEFAULT_PRIORITY given availability."""
+ m_eni_avail.return_value = eni # ifupdown pkg presence
+ m_nm_avail.return_value = nm # network-manager presence
+ m_scfg_avail.return_value = scfg # sysconfig presence
+ m_sys_avail.return_value = sys # sysconfig/ifup/down presence
+ m_netplan_avail.return_value = netplan # netplan presence
+ if isinstance(renderer_selected, str):
+ (renderer_name, _rnd_class) = renderers.select(
+ priority=renderers.DEFAULT_PRIORITY
+ )
+ assert renderer_selected == renderer_name
+ else:
+ with pytest.raises(renderer_selected):
+ renderers.select(priority=renderers.DEFAULT_PRIORITY)
+
+
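
A short sketch of the call this table drives; the boolean columns map onto the mocked *.available functions, and the behavior described is taken from the parametrized cases above:

    from cloudinit.net import renderers

    # Returns a (name, renderer_class) tuple for the first available
    # renderer in DEFAULT_PRIORITY, or raises RendererNotFoundError
    # when nothing is available.
    name, renderer_cls = renderers.select(priority=renderers.DEFAULT_PRIORITY)
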
class TestNetRenderers(CiTestCase):
@mock.patch("cloudinit.net.renderers.sysconfig.available")
@mock.patch("cloudinit.net.renderers.eni.available")
@@ -4575,46 +4760,6 @@ class TestNetRenderers(CiTestCase):
self.assertRaises(net.RendererNotFoundError, renderers.select,
priority=['sysconfig', 'eni'])
- @mock.patch("cloudinit.net.renderers.netplan.available")
- @mock.patch("cloudinit.net.renderers.sysconfig.available")
- @mock.patch("cloudinit.net.renderers.sysconfig.available_sysconfig")
- @mock.patch("cloudinit.net.renderers.sysconfig.available_nm")
- @mock.patch("cloudinit.net.renderers.eni.available")
- @mock.patch("cloudinit.net.renderers.sysconfig.util.get_linux_distro")
- def test_sysconfig_selected_on_sysconfig_enabled_distros(self, m_distro,
- m_eni, m_sys_nm,
- m_sys_scfg,
- m_sys_avail,
- m_netplan):
- """sysconfig only selected on specific distros (rhel/sles)."""
-
- # Ubuntu with Network-Manager installed
- m_eni.return_value = False # no ifupdown (ifquery)
- m_sys_scfg.return_value = False # no sysconfig/ifup/ifdown
- m_sys_nm.return_value = True # network-manager is installed
- m_netplan.return_value = True # netplan is installed
- m_sys_avail.return_value = False # no sysconfig on Ubuntu
- m_distro.return_value = ('ubuntu', None, None)
- self.assertEqual('netplan', renderers.select(priority=None)[0])
-
- # Centos with Network-Manager installed
- m_eni.return_value = False # no ifupdown (ifquery)
- m_sys_scfg.return_value = False # no sysconfig/ifup/ifdown
- m_sys_nm.return_value = True # network-manager is installed
- m_netplan.return_value = False # netplan is not installed
- m_sys_avail.return_value = True # sysconfig is available on centos
- m_distro.return_value = ('centos', None, None)
- self.assertEqual('sysconfig', renderers.select(priority=None)[0])
-
- # OpenSuse with Network-Manager installed
- m_eni.return_value = False # no ifupdown (ifquery)
- m_sys_scfg.return_value = False # no sysconfig/ifup/ifdown
- m_sys_nm.return_value = True # network-manager is installed
- m_netplan.return_value = False # netplan is not installed
- m_sys_avail.return_value = True # sysconfig is available on opensuse
- m_distro.return_value = ('opensuse', None, None)
- self.assertEqual('sysconfig', renderers.select(priority=None)[0])
-
@mock.patch("cloudinit.net.sysconfig.available_sysconfig")
@mock.patch("cloudinit.util.get_linux_distro")
def test_sysconfig_available_uses_variant_mapping(self, m_distro, m_avail):
diff --git a/tests/unittests/test_render_cloudcfg.py b/tests/unittests/test_render_cloudcfg.py
new file mode 100644
index 00000000..8b1e6042
--- /dev/null
+++ b/tests/unittests/test_render_cloudcfg.py
@@ -0,0 +1,57 @@
+"""Tests for tools/render-cloudcfg"""
+
+import os
+import sys
+
+import pytest
+
+from cloudinit import util
+
+# TODO: align with tools/render-cloudcfg or cloudinit.distros.OSFAMILIES
+DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd",
+ "netbsd", "openbsd", "rhel", "suse", "ubuntu", "unknown"]
+
+
+class TestRenderCloudCfg:
+
+ cmd = [sys.executable, os.path.realpath('tools/render-cloudcfg')]
+ tmpl_path = os.path.realpath('config/cloud.cfg.tmpl')
+
+ @pytest.mark.parametrize('variant', DISTRO_VARIANTS)
+ def test_variant_sets_distro_in_cloud_cfg(self, variant, tmpdir):
+ outfile = tmpdir.join('outcfg').strpath
+ util.subp(
+ self.cmd + ['--variant', variant, self.tmpl_path, outfile])
+ with open(outfile) as stream:
+ system_cfg = util.load_yaml(stream.read())
+ if variant == 'unknown':
+ variant = 'ubuntu' # the 'unknown' variant defaults to ubuntu
+ assert system_cfg['system_info']['distro'] == variant
+
+ @pytest.mark.parametrize('variant', DISTRO_VARIANTS)
+ def test_variant_sets_default_user_in_cloud_cfg(self, variant, tmpdir):
+ outfile = tmpdir.join('outcfg').strpath
+ util.subp(
+ self.cmd + ['--variant', variant, self.tmpl_path, outfile])
+ with open(outfile) as stream:
+ system_cfg = util.load_yaml(stream.read())
+
+ default_user_exceptions = {
+ 'amazon': 'ec2-user', 'debian': 'ubuntu', 'unknown': 'ubuntu'}
+ default_user = system_cfg['system_info']['default_user']['name']
+ assert default_user == default_user_exceptions.get(variant, variant)
+
+ @pytest.mark.parametrize('variant,renderers', (
+ ('freebsd', ['freebsd']), ('netbsd', ['netbsd']),
+ ('openbsd', ['openbsd']), ('ubuntu', ['netplan', 'eni', 'sysconfig']))
+ )
+ def test_variant_sets_network_renderer_priority_in_cloud_cfg(
+ self, variant, renderers, tmpdir
+ ):
+ outfile = tmpdir.join('outcfg').strpath
+ util.subp(
+ self.cmd + ['--variant', variant, self.tmpl_path, outfile])
+ with open(outfile) as stream:
+ system_cfg = util.load_yaml(stream.read())
+
+ assert renderers == system_cfg['system_info']['network']['renderers']
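
Outside pytest the tool can be driven the same way; a minimal sketch built from the util.subp and util.load_yaml calls above (paths relative to a cloud-init checkout are an assumption):

    import sys

    from cloudinit import util

    util.subp([sys.executable, 'tools/render-cloudcfg', '--variant',
               'freebsd', 'config/cloud.cfg.tmpl', '/tmp/cloud.cfg'])
    with open('/tmp/cloud.cfg') as stream:
        system_cfg = util.load_yaml(stream.read())
    assert system_cfg['system_info']['distro'] == 'freebsd'
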
diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py
index e15ba6cf..6814030e 100644
--- a/tests/unittests/test_reporting.py
+++ b/tests/unittests/test_reporting.py
@@ -2,12 +2,12 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+from unittest import mock
+
from cloudinit import reporting
from cloudinit.reporting import events
from cloudinit.reporting import handlers
-import mock
-
from cloudinit.tests.helpers import TestCase
diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py
index 3582cf0b..b3e083c6 100644
--- a/tests/unittests/test_reporting_hyperv.py
+++ b/tests/unittests/test_reporting_hyperv.py
@@ -8,7 +8,7 @@ import os
import struct
import time
import re
-import mock
+from unittest import mock
from cloudinit import util
from cloudinit.tests.helpers import CiTestCase
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index b227c20b..0be41924 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from mock import patch
from collections import namedtuple
+from unittest.mock import patch
from cloudinit import ssh_util
from cloudinit.tests import helpers as test_helpers
diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py
index c36e6eb0..cba09830 100644
--- a/tests/unittests/test_templating.py
+++ b/tests/unittests/test_templating.py
@@ -4,8 +4,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from __future__ import print_function
-
from cloudinit.tests import helpers as test_helpers
import textwrap
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 0e71db82..4b439267 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -1,27 +1,20 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from __future__ import print_function
-
+import io
+import json
import logging
import os
import re
import shutil
import stat
-import tempfile
-
-import json
-import six
import sys
+import tempfile
import yaml
+from unittest import mock
from cloudinit import importer, util
from cloudinit.tests import helpers
-try:
- from unittest import mock
-except ImportError:
- import mock
-
BASH = util.which('bash')
BOGUS_COMMAND = 'this-is-not-expected-to-be-a-program-name'
@@ -320,7 +313,7 @@ class TestLoadYaml(helpers.CiTestCase):
def test_python_unicode(self):
# complex type of python/unicode is explicitly allowed
- myobj = {'1': six.text_type("FOOBAR")}
+ myobj = {'1': "FOOBAR"}
safe_yaml = yaml.dump(myobj)
self.assertEqual(util.load_yaml(blob=safe_yaml,
default=self.mydefault),
@@ -663,8 +656,8 @@ class TestMultiLog(helpers.FilesystemMockingTestCase):
self.patchOS(self.root)
self.patchUtils(self.root)
self.patchOpen(self.root)
- self.stdout = six.StringIO()
- self.stderr = six.StringIO()
+ self.stdout = io.StringIO()
+ self.stderr = io.StringIO()
self.patchStdoutAndStderr(self.stdout, self.stderr)
def test_stderr_used_by_default(self):
@@ -742,7 +735,6 @@ class TestReadSeeded(helpers.TestCase):
class TestSubp(helpers.CiTestCase):
- with_logs = True
allowed_subp = [BASH, 'cat', helpers.CiTestCase.SUBP_SHELL_TRUE,
BOGUS_COMMAND, sys.executable]
@@ -879,8 +871,8 @@ class TestSubp(helpers.CiTestCase):
"""Raised exc should have stderr, stdout as string if no decode."""
with self.assertRaises(util.ProcessExecutionError) as cm:
util.subp([BOGUS_COMMAND], decode=True)
- self.assertTrue(isinstance(cm.exception.stdout, six.string_types))
- self.assertTrue(isinstance(cm.exception.stderr, six.string_types))
+ self.assertTrue(isinstance(cm.exception.stdout, str))
+ self.assertTrue(isinstance(cm.exception.stderr, str))
def test_bunch_of_slashes_in_path(self):
self.assertEqual("/target/my/path/",
@@ -1177,4 +1169,97 @@ class TestGetProcEnv(helpers.TestCase):
my_ppid = os.getppid()
self.assertEqual(my_ppid, util.get_proc_ppid(my_pid))
+
+@mock.patch('cloudinit.util.subp')
+def test_find_devs_with_openbsd(m_subp):
+ m_subp.return_value = (
+ 'cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', ''
+ )
+ devlist = util.find_devs_with_openbsd()
+ assert devlist == ['/dev/cd0a', '/dev/sd1i']
+
+
+@mock.patch('cloudinit.util.subp')
+def test_find_devs_with_openbsd_with_criteria(m_subp):
+ m_subp.return_value = (
+ 'cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', ''
+ )
+ devlist = util.find_devs_with_openbsd(criteria="TYPE=iso9660")
+ assert devlist == ['/dev/cd0a']
+
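
The mocked sysctl output implies the parsing rule; a hedged reimplementation for illustration only, since the real helper lives in cloudinit.util and may differ in detail:

    # Entries are 'name:duid' pairs. A non-empty duid marks an attached
    # disk to skip; floppies (fd*) are skipped; cd devices get the 'a'
    # partition and everything else the whole-disk 'i' partition.
    def parse_openbsd_disknames(out, criteria=None):
        devlist = []
        for entry in out.split(','):
            name, _, duid = entry.partition(':')
            if duid or name.startswith('fd'):
                continue
            if criteria == 'TYPE=iso9660' and not name.startswith('cd'):
                continue
            part = 'a' if name.startswith('cd') else 'i'
            devlist.append('/dev/%s%s' % (name, part))
        return devlist

    assert parse_openbsd_disknames(
        'cd0:,sd0:630d98d32b5d3759,sd1:,fd0:') == ['/dev/cd0a', '/dev/sd1i']
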
+
+@mock.patch('glob.glob')
+def test_find_devs_with_freebsd(m_glob):
+ def fake_glob(pattern):
+ msdos = ["/dev/msdosfs/EFISYS"]
+ iso9660 = ["/dev/iso9660/config-2"]
+ if pattern == "/dev/msdosfs/*":
+ return msdos
+ elif pattern == "/dev/iso9660/*":
+ return iso9660
+ raise Exception
+ m_glob.side_effect = fake_glob
+
+ devlist = util.find_devs_with_freebsd()
+ assert set(devlist) == set([
+ '/dev/iso9660/config-2', '/dev/msdosfs/EFISYS'])
+ devlist = util.find_devs_with_freebsd(criteria="TYPE=iso9660")
+ assert devlist == ['/dev/iso9660/config-2']
+ devlist = util.find_devs_with_freebsd(criteria="TYPE=vfat")
+ assert devlist == ['/dev/msdosfs/EFISYS']
+
+
+@mock.patch("cloudinit.util.subp")
+def test_find_devs_with_netbsd(m_subp):
+ side_effect_values = [
+ ("ld0 dk0 dk1 cd0", ""),
+ (
+ (
+ "mscdlabel: CDIOREADTOCHEADER: "
+ "Inappropriate ioctl for device\n"
+ "track (ctl=4) at sector 0\n"
+ "disklabel not written\n"
+ ),
+ "",
+ ),
+ (
+ (
+ "mscdlabel: CDIOREADTOCHEADER: "
+ "Inappropriate ioctl for device\n"
+ "track (ctl=4) at sector 0\n"
+ "disklabel not written\n"
+ ),
+ "",
+ ),
+ (
+ (
+ "mscdlabel: CDIOREADTOCHEADER: "
+ "Inappropriate ioctl for device\n"
+ "track (ctl=4) at sector 0\n"
+ "disklabel not written\n"
+ ),
+ "",
+ ),
+ (
+ (
+ "track (ctl=4) at sector 0\n"
+ 'ISO filesystem, label "config-2", '
+ "creation time: 2020/03/31 17:29\n"
+ "adding as 'a'\n"
+ ),
+ "",
+ ),
+ ]
+ m_subp.side_effect = side_effect_values
+ devlist = util.find_devs_with_netbsd()
+ assert set(devlist) == set(
+ ["/dev/ld0", "/dev/dk0", "/dev/dk1", "/dev/cd0"]
+ )
+ m_subp.side_effect = side_effect_values
+ devlist = util.find_devs_with_netbsd(criteria="TYPE=iso9660")
+ assert devlist == ["/dev/cd0"]
+ m_subp.side_effect = side_effect_values
+ devlist = util.find_devs_with_netbsd(criteria="TYPE=vfat")
+ assert devlist == ["/dev/ld0", "/dev/dk0", "/dev/dk1"]
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_vmware/test_guestcust_util.py b/tests/unittests/test_vmware/test_guestcust_util.py
index b175a998..394bee9f 100644
--- a/tests/unittests/test_vmware/test_guestcust_util.py
+++ b/tests/unittests/test_vmware/test_guestcust_util.py
@@ -6,8 +6,11 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit import util
+from cloudinit.sources.helpers.vmware.imc.config import Config
+from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
get_tools_config,
+ set_gc_status,
)
from cloudinit.tests.helpers import CiTestCase, mock
@@ -69,4 +72,27 @@ class TestGuestCustUtil(CiTestCase):
get_tools_config('section', 'key', 'defaultVal'),
'e-f')
+ def test_set_gc_status(self):
+ """
+ This test is designed to verify the behavior of set_gc_status
+ """
+ # config is None, return None
+ self.assertEqual(set_gc_status(None, 'Successful'), None)
+
+ # post gc status is NO, return None
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertEqual(set_gc_status(conf, 'Successful'), None)
+
+ # post gc status is YES, subp is called to execute command
+ cf._insertKey("MISC|POST-GC-STATUS", "YES")
+ conf = Config(cf)
+ with mock.patch.object(util, 'subp',
+ return_value=('ok', b'')) as mockobj:
+ self.assertEqual(
+ set_gc_status(conf, 'Successful'), ('ok', b''))
+ mockobj.assert_called_once_with(
+ ['vmware-rpctool', 'info-set guestinfo.gc.status Successful'],
+ rcs=[0])
+
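
A sketch of what the mocked call amounts to when POST-GC-STATUS is YES; the function name here is illustrative, the real implementation is guestcust_util.set_gc_status:

    from cloudinit import util

    def report_gc_status(conf, msg):
        # No-op unless the customization config opts in via POST-GC-STATUS.
        if conf is None or not conf.post_gc_status:
            return None
        return util.subp(
            ['vmware-rpctool', 'info-set guestinfo.gc.status ' + msg],
            rcs=[0])
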
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py
index 16343ed2..c823889c 100644
--- a/tests/unittests/test_vmware_config_file.py
+++ b/tests/unittests/test_vmware_config_file.py
@@ -348,6 +348,14 @@ class TestVmwareConfigFile(CiTestCase):
conf = Config(cf)
self.assertEqual("test-script", conf.custom_script_name)
+ def test_post_gc_status(self):
+ cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+ conf = Config(cf)
+ self.assertFalse(conf.post_gc_status)
+ cf._insertKey("MISC|POST-GC-STATUS", "YES")
+ conf = Config(cf)
+ self.assertTrue(conf.post_gc_status)
+
class TestVmwareNetConfig(CiTestCase):
"""Test conversion of vmware config to cloud-init config."""
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
new file mode 100644
index 00000000..2c924f85
--- /dev/null
+++ b/tools/.github-cla-signers
@@ -0,0 +1,7 @@
+beezly
+bipinbachhao
+dhensby
+lucasmoura
+nishigori
+tomponline
+TheRealFalcon
diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index 6b20d360..32cc1fa6 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -6,6 +6,7 @@
"askon": "ask0n",
"bitfehler": "bitfehler",
"chad.smith": "blackboxsw",
+ "chcheng": "chengcheng-chcheng",
"d-info-e": "do3meli",
"daniel-thewatkins": "OddBloke",
"eric-lafontaine1": "elafontaine",
@@ -13,10 +14,13 @@
"goneri": "goneri",
"harald-jensas": "hjensas",
"i.galic": "igalic",
+ "kgarloff": "garloff",
+ "killermoehre": "killermoehre",
"larsks": "larsks",
"legovini": "paride",
"louis": "karibou",
"madhuri-rai07": "madhuri-rai07",
+ "momousta": "Moustafa-Moustafa",
"otubo": "otubo",
"pengpengs": "PengpengSun",
"powersj": "powersj",
diff --git a/tools/build-on-freebsd b/tools/build-on-freebsd
index 876368a9..60beeee0 100755
--- a/tools/build-on-freebsd
+++ b/tools/build-on-freebsd
@@ -29,7 +29,6 @@ pkgs="
$py_prefix-oauthlib
$py_prefix-requests
$py_prefix-serial
- $py_prefix-six
$py_prefix-yaml
sudo
"
diff --git a/tools/build-on-netbsd b/tools/build-on-netbsd
new file mode 100755
index 00000000..d2a7067d
--- /dev/null
+++ b/tools/build-on-netbsd
@@ -0,0 +1,36 @@
+#!/bin/sh
+
+fail() { echo "FAILED:" "$@" 1>&2; exit 1; }
+
+# Check dependencies:
+depschecked=/tmp/c-i.dependencieschecked
+pkgs="
+ bash
+ dmidecode
+ py37-configobj
+ py37-jinja2
+ py37-oauthlib
+ py37-requests
+ py37-setuptools
+ py37-yaml
+ sudo
+"
+[ -f "$depschecked" ] || pkg_add ${pkgs} || fail "install packages"
+
+touch $depschecked
+
+# Build the code and install in /usr/pkg/:
+python3.7 setup.py build
+python3.7 setup.py install -O1 --distro netbsd --skip-build --init-system sysvinit_netbsd
+mv -v /usr/local/etc/rc.d/cloud* /etc/rc.d
+
+# Enable cloud-init in /etc/rc.conf:
+sed -i.bak -e "/^cloud.*=.*/d" /etc/rc.conf
+echo '
+# You can safely remove the following lines starting with "cloud"
+cloudinitlocal="YES"
+cloudinit="YES"
+cloudconfig="YES"
+cloudfinal="YES"' >> /etc/rc.conf
+
+echo "Installation completed."
diff --git a/tools/build-on-openbsd b/tools/build-on-openbsd
new file mode 100755
index 00000000..ca028606
--- /dev/null
+++ b/tools/build-on-openbsd
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+fail() { echo "FAILED:" "$@" 1>&2; exit 1; }
+
+# Check dependencies:
+depschecked=/tmp/c-i.dependencieschecked
+pkgs="
+ bash
+ dmidecode
+ py3-configobj
+ py3-jinja2
+ py3-jsonschema
+ py3-oauthlib
+ py3-requests
+ py3-setuptools
+ py3-six
+ py3-yaml
+ sudo--
+"
+[ -f "$depschecked" ] || pkg_add ${pkgs} || fail "install packages"
+
+touch $depschecked
+
+python3 setup.py build
+python3 setup.py install -O1 --distro openbsd --skip-build
+
+echo "Installation completed."
diff --git a/tools/ccfg-merge-debug b/tools/ccfg-merge-debug
index 1f08e0cb..59c573af 100755
--- a/tools/ccfg-merge-debug
+++ b/tools/ccfg-merge-debug
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
from cloudinit import handlers
from cloudinit.handlers import cloud_config as cc_part
diff --git a/tools/ds-identify b/tools/ds-identify
index c93d4a77..071cdc0c 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -1062,6 +1062,10 @@ dscheck_OpenStack() {
return ${DS_FOUND}
fi
+ if dmi_chassis_asset_tag_matches "SAP CCloud VM"; then
+ return ${DS_FOUND}
+ fi
+
# LP: #1669875 : allow identification of OpenStack by asset tag
if dmi_chassis_asset_tag_matches "$nova"; then
return ${DS_FOUND}
diff --git a/tools/make-mime.py b/tools/make-mime.py
index d321479b..e0022302 100755
--- a/tools/make-mime.py
+++ b/tools/make-mime.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
import argparse
import sys
diff --git a/tools/make-tarball b/tools/make-tarball
index 8d540139..462e7d04 100755
--- a/tools/make-tarball
+++ b/tools/make-tarball
@@ -15,24 +15,27 @@ Usage: ${0##*/} [revision]
options:
-h | --help print usage
-o | --output FILE write to file
+ --version VERSION Set the version used in the tarball. Default value is determined with 'git describe'.
--orig-tarball Write file cloud-init_<version>.orig.tar.gz
--long Use git describe --long for versioning
EOF
}
short_opts="ho:v"
-long_opts="help,output:,orig-tarball,long"
+long_opts="help,output:,version:,orig-tarball,long"
getopt_out=$(getopt --name "${0##*/}" \
--options "${short_opts}" --long "${long_opts}" -- "$@") &&
eval set -- "${getopt_out}" || { Usage 1>&2; exit 1; }
long_opt=""
orig_opt=""
+version=""
while [ $# -ne 0 ]; do
cur=$1; next=$2
case "$cur" in
-h|--help) Usage; exit 0;;
-o|--output) output=$next; shift;;
+ --version) version=$next; shift;;
--long) long_opt="--long";;
--orig-tarball) orig_opt=".orig";;
--) shift; break;;
@@ -41,7 +44,12 @@ while [ $# -ne 0 ]; do
done
rev=${1:-HEAD}
-version=$(git describe --abbrev=8 "--match=[0-9]*" ${long_opt} $rev)
+if [ -z "$version" ]; then
+ version=$(git describe --abbrev=8 "--match=[0-9]*" ${long_opt} $rev)
+elif [ ! -z "$long_opt" ]; then
+ echo "WARNING: --long has no effect when --version is passed" >&2
+ exit 1
+fi
archive_base="cloud-init-$version"
if [ -z "$output" ]; then
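
For example, './tools/make-tarball --version 20.2-38-g8377897b --orig-tarball' would write cloud-init_20.2-38-g8377897b.orig.tar.gz without consulting git describe; the version string shown is illustrative.
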
diff --git a/tools/mock-meta.py b/tools/mock-meta.py
index 724f7fc4..a58e0260 100755
--- a/tools/mock-meta.py
+++ b/tools/mock-meta.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
# Provides a somewhat random, somewhat compat, somewhat useful mock version of
# http://docs.amazonwebservices.com
diff --git a/tools/pipremove b/tools/pipremove
index f8f4ff11..e1213edd 100755
--- a/tools/pipremove
+++ b/tools/pipremove
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
import subprocess
import sys
diff --git a/tools/read-dependencies b/tools/read-dependencies
index b4656e69..666e24f5 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""List pip dependencies or system package dependencies for cloud-init."""
# You might be tempted to rewrite this as a shell script, but you
@@ -9,7 +9,7 @@ try:
from argparse import ArgumentParser
except ImportError:
raise RuntimeError(
- 'Could not import python-argparse. Please install python-argparse '
+ 'Could not import argparse. Please install python3-argparse '
'package to continue')
import json
@@ -73,8 +73,8 @@ DISTRO_INSTALL_PKG_CMD = {
# List of base system packages required to enable ci automation
CI_SYSTEM_BASE_PKGS = {
'common': ['make', 'sudo', 'tar'],
- 'redhat': ['python-tox'],
- 'centos': ['python-tox'],
+ 'redhat': ['python3-tox'],
+ 'centos': ['python3-tox'],
'ubuntu': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild'],
'debian': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild']}
@@ -99,7 +99,7 @@ def get_parser():
parser.add_argument(
'-s', '--system-pkg-names', action='store_true', default=False,
dest='system_pkg_names',
- help='The name of the distro to generate package deps for.')
+ help='Generate distribution package names (python3-pkgname).')
parser.add_argument(
'-i', '--install', action='store_true', default=False,
dest='install',
@@ -109,12 +109,6 @@ def get_parser():
dest='test_distro',
help='Additionally install continuous integration system packages '
'required for build and test automation.')
- parser.add_argument(
- '-v', '--python-version', type=str, dest='python_version',
- default=None, choices=["2", "3"],
- help='Override the version of python we want to generate system '
- 'package dependencies for. Defaults to the version of python '
- 'this script is called with')
return parser
@@ -132,6 +126,9 @@ def get_package_deps_from_json(topdir, distro):
deps = json.loads(stream.read())
if distro is None:
return {}
+ if deps.get(distro): # If we have a specific distro defined, use it.
+ return deps[distro]
+ # Use generic distro dependency map via DISTRO_PKG_TYPE_MAP
return deps[DISTRO_PKG_TYPE_MAP[distro]]
@@ -155,27 +152,20 @@ def parse_pip_requirements(requirements_path):
return dep_names
-def translate_pip_to_system_pkg(pip_requires, renames, python_ver):
+def translate_pip_to_system_pkg(pip_requires, renames):
"""Translate pip package names to distro-specific package names.
@param pip_requires: List of versionless pip package names to translate.
@param renames: Dict containing special case renames from pip name to system
package name for the distro.
- @param python_ver: Optional python version string "2" or "3". When None,
- use the python version that is calling this script via sys.version_info.
"""
- if python_ver is None:
- python_ver = str(sys.version_info[0])
- if python_ver == "2":
- prefix = "python-"
- else:
- prefix = "python3-"
+ prefix = "python3-"
standard_pkg_name = "{0}{1}"
translated_names = []
for pip_name in pip_requires:
pip_name = pip_name.lower()
# Find a rename if present for the distro package
- rename = renames.get(pip_name, {}).get(python_ver, None)
+ rename = renames.get(pip_name, "")
if rename:
translated_names.append(rename)
else:
@@ -222,7 +212,7 @@ def main(distro):
deps_from_json = get_package_deps_from_json(topd, args.distro)
renames = deps_from_json.get('renames', {})
translated_pip_names = translate_pip_to_system_pkg(
- pip_pkg_names, renames, args.python_version)
+ pip_pkg_names, renames)
all_deps = []
if args.distro:
all_deps.extend(
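
An illustration of the simplified translation rule; the rename mapping shown is hypothetical:

    renames = {'pyyaml': 'python3-yaml'}  # hypothetical rename entry
    pip_requires = ['Jinja2', 'pyyaml']
    # Lower-case each pip name, honor an exact rename when present,
    # otherwise prepend the fixed python3- prefix.
    pkgs = [renames.get(p.lower()) or 'python3-' + p.lower()
            for p in pip_requires]
    assert pkgs == ['python3-jinja2', 'python3-yaml']
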
diff --git a/tools/read-version b/tools/read-version
index 6dca659e..02c90643 100755
--- a/tools/read-version
+++ b/tools/read-version
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import os
import json
@@ -65,7 +65,13 @@ output_json = '--json' in sys.argv
src_version = ci_version.version_string()
version_long = None
-if is_gitdir(_tdir) and which("git"):
+# If we're performing CI for a new release branch (which our tooling creates
+# with an "upstream/" prefix), then we don't want to enforce strict version
+# matching because we know it will fail.
+is_release_branch_ci = (
+ os.environ.get("TRAVIS_PULL_REQUEST_BRANCH", "").startswith("upstream/")
+)
+if is_gitdir(_tdir) and which("git") and not is_release_branch_ci:
flags = []
if use_tags:
flags = ['--tags']
@@ -113,6 +119,7 @@ data = {
'extra': extra,
'commit': commit,
'distance': distance,
+ 'is_release_branch_ci': is_release_branch_ci,
}
if output_json:
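
The new guard keys off a single environment variable that Travis CI sets for
pull requests; when the source branch carries the release tooling's
"upstream/" prefix, read-version skips the strict git-describe comparison and
records that fact in its JSON output. A minimal sketch of just the check (the
branch names here are illustrative):

    import os

    def is_release_branch_ci():
        return os.environ.get(
            "TRAVIS_PULL_REQUEST_BRANCH", "").startswith("upstream/")

    os.environ["TRAVIS_PULL_REQUEST_BRANCH"] = "upstream/20.2"
    assert is_release_branch_ci()
    os.environ["TRAVIS_PULL_REQUEST_BRANCH"] = "fix/some-bug"
    assert not is_release_branch_ci()
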
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
index 3d5fa725..9322b2c3 100755
--- a/tools/render-cloudcfg
+++ b/tools/render-cloudcfg
@@ -4,8 +4,9 @@ import argparse
import os
import sys
-VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd", "rhel",
- "suse", "ubuntu", "unknown"]
+VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd",
+ "netbsd", "openbsd", "rhel", "suse", "ubuntu", "unknown"]
+
if "avoid-pep8-E402-import-not-top-of-file":
_tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
diff --git a/tools/run-container b/tools/run-container
index 1d24e15b..7212550e 100755
--- a/tools/run-container
+++ b/tools/run-container
@@ -35,9 +35,6 @@ Usage: ${0##*/} [ options ] [images:]image-ref
tested. Inside container, changes are in
local-changes.diff.
-k | --keep keep container after tests
- --pyexe V python version to use. Default=auto.
- Should be name of an executable.
- ('python2' or 'python3')
-p | --package build a binary package (.deb or .rpm)
-s | --source-package build source package (debuild -S or srpm)
-u | --unittest run unit tests
@@ -262,32 +259,23 @@ prep() {
# we need some very basic things not present in the container.
# - git
# - tar (CentOS 6 lxc container does not have it)
- # - python-argparse (or python3)
+ # - python3
local needed="" pair="" pkg="" cmd="" needed=""
local pairs="tar:tar git:git"
- local pyexe="$1"
get_os_info
- local py2pkg="python2" py3pkg="python3"
+ local py3pkg="python3"
case "$OS_NAME" in
opensuse)
- py2pkg="python-base"
py3pkg="python3-base";;
esac
- case "$pyexe" in
- python2) pairs="$pairs python2:$py2pkg";;
- python3) pairs="$pairs python3:$py3pkg";;
- esac
+ pairs="$pairs python3:$py3pkg"
for pair in $pairs; do
pkg=${pair#*:}
cmd=${pair%%:*}
command -v "$cmd" >/dev/null 2>&1 || needed="${needed} $pkg"
done
- if [ "$OS_NAME" = "centos" -a "$pyexe" = "python2" ]; then
- python -c "import argparse" >/dev/null 2>&1 ||
- needed="${needed} python-argparse"
- fi
needed=${needed# }
if [ -z "$needed" ]; then
error "No prep packages needed"
@@ -299,16 +287,8 @@ prep() {
install_packages "$@"
}
-nose() {
- local pyexe="$1" cmd=""
- shift
- get_os_info
- if [ "$OS_NAME/$OS_VERSION" = "centos/6" ]; then
- cmd="nosetests"
- else
- cmd="$pyexe -m nose"
- fi
- ${cmd} "$@"
+pytest() {
+ python3 -m pytest "$@"
}
is_done_cloudinit() {
@@ -367,7 +347,7 @@ wait_for_boot() {
run_self_inside "$name" wait_inside "$name" "$wtime" "$VERBOSITY" ||
{ errorrc "wait inside $name failed."; return; }
- if [ ! -z "${http_proxy-}" ]; then
+ if [ -n "${http_proxy-}" ]; then
if [ "$OS_NAME" = "centos" ]; then
debug 1 "configuring proxy ${http_proxy}"
inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf"
@@ -411,7 +391,7 @@ run_self_inside_as_cd() {
main() {
local short_opts="a:hknpsuv"
- local long_opts="artifacts:,dirty,help,keep,name:,pyexe:,package,source-package,unittest,verbose"
+ local long_opts="artifacts:,dirty,help,keep,name:,package,source-package,unittest,verbose"
local getopt_out=""
getopt_out=$(getopt --name "${0##*/}" \
--options "${short_opts}" --long "${long_opts}" -- "$@") &&
@@ -420,7 +400,7 @@ main() {
local cur="" next=""
local package=false srcpackage=false unittest="" name=""
- local dirty=false pyexe="auto" artifact_d="."
+ local dirty=false artifact_d="."
while [ $# -ne 0 ]; do
cur="${1:-}"; next="${2:-}";
@@ -430,7 +410,6 @@ main() {
-h|--help) Usage ; exit 0;;
-k|--keep) KEEP=true;;
-n|--name) name="$next"; shift;;
- --pyexe) pyexe=$next; shift;;
-p|--package) package=true;;
-s|--source-package) srcpackage=true;;
-u|--unittest) unittest=1;;
@@ -470,16 +449,8 @@ main() {
get_os_info_in "$name" ||
{ errorrc "failed to get os_info in $name"; return; }
- if [ "$pyexe" = "auto" ]; then
- case "$OS_NAME/$OS_VERSION" in
- centos/*|opensuse/*) pyexe=python2;;
- *) pyexe=python3;;
- esac
- debug 1 "set pyexe=$pyexe for $OS_NAME/$OS_VERSION"
- fi
-
# prep the container (install very basic dependencies)
- run_self_inside "$name" prep "$pyexe" ||
+ run_self_inside "$name" prep ||
{ errorrc "Failed to prep container $name"; return; }
# add the user
@@ -492,9 +463,8 @@ main() {
return
}
- inside_as_cd "$name" root "$cdir" \
- $pyexe ./tools/read-dependencies "--distro=${OS_NAME}" \
- --test-distro || {
+ local rdcmd=(python3 tools/read-dependencies "--distro=${OS_NAME}" --install --test-distro)
+ inside_as_cd "$name" root "$cdir" "${rdcmd[@]}" || {
errorrc "FAIL: failed to install dependencies with read-dependencies"
return
}
@@ -507,10 +477,10 @@ main() {
if [ -n "$unittest" ]; then
debug 1 "running unit tests."
- run_self_inside_as_cd "$name" "$user" "$cdir" nose "$pyexe" \
+ run_self_inside_as_cd "$name" "$user" "$cdir" pytest \
tests/unittests cloudinit/ || {
- errorrc "nosetests failed.";
- errors[${#errors[@]}]="nosetests"
+ errorrc "pytest failed.";
+ errors[${#errors[@]}]="pytest"
}
fi
@@ -537,7 +507,7 @@ main() {
}
debug 1 "building source package with $build_srcpkg."
# shellcheck disable=SC2086
- inside_as_cd "$name" "$user" "$cdir" $pyexe $build_srcpkg || {
+ inside_as_cd "$name" "$user" "$cdir" python3 $build_srcpkg || {
errorrc "failed: $build_srcpkg";
errors[${#errors[@]}]="source package"
}
@@ -550,7 +520,7 @@ main() {
}
debug 1 "building binary package with $build_pkg."
# shellcheck disable=SC2086
- inside_as_cd "$name" "$user" "$cdir" $pyexe $build_pkg || {
+ inside_as_cd "$name" "$user" "$cdir" python3 $build_pkg || {
errorrc "failed: $build_pkg";
errors[${#errors[@]}]="binary package"
}
@@ -586,7 +556,7 @@ main() {
}
case "${1:-}" in
- prep|os_info|wait_inside|nose) _n=$1; shift; "$_n" "$@";;
+ prep|os_info|wait_inside|pytest) _n=$1; shift; "$_n" "$@";;
*) main "$@";;
esac
diff --git a/tools/run-pyflakes b/tools/run-pyflakes
index b3759a94..179afebe 100755
--- a/tools/run-pyflakes
+++ b/tools/run-pyflakes
@@ -1,6 +1,5 @@
#!/bin/bash
-PYTHON_VERSION=${PYTHON_VERSION:-2}
CR="
"
pycheck_dirs=( "cloudinit/" "tests/" "tools/" )
@@ -12,7 +11,7 @@ else
files=( "$@" )
fi
-cmd=( "python${PYTHON_VERSION}" -m "pyflakes" "${files[@]}" )
+cmd=( "python3" -m "pyflakes" "${files[@]}" )
echo "Running: " "${cmd[@]}" 1>&2
exec "${cmd[@]}"
diff --git a/tools/run-pyflakes3 b/tools/run-pyflakes3
deleted file mode 100755
index e9f0863d..00000000
--- a/tools/run-pyflakes3
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-PYTHON_VERSION=3 exec "${0%/*}/run-pyflakes" "$@"
diff --git a/tools/tox-venv b/tools/tox-venv
index a5d21625..9dd02460 100755
--- a/tools/tox-venv
+++ b/tools/tox-venv
@@ -116,7 +116,7 @@ Usage: ${0##*/} [--no-create] tox-environment [command [args]]
be read from tox.ini. This allows you to do:
tox-venv py27 - tests/some/sub/dir
and have the 'command' read correctly and have that execute:
- python -m nose tests/some/sub/dir
+ python -m pytest tests/some/sub/dir
EOF
if [ -f "$tox_ini" ]; then
diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py
index a57ea847..d8bbcfcb 100755
--- a/tools/validate-yaml.py
+++ b/tools/validate-yaml.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""Try to read a YAML file and report any errors.
"""
diff --git a/tox.ini b/tox.ini
index 8612f034..95a8511f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,13 +1,13 @@
[tox]
-envlist = py3, xenial, pycodestyle, pyflakes, pylint
+envlist = py3, xenial-dev, pycodestyle, pyflakes, pylint
recreate = True
[testenv]
-commands = python -m nose {posargs:tests/unittests cloudinit}
+commands = {envpython} -m pytest {posargs:tests/unittests cloudinit}
setenv =
LC_ALL = en_US.utf-8
passenv=
- NOSE_VERBOSE
+ PYTEST_ADDOPTS
[testenv:pycodestyle]
basepython = python3
@@ -32,23 +32,16 @@ commands = {envpython} -m pylint {posargs:cloudinit tests tools}
[testenv:py3]
basepython = python3
deps =
- nose-timer
-r{toxinidir}/test-requirements.txt
-commands = {envpython} -m nose --with-timer --timer-top-n 10 \
- {posargs:--with-coverage --cover-erase --cover-branches \
- --cover-inclusive --cover-package=cloudinit \
+commands = {envpython} -m pytest \
+ --durations 10 \
+ {posargs:--cov=cloudinit --cov-branch \
tests/unittests cloudinit}
[testenv:py27]
basepython = python2.7
deps = -r{toxinidir}/test-requirements.txt
-[testenv:py26]
-deps = -r{toxinidir}/test-requirements.txt
-commands = nosetests {posargs:tests/unittests cloudinit}
-setenv =
- LC_ALL = C
-
[flake8]
#H102 Apache 2.0 license header not found
ignore=H404,H405,H105,H301,H104,H403,H101,H102,H106,H304
@@ -62,11 +55,15 @@ commands =
{envpython} -m sphinx {posargs:doc/rtd doc/rtd_html}
doc8 doc/rtd
-[testenv:xenial]
-commands =
- python ./tools/pipremove jsonschema
- python -m nose {posargs:tests/unittests cloudinit}
-basepython = python3
+[xenial-shared-deps]
+# The version of pytest in xenial doesn't work with Python 3.8, so we define
+# two xenial environments: [testenv:xenial] runs the tests with exactly the
+# version of pytest present in xenial, and is used in CI. [testenv:xenial-dev]
+# runs the tests with the lowest version of pytest that works with Python 3.8
+# (3.0.7), but keeps the other dependencies at xenial's level.
+#
+# (This section is not a testenv; it is used to maintain a single definition
+# of the dependencies shared between the two xenial testenvs.)
deps =
# requirements
jinja2==2.8
@@ -75,46 +72,42 @@ deps =
pyserial==3.0.1
configobj==5.0.6
requests==2.9.1
- # jsonpatch in xenial is 1.10, not 1.19 (#839779). The oldest version
- # to work with python3.6 is 1.16 as found in Artful. To keep default
- # invocation of 'tox' happy, accept the difference in version here.
- jsonpatch==1.16
- six==1.10.0
# test-requirements
httpretty==0.9.6
mock==1.3.0
- nose==1.3.7
- unittest2==1.1.0
- contextlib2==0.5.1
-[testenv:centos6]
-basepython = python2.6
-commands = nosetests {posargs:tests/unittests cloudinit}
+[testenv:xenial]
+# When updating this commands definition, also update the definition in
+# [testenv:xenial-dev]. See the comment there for details.
+commands =
+ python ./tools/pipremove jsonschema
+ python -m pytest {posargs:tests/unittests cloudinit}
+basepython = python3
deps =
- # requirements
- argparse==1.2.1
- jinja2==2.2.1
- pyyaml==3.10
- oauthlib==0.6.0
- configobj==4.6.0
- requests==2.6.0
- jsonpatch==1.2
- six==1.9.0
- -r{toxinidir}/test-requirements.txt
-
-[testenv:opensusel150]
-basepython = python2.7
-commands = nosetests {posargs:tests/unittests cloudinit}
+ # Refer to the comment in [xenial-shared-deps] for details
+ {[xenial-shared-deps]deps}
+ jsonpatch==1.10
+ pytest==2.8.7
+
+[testenv:xenial-dev]
+# This should be:
+# commands = {[testenv:xenial]commands}
+# but the version of tox in xenial has a bug
+# (https://github.com/tox-dev/tox/issues/208) which means that the {posargs}
+# substitution variable is misparsed and causes a traceback. Ensure that any
+# changes here are reflected in [testenv:xenial].
+commands =
+ python ./tools/pipremove jsonschema
+ python -m pytest {posargs:tests/unittests cloudinit}
+basepython = {[testenv:xenial]basepython}
deps =
- # requirements
- jinja2==2.10
- PyYAML==3.12
- oauthlib==2.0.6
- configobj==5.0.6
- requests==2.18.4
+ # Refer to the comment in [xenial-shared-deps] for details
+ {[xenial-shared-deps]deps}
+ # jsonpatch in xenial is 1.10, not 1.19 (#839779). The oldest version
+ # to work with python3.6 is 1.16 as found in Artful. To keep default
+ # invocation of 'tox' happy, accept the difference in version here.
jsonpatch==1.16
- six==1.11.0
- -r{toxinidir}/test-requirements.txt
+ pytest==3.0.7
[testenv:tip-pycodestyle]
commands = {envpython} -m pycodestyle {posargs:cloudinit/ tests/ tools/}
@@ -123,7 +116,7 @@ deps = pycodestyle
[testenv:pyflakes]
commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/}
deps =
- pyflakes==1.6.0
+ pyflakes==2.1.1
[testenv:tip-pyflakes]
commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/}
@@ -141,6 +134,6 @@ deps =
[testenv:citest]
basepython = python3
commands = {envpython} -m tests.cloud_tests {posargs}
-passenv = HOME
+passenv = HOME TRAVIS
deps =
-r{toxinidir}/integration-requirements.txt
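
A note on the [xenial-shared-deps] mechanism used above: it is not a testenv,
so tox never runs it; the "{[xenial-shared-deps]deps}" references pull its
deps value into both xenial environments, keeping a single pinned list. tox
performs that interpolation itself; the sketch below only mimics the idea with
configparser and a regex, on a trimmed-down ini (an illustration, not tox's
actual implementation):

    import configparser
    import re
    import textwrap

    TOX_INI = textwrap.dedent("""\
        [xenial-shared-deps]
        deps =
            jinja2==2.8
            mock==1.3.0

        [testenv:xenial]
        deps =
            {[xenial-shared-deps]deps}
            pytest==2.8.7
        """)

    parser = configparser.ConfigParser()
    parser.read_string(TOX_INI)

    def expand(value):
        # Replace each {[section]key} reference with that section's raw value.
        return re.sub(r"\{\[([^]]+)\]([^}]+)\}",
                      lambda m: parser.get(m.group(1), m.group(2), raw=True),
                      value)

    print(expand(parser.get("testenv:xenial", "deps", raw=True)).split())
    # -> ['jinja2==2.8', 'mock==1.3.0', 'pytest==2.8.7']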