author      Alberto Contreras <alberto.contreras@canonical.com>    2022-07-29 18:51:43 +0200
committer   git-ubuntu importer <ubuntu-devel-discuss@lists.ubuntu.com>    2022-07-30 13:51:09 +0000
commit      941cd498e204632c63917801c22f2411826cf7da (patch)
tree        409db917dff8970f5022279e0ea6d9692d36bbdf
parent      cff8a8b47acf7048ad08bd121e677fb86e73635b (diff)
download    cloud-init-git-941cd498e204632c63917801c22f2411826cf7da.tar.gz

22.2-115-g6e498773-0ubuntu1~22.10.1 (patches unapplied)

Imported using git-ubuntu import.
-rw-r--r--  .github/workflows/check_format.yml | 4
-rw-r--r--  .gitignore | 1
-rw-r--r--  CONTRIBUTING.rst | 8
-rw-r--r--  README.md | 2
-rw-r--r--  cloudinit/analyze/__main__.py | 13
-rwxr-xr-x  cloudinit/cmd/clean.py | 23
-rwxr-xr-x  cloudinit/cmd/cloud_id.py | 3
-rwxr-xr-x  cloudinit/cmd/devel/hotplug_hook.py | 4
-rwxr-xr-x  cloudinit/cmd/main.py | 5
-rw-r--r--  cloudinit/config/cc_apk_configure.py | 1
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 1
-rw-r--r--  cloudinit/config/cc_apt_pipelining.py | 1
-rw-r--r--  cloudinit/config/cc_bootcmd.py | 1
-rw-r--r--  cloudinit/config/cc_byobu.py | 1
-rw-r--r--  cloudinit/config/cc_ca_certs.py | 1
-rw-r--r--  cloudinit/config/cc_chef.py | 1
-rw-r--r--  cloudinit/config/cc_debug.py | 1
-rw-r--r--  cloudinit/config/cc_disable_ec2_metadata.py | 1
-rw-r--r--  cloudinit/config/cc_disk_setup.py | 1
-rw-r--r--  cloudinit/config/cc_fan.py | 1
-rw-r--r--  cloudinit/config/cc_final_message.py | 1
-rw-r--r--  cloudinit/config/cc_growpart.py | 1
-rw-r--r--  cloudinit/config/cc_grub_dpkg.py | 6
-rw-r--r--  cloudinit/config/cc_install_hotplug.py | 1
-rw-r--r--  cloudinit/config/cc_keyboard.py | 1
-rw-r--r--  cloudinit/config/cc_keys_to_console.py | 1
-rw-r--r--  cloudinit/config/cc_landscape.py | 1
-rw-r--r--  cloudinit/config/cc_locale.py | 1
-rw-r--r--  cloudinit/config/cc_lxd.py | 81
-rw-r--r--  cloudinit/config/cc_mcollective.py | 1
-rw-r--r--  cloudinit/config/cc_migrator.py | 1
-rw-r--r--  cloudinit/config/cc_mounts.py | 41
-rw-r--r--  cloudinit/config/cc_ntp.py | 1
-rw-r--r--  cloudinit/config/cc_package_update_upgrade_install.py | 7
-rw-r--r--  cloudinit/config/cc_phone_home.py | 1
-rw-r--r--  cloudinit/config/cc_power_state_change.py | 1
-rw-r--r--  cloudinit/config/cc_puppet.py | 1
-rw-r--r--  cloudinit/config/cc_refresh_rmc_and_interface.py | 1
-rw-r--r--  cloudinit/config/cc_reset_rmc.py | 1
-rw-r--r--  cloudinit/config/cc_resizefs.py | 1
-rw-r--r--  cloudinit/config/cc_resolv_conf.py | 1
-rw-r--r--  cloudinit/config/cc_rh_subscription.py | 1
-rw-r--r--  cloudinit/config/cc_rightscale_userdata.py | 1
-rw-r--r--  cloudinit/config/cc_rsyslog.py | 1
-rw-r--r--  cloudinit/config/cc_runcmd.py | 1
-rw-r--r--  cloudinit/config/cc_salt_minion.py | 1
-rw-r--r--  cloudinit/config/cc_scripts_per_boot.py | 1
-rw-r--r--  cloudinit/config/cc_scripts_per_instance.py | 1
-rw-r--r--  cloudinit/config/cc_scripts_per_once.py | 1
-rw-r--r--  cloudinit/config/cc_scripts_user.py | 1
-rw-r--r--  cloudinit/config/cc_scripts_vendor.py | 1
-rw-r--r--  cloudinit/config/cc_seed_random.py | 1
-rw-r--r--  cloudinit/config/cc_set_hostname.py | 3
-rw-r--r--  cloudinit/config/cc_set_passwords.py | 127
-rw-r--r--  cloudinit/config/cc_snap.py | 1
-rw-r--r--  cloudinit/config/cc_spacewalk.py | 1
-rw-r--r--  cloudinit/config/cc_ssh.py | 1
-rw-r--r--  cloudinit/config/cc_ssh_authkey_fingerprints.py | 1
-rw-r--r--  cloudinit/config/cc_ssh_import_id.py | 1
-rw-r--r--  cloudinit/config/cc_timezone.py | 1
-rw-r--r--  cloudinit/config/cc_ubuntu_advantage.py | 108
-rw-r--r--  cloudinit/config/cc_ubuntu_autoinstall.py | 142
-rw-r--r--  cloudinit/config/cc_ubuntu_drivers.py | 1
-rw-r--r--  cloudinit/config/cc_update_etc_hosts.py | 1
-rw-r--r--  cloudinit/config/cc_update_hostname.py | 1
-rw-r--r--  cloudinit/config/cc_users_groups.py | 1
-rw-r--r--  cloudinit/config/cc_write_files.py | 11
-rw-r--r--  cloudinit/config/cc_write_files_deferred.py | 6
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py | 1
-rw-r--r--  cloudinit/config/cc_zypper_add_repo.py | 1
-rw-r--r--  cloudinit/config/modules.py | 47
-rw-r--r--  cloudinit/config/schema.py | 579
-rw-r--r--  cloudinit/config/schemas/schema-cloud-config-v1.json | 320
-rw-r--r--  cloudinit/distros/__init__.py | 38
-rw-r--r--  cloudinit/distros/bsd.py | 5
-rw-r--r--  cloudinit/features.py | 10
-rw-r--r--  cloudinit/importer.py | 44
-rw-r--r--  cloudinit/net/__init__.py | 7
-rw-r--r--  cloudinit/net/activators.py | 46
-rw-r--r--  cloudinit/net/network_state.py | 16
-rw-r--r--  cloudinit/settings.py | 2
-rw-r--r--  cloudinit/sources/DataSourceAzure.py | 318
-rw-r--r--  cloudinit/sources/helpers/azure.py | 302
-rw-r--r--  config/clean.d/README | 18
-rw-r--r--  config/cloud.cfg.tmpl | 8
-rw-r--r--  debian/changelog | 53
-rw-r--r--  debian/cloud-init.lintian-overrides | 10
-rw-r--r--  debian/cloud-init.postinst | 4
-rw-r--r--  debian/control | 2
-rwxr-xr-x  debian/gbp_format_changelog | 1
-rw-r--r--  debian/source/lintian-overrides | 2
-rw-r--r--  doc-requirements.txt | 3
-rw-r--r--  doc/examples/cloud-config-apt.txt | 2
-rw-r--r--  doc/examples/cloud-config-install-packages.txt | 2
-rw-r--r--  doc/examples/cloud-config-lxd.txt | 2
-rw-r--r--  doc/examples/cloud-config.txt | 13
-rw-r--r--  doc/rtd/topics/availability.rst | 2
-rw-r--r--  doc/rtd/topics/bugs.rst | 8
-rw-r--r--  doc/rtd/topics/cli.rst | 5
-rw-r--r--  doc/rtd/topics/datasources/vmware.rst | 4
-rw-r--r--  doc/rtd/topics/module_creation.rst | 5
-rw-r--r--  doc/rtd/topics/network-config.rst | 27
-rw-r--r--  setup.py | 1
-rw-r--r--  tests/hypothesis.py | 20
-rw-r--r--  tests/hypothesis_jsonschema.py | 12
-rw-r--r--  tests/integration_tests/cmd/test_schema.py | 66
-rw-r--r--  tests/integration_tests/cmd/test_status.py | 3
-rw-r--r--  tests/integration_tests/datasources/test_lxd_discovery.py | 1
-rw-r--r--  tests/integration_tests/modules/test_ca_certs.py | 42
-rw-r--r--  tests/integration_tests/modules/test_combined.py | 29
-rw-r--r--  tests/integration_tests/modules/test_lxd.py | 101
-rw-r--r--  tests/integration_tests/modules/test_lxd_bridge.py | 46
-rw-r--r--  tests/integration_tests/modules/test_set_password.py | 27
-rw-r--r--  tests/integration_tests/modules/test_ubuntu_autoinstall.py | 26
-rw-r--r--  tests/integration_tests/util.py | 29
-rw-r--r--  tests/unittests/analyze/test_boot.py | 26
-rw-r--r--  tests/unittests/analyze/test_dump.py | 4
-rw-r--r--  tests/unittests/cmd/devel/test_hotplug_hook.py | 10
-rw-r--r--  tests/unittests/cmd/test_clean.py | 301
-rw-r--r--  tests/unittests/cmd/test_query.py | 2
-rw-r--r--  tests/unittests/config/test_apt_source_v1.py | 33
-rw-r--r--  tests/unittests/config/test_apt_source_v3.py | 19
-rw-r--r--  tests/unittests/config/test_cc_ca_certs.py | 9
-rw-r--r--  tests/unittests/config/test_cc_grub_dpkg.py | 55
-rw-r--r--  tests/unittests/config/test_cc_landscape.py | 9
-rw-r--r--  tests/unittests/config/test_cc_lxd.py | 126
-rw-r--r--  tests/unittests/config/test_cc_mcollective.py | 5
-rw-r--r--  tests/unittests/config/test_cc_mounts.py | 39
-rw-r--r--  tests/unittests/config/test_cc_package_update_upgrade_install.py | 24
-rw-r--r--  tests/unittests/config/test_cc_power_state_change.py | 16
-rw-r--r--  tests/unittests/config/test_cc_set_passwords.py | 613
-rw-r--r--  tests/unittests/config/test_cc_ubuntu_advantage.py | 652
-rw-r--r--  tests/unittests/config/test_cc_ubuntu_autoinstall.py | 141
-rw-r--r--  tests/unittests/config/test_cc_users_groups.py | 105
-rw-r--r--  tests/unittests/config/test_cc_write_files.py | 12
-rw-r--r--  tests/unittests/config/test_cc_yum_add_repo.py | 18
-rw-r--r--  tests/unittests/config/test_modules.py | 174
-rw-r--r--  tests/unittests/config/test_schema.py | 621
-rw-r--r--  tests/unittests/distros/test_create_users.py | 36
-rw-r--r--  tests/unittests/distros/test_netconfig.py | 59
-rw-r--r--  tests/unittests/helpers.py | 8
-rw-r--r--  tests/unittests/net/test_dns.py | 32
-rw-r--r--  tests/unittests/net/test_network_state.py | 20
-rw-r--r--  tests/unittests/net/test_networkd.py | 14
-rw-r--r--  tests/unittests/runs/test_simple_run.py | 5
-rw-r--r--  tests/unittests/sources/test_azure.py | 454
-rw-r--r--  tests/unittests/sources/test_azure_helper.py | 566
-rw-r--r--  tests/unittests/sources/test_ec2.py | 2
-rw-r--r--  tests/unittests/test_features.py | 5
-rw-r--r--  tests/unittests/test_net_activators.py | 31
-rw-r--r--  tests/unittests/test_util.py | 66
-rw-r--r--  tests/unittests/util.py | 8
-rw-r--r--  tools/.github-cla-signers | 11
-rw-r--r--  tox.ini | 65
154 files changed, 5539 insertions, 1811 deletions
diff --git a/.github/workflows/check_format.yml b/.github/workflows/check_format.yml
index 7b52d278..874534c0 100644
--- a/.github/workflows/check_format.yml
+++ b/.github/workflows/check_format.yml
@@ -18,7 +18,7 @@ jobs:
lint-with:
- {tip-versions: false, os: ubuntu-18.04}
- {tip-versions: true, os: ubuntu-latest}
- name: ${{ matrix.lint-with.tip-versions && 'Check format (tip)' || 'Check format (pinned)' }}
+ name: Check ${{ matrix.lint-with.tip-versions && 'tip-' || '' }}${{ matrix.env }}
runs-on: ${{ matrix.lint-with.os }}
steps:
- name: "Checkout #1"
@@ -38,7 +38,7 @@ jobs:
run: python3 --version
- name: Test
- if: matrix.lint-with.tip-versions
+ if: ${{ !matrix.lint-with.tip-versions }}
env:
# matrix env: not to be confused w/environment variables or testenv
TOXENV: ${{ matrix.env }}
diff --git a/.gitignore b/.gitignore
index 6eae45c9..9923afae 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,6 +21,7 @@ stage
.vscode/
htmlcov/
tags
+.hypothesis/
# Ignore packaging artifacts
cloud-init.dsc
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 50ca7cfb..62628fd5 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -72,10 +72,10 @@ Follow these steps to submit your first pull request to cloud-init:
.. code:: sh
- git clone git://github.com/canonical/cloud-init
+ git clone git@github.com:GH_USER/cloud-init.git
cd cloud-init
- git remote add GH_USER git@github.com:GH_USER/cloud-init.git
- git push GH_USER main
+ git remote add upstream git@github.com:canonical/cloud-init.git
+ git push origin main
* Read through the cloud-init `Code Review Process`_, so you understand
how your changes will end up in cloud-init's codebase.
@@ -144,7 +144,7 @@ Do these things for each feature or bug
* Push your changes to your personal GitHub repository::
- git push -u GH_USER my-topic-branch
+ git push -u origin my-topic-branch
* Use your browser to create a pull request:
diff --git a/README.md b/README.md
index 0a4d36c6..64a1635d 100644
--- a/README.md
+++ b/README.md
@@ -39,7 +39,7 @@ get in contact with that distribution and send them our way!
| Supported OSes | Supported Public Clouds | Supported Private Clouds |
| --- | --- | --- |
-| Alpine Linux<br />ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />openEuler<br />OpenMandriva<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux/CloudLinux/MIRACLE LINUX<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
+| Alpine Linux<br />Arch Linux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />openEuler<br />OpenMandriva<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux/CloudLinux/MIRACLE LINUX<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
## To start developing cloud-init
diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py
index 36a5be78..df08d46c 100644
--- a/cloudinit/analyze/__main__.py
+++ b/cloudinit/analyze/__main__.py
@@ -6,6 +6,7 @@ import argparse
import re
import sys
from datetime import datetime
+from typing import IO
from cloudinit.util import json_dumps
@@ -192,6 +193,7 @@ def analyze_boot(name, args):
}
outfh.write(status_map[status_code].format(**kwargs))
+ clean_io(infh, outfh)
return status_code
@@ -218,6 +220,7 @@ def analyze_blame(name, args):
outfh.write("\n".join(srecs) + "\n")
outfh.write("\n")
outfh.write("%d boot records analyzed\n" % (idx + 1))
+ clean_io(infh, outfh)
def analyze_show(name, args):
@@ -254,12 +257,14 @@ def analyze_show(name, args):
)
outfh.write("\n".join(record) + "\n")
outfh.write("%d boot records analyzed\n" % (idx + 1))
+ clean_io(infh, outfh)
def analyze_dump(name, args):
"""Dump cloud-init events in json format"""
(infh, outfh) = configure_io(args)
outfh.write(json_dumps(_get_events(infh)) + "\n")
+ clean_io(infh, outfh)
def _get_events(infile):
@@ -293,6 +298,14 @@ def configure_io(args):
return (infh, outfh)
+def clean_io(*file_handles: IO) -> None:
+ """close filehandles"""
+ for file_handle in file_handles:
+ if file_handle in (sys.stdin, sys.stdout):
+ continue
+ file_handle.close()
+
+
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
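The clean_io() helper added above closes whatever handles configure_io() opened while leaving the shared process streams alone, so repeated analyze subcommands no longer leak file descriptors. A minimal standalone sketch of that behavior (the path below is a hypothetical stand-in; real handles come from the argparse options):

    import sys
    from typing import IO

    def clean_io(*file_handles: IO) -> None:
        """Close file handles, skipping the shared process streams."""
        for file_handle in file_handles:
            if file_handle in (sys.stdin, sys.stdout):
                continue
            file_handle.close()

    outfh = sys.stdout                        # default output stream
    infh = open("/tmp/example.log", "w")      # hypothetical file handle
    clean_io(infh, outfh)                     # closes infh, leaves stdout open
    assert infh.closed and not outfh.closed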
diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py
index 1a017608..65d3eece 100755
--- a/cloudinit/cmd/clean.py
+++ b/cloudinit/cmd/clean.py
@@ -11,8 +11,9 @@ import glob
import os
import sys
+from cloudinit import settings
from cloudinit.stages import Init
-from cloudinit.subp import ProcessExecutionError, subp
+from cloudinit.subp import ProcessExecutionError, runparts, subp
from cloudinit.util import (
del_dir,
del_file,
@@ -21,6 +22,8 @@ from cloudinit.util import (
is_link,
)
+ETC_MACHINE_ID = "/etc/machine-id"
+
def get_parser(parser=None):
"""Build or extend an arg parser for clean utility.
@@ -48,6 +51,15 @@ def get_parser(parser=None):
help="Remove cloud-init logs.",
)
parser.add_argument(
+ "--machine-id",
+ action="store_true",
+ default=False,
+ help=(
+ "Remove /etc/machine-id for golden image creation."
+ " Next boot generates a new machine-id."
+ ),
+ )
+ parser.add_argument(
"-r",
"--reboot",
action="store_true",
@@ -94,12 +106,21 @@ def remove_artifacts(remove_logs, remove_seed=False):
except OSError as e:
error("Could not remove {0}: {1}".format(path, str(e)))
return 1
+ try:
+ runparts(settings.CLEAN_RUNPARTS_DIR)
+ except Exception as e:
+ error(
+ f"Failure during run-parts of {settings.CLEAN_RUNPARTS_DIR}: {e}"
+ )
+ return 1
return 0
def handle_clean_args(name, args):
"""Handle calls to 'cloud-init clean' as a subcommand."""
exit_code = remove_artifacts(args.remove_logs, args.remove_seed)
+ if args.machine_id:
+ del_file(ETC_MACHINE_ID)
if exit_code == 0 and args.reboot:
cmd = ["shutdown", "-r", "now"]
try:
diff --git a/cloudinit/cmd/cloud_id.py b/cloudinit/cmd/cloud_id.py
index 34160f8c..567d341a 100755
--- a/cloudinit/cmd/cloud_id.py
+++ b/cloudinit/cmd/cloud_id.py
@@ -76,7 +76,8 @@ def handle_args(name, args):
return 3
try:
- instance_data = json.load(open(args.instance_data))
+ with open(args.instance_data) as file:
+ instance_data = json.load(file)
except IOError:
return error(
"File not found '%s'. Provide a path to instance data json file"
diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py
index bc8f3ef3..f95e8cc0 100755
--- a/cloudinit/cmd/devel/hotplug_hook.py
+++ b/cloudinit/cmd/devel/hotplug_hook.py
@@ -10,7 +10,7 @@ import time
from cloudinit import log, reporting, stages
from cloudinit.event import EventScope, EventType
-from cloudinit.net import activators, read_sys_net_safe
+from cloudinit.net import read_sys_net_safe
from cloudinit.net.network_state import parse_net_config_data
from cloudinit.reporting import events
from cloudinit.sources import DataSource, DataSourceNotFoundException
@@ -132,7 +132,7 @@ class NetHandler(UeventHandler):
bring_up=False,
)
interface_name = os.path.basename(self.devpath)
- activator = activators.select_activator()
+ activator = self.datasource.distro.network_activator()
if self.action == "add":
if not activator.bring_up_interface(interface_name):
raise RuntimeError(
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 4f157870..a6fb7088 100755
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -456,7 +456,10 @@ def main_init(name, args):
# Validate user-data adheres to schema definition
if os.path.exists(init.paths.get_ipath_cur("userdata_raw")):
validate_cloudconfig_schema(
- config=init.cfg, strict=False, log_details=False
+ config=init.cfg,
+ strict=False,
+ log_details=False,
+ log_deprecations=True,
)
else:
LOG.debug("Skipping user-data validation. No user-data found.")
diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py
index 0952c971..0fd7d229 100644
--- a/cloudinit/config/cc_apk_configure.py
+++ b/cloudinit/config/cc_apk_configure.py
@@ -100,6 +100,7 @@ meta: MetaSchema = {
),
],
"frequency": frequency,
+ "activate_by_schema_keys": ["apk_repos"],
}
__doc__ = get_meta_doc(meta)
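The new ``activate_by_schema_keys`` metadata, added to every module in this commit, lets the module runner skip a module when none of the listed top-level config keys is present; an empty list means the module always runs. A rough sketch of that rule, under the assumption that the real check lives in the cloudinit/config/modules.py changes (this is an illustration, not the actual implementation):

    def is_activated(meta: dict, cfg: dict) -> bool:
        # Empty or absent list: module is unconditionally run.
        keys = meta.get("activate_by_schema_keys") or []
        if not keys:
            return True
        # Otherwise run only if some activating key appears in the config.
        return any(key in cfg for key in keys)

    meta = {"activate_by_schema_keys": ["apk_repos"]}
    assert is_activated(meta, {"apk_repos": {"alpine_repo": {}}})   # run
    assert not is_activated(meta, {"runcmd": ["true"]})             # skip
    assert is_activated({"activate_by_schema_keys": []}, {})        # always run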
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 5403499e..9d39c918 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -118,6 +118,7 @@ meta: MetaSchema = {
)
],
"frequency": frequency,
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index 901633d3..82a8e6e0 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -50,6 +50,7 @@ meta: MetaSchema = {
"apt_pipelining: os",
"apt_pipelining: 3",
],
+ "activate_by_schema_keys": ["apt_pipelining"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index bd14aede..4ee79859 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -54,6 +54,7 @@ meta: MetaSchema = {
)
],
"frequency": PER_ALWAYS,
+ "activate_by_schema_keys": ["bootcmd"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index fbc20410..e48fce34 100644
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
@@ -43,6 +43,7 @@ meta: MetaSchema = {
"byobu_by_default: enable-user",
"byobu_by_default: disable-system",
],
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index 6084cb4c..6c9c7ab4 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -66,6 +66,7 @@ meta: MetaSchema = {
"""
)
],
+ "activate_by_schema_keys": ["ca_certs", "ca-certs"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 11060f3b..5ab2b401 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -136,6 +136,7 @@ meta: MetaSchema = {
)
],
"frequency": frequency,
+ "activate_by_schema_keys": ["chef"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
index a00f2823..bb5f5062 100644
--- a/cloudinit/config/cc_debug.py
+++ b/cloudinit/config/cc_debug.py
@@ -40,6 +40,7 @@ meta: MetaSchema = {
"""
)
],
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
index 88cc28e2..a7832e25 100644
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -31,6 +31,7 @@ meta: MetaSchema = {
"distros": [ALL_DISTROS],
"frequency": PER_ALWAYS,
"examples": ["disable_ec2_metadata: true"],
+ "activate_by_schema_keys": ["disable_ec2_metadata"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index ee05ea87..182e9401 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -81,6 +81,7 @@ meta: MetaSchema = {
"""
)
],
+ "activate_by_schema_keys": ["disk_setup", "fs_setup"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
index 57c762a1..094baa09 100644
--- a/cloudinit/config/cc_fan.py
+++ b/cloudinit/config/cc_fan.py
@@ -49,6 +49,7 @@ meta: MetaSchema = {
"""
)
],
+ "activate_by_schema_keys": ["fan"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index 89be520e..c44f021f 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -45,6 +45,7 @@ meta: MetaSchema = {
"""
)
],
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index f23a6bb8..e3ba0e9a 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -81,6 +81,7 @@ meta: MetaSchema = {
"""
),
],
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index c23e40f5..f2fa6985 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -49,6 +49,7 @@ meta: MetaSchema = {
"""
)
],
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
@@ -133,11 +134,6 @@ def handle(name, cfg, _cloud, log, _args):
if idevs_empty is None:
idevs_empty = not idevs
elif not isinstance(idevs_empty, bool):
- log.warning(
- "DEPRECATED: grub_dpkg: grub-pc/install_devices_empty value of "
- f"'{idevs_empty}' is not boolean. Use of non-boolean values "
- "will be removed in a future version of cloud-init."
- )
idevs_empty = util.translate_bool(idevs_empty)
idevs_empty = str(idevs_empty).lower()
diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py
index a3668232..e29b58b9 100644
--- a/cloudinit/config/cc_install_hotplug.py
+++ b/cloudinit/config/cc_install_hotplug.py
@@ -49,6 +49,7 @@ meta: MetaSchema = {
"""
),
],
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_keyboard.py b/cloudinit/config/cc_keyboard.py
index fbb166f6..e44b8648 100644
--- a/cloudinit/config/cc_keyboard.py
+++ b/cloudinit/config/cc_keyboard.py
@@ -48,6 +48,7 @@ meta: MetaSchema = {
),
],
"frequency": PER_INSTANCE,
+ "activate_by_schema_keys": ["keyboard"],
}
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index dd8b92fe..115df520 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -61,6 +61,7 @@ meta: MetaSchema = {
),
],
"frequency": PER_INSTANCE,
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index ede09bd9..2607b866 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -91,6 +91,7 @@ meta: MetaSchema = {
),
],
"frequency": PER_INSTANCE,
+ "activate_by_schema_keys": ["landscape"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
index 6a31933e..dd7fda38 100644
--- a/cloudinit/config/cc_locale.py
+++ b/cloudinit/config/cc_locale.py
@@ -42,6 +42,7 @@ meta: MetaSchema = {
),
],
"frequency": PER_INSTANCE,
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 847a7c3c..490533c0 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -7,10 +7,13 @@
"""LXD: configure lxd with ``lxd init`` and optionally lxd-bridge"""
import os
+from logging import Logger
from textwrap import dedent
+from typing import List, Tuple
from cloudinit import log as logging
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
@@ -22,9 +25,9 @@ _DEFAULT_NETWORK_NAME = "lxdbr0"
MODULE_DESCRIPTION = """\
This module configures lxd with user specified options using ``lxd init``.
If lxd is not present on the system but lxd configuration is provided, then
-lxd will be installed. If the selected storage backend is zfs, then zfs will
-be installed if missing. If network bridge configuration is provided, then
-lxd-bridge will be configured accordingly.
+lxd will be installed. If the userspace utility for the selected storage
+backend is not installed, it will be installed. If network bridge
+configuration is provided, then lxd-bridge will be configured accordingly.
"""
distros = ["ubuntu"]
@@ -55,6 +58,7 @@ meta: MetaSchema = {
storage_create_loop: 10
bridge:
mode: new
+ mtu: 1500
name: lxdbr0
ipv4_address: 10.0.8.1
ipv4_netmask: 24
@@ -70,12 +74,13 @@ meta: MetaSchema = {
),
],
"frequency": PER_INSTANCE,
+ "activate_by_schema_keys": ["lxd"],
}
__doc__ = get_meta_doc(meta)
-def handle(name, cfg, cloud, log, args):
+def handle(name, cfg, cloud: Cloud, log: Logger, args):
# Get config
lxd_cfg = cfg.get("lxd")
if not lxd_cfg:
@@ -105,15 +110,7 @@ def handle(name, cfg, cloud, log, args):
type(bridge_cfg),
)
bridge_cfg = {}
-
- # Install the needed packages
- packages = []
- if not subp.which("lxd"):
- packages.append("lxd")
-
- if init_cfg.get("storage_backend") == "zfs" and not subp.which("zfs"):
- packages.append("zfsutils-linux")
-
+ packages = get_required_packages(init_cfg)
if len(packages):
try:
cloud.distro.install_packages(packages)
@@ -123,7 +120,10 @@ def handle(name, cfg, cloud, log, args):
# Set up lxd if init config is given
if init_cfg:
- init_keys = (
+
+ # type is known, number of elements is not
+ # in the case of the ubuntu+lvm backend workaround
+ init_keys: Tuple[str, ...] = (
"network_address",
"network_port",
"storage_backend",
@@ -132,7 +132,36 @@ def handle(name, cfg, cloud, log, args):
"storage_pool",
"trust_password",
)
+
subp.subp(["lxd", "waitready", "--timeout=300"])
+
+ # Bug https://bugs.launchpad.net/ubuntu/+source/linux-kvm/+bug/1982780
+ kernel = util.system_info()["uname"][2]
+ if init_cfg["storage_backend"] == "lvm" and not os.path.exists(
+ f"/lib/modules/{kernel}/kernel/drivers/md/dm-thin-pool.ko"
+ ):
+ log.warning(
+ "cloud-init doesn't use thinpool by default on Ubuntu due to "
+ "LP #1982780. This behavior will change in the future.",
+ )
+ subp.subp(
+ [
+ "lxc",
+ "storage",
+ "create",
+ "default",
+ "lvm",
+ "lvm.use_thinpool=false",
+ ]
+ )
+
+ # Since the storage pool was created manually above with
+ # use_thinpool=false, filter storage_backend out of the init
+ # keys so storage is not configured twice
+ init_keys = tuple(
+ key for key in init_keys if key != "storage_backend"
+ )
+
cmd = ["lxd", "init", "--auto"]
for k in init_keys:
if init_cfg.get(k):
@@ -298,6 +327,12 @@ def bridge_to_cmd(bridge_cfg):
if bridge_cfg.get("domain"):
cmd_create.append("dns.domain=%s" % bridge_cfg.get("domain"))
+ # if the default schema value is passed (-1) don't pass arguments
+ # to LXD. Use LXD defaults unless user manually sets a number
+ mtu = bridge_cfg.get("mtu", -1)
+ if mtu != -1:
+ cmd_create.append(f"bridge.mtu={mtu}")
+
return cmd_create, cmd_attach
@@ -350,4 +385,20 @@ def maybe_cleanup_default(
LOG.debug(msg, nic_name, profile, fail_assume_enoent)
-# vi: ts=4 expandtab
+def get_required_packages(cfg: dict) -> List[str]:
+ """identify required packages for install"""
+ packages = []
+ if not subp.which("lxd"):
+ packages.append("lxd")
+
+ # binary for pool creation must be available for the requested backend:
+ # zfs, lvcreate, mkfs.btrfs
+ storage: str = cfg.get("storage_backend", "")
+ if storage:
+ if storage == "zfs" and not subp.which("zfs"):
+ packages.append("zfsutils-linux")
+ if storage == "lvm" and not subp.which("lvcreate"):
+ packages.append("lvm2")
+ if storage == "btrfs" and not subp.which("mkfs.btrfs"):
+ packages.append("btrfs-progs")
+ return packages
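get_required_packages() now maps the requested storage backend to the package providing its pool-creation binary (zfs, lvcreate, mkfs.btrfs). A self-contained approximation for experimenting, substituting shutil.which for cloudinit.subp.which (an assumption for the sketch; the result depends on which binaries are already on PATH):

    from shutil import which
    from typing import List

    def get_required_packages(cfg: dict) -> List[str]:
        packages = []
        if not which("lxd"):
            packages.append("lxd")
        # Pool-creation binary must exist for the requested backend.
        storage = cfg.get("storage_backend", "")
        if storage == "zfs" and not which("zfs"):
            packages.append("zfsutils-linux")
        if storage == "lvm" and not which("lvcreate"):
            packages.append("lvm2")
        if storage == "btrfs" and not which("mkfs.btrfs"):
            packages.append("btrfs-progs")
        return packages

    # On a host with none of the tools installed:
    print(get_required_packages({"storage_backend": "lvm"}))
    # -> ['lxd', 'lvm2']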
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index 33f7556d..f4fd456e 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -82,6 +82,7 @@ meta: MetaSchema = {
),
],
"frequency": PER_INSTANCE,
+ "activate_by_schema_keys": ["mcollective"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py
index 6aed54b3..f1cd788a 100644
--- a/cloudinit/config/cc_migrator.py
+++ b/cloudinit/config/cc_migrator.py
@@ -32,6 +32,7 @@ meta: MetaSchema = {
"distros": distros,
"examples": ["# Do not migrate cloud-init semaphores\nmigrate: false\n"],
"frequency": frequency,
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 1d05c9b9..843ea5eb 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -9,6 +9,7 @@
"""Mounts: Configure mount points and swap files"""
import logging
+import math
import os
import re
from string import whitespace
@@ -97,6 +98,7 @@ meta: MetaSchema = {
),
],
"frequency": PER_INSTANCE,
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
@@ -110,6 +112,8 @@ NETWORK_NAME_RE = re.compile(NETWORK_NAME_FILTER)
WS = re.compile("[%s]+" % (whitespace))
FSTAB_PATH = "/etc/fstab"
MNT_COMMENT = "comment=cloudconfig"
+MB = 2**20
+GB = 2**30
LOG = logging.getLogger(__name__)
@@ -210,13 +214,12 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
if memsize is None:
memsize = util.read_meminfo()["total"]
- GB = 2**30
- sugg_max = 8 * GB
+ sugg_max = memsize * 2
info = {"avail": "na", "max_in": maxsize, "mem": memsize}
if fsys is None and maxsize is None:
- # set max to 8GB default if no filesystem given
+ # set max to default if no filesystem given
maxsize = sugg_max
elif fsys:
statvfs = os.statvfs(fsys)
@@ -234,35 +237,17 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
info["max"] = maxsize
- formulas = [
- # < 1G: swap = double memory
- (1 * GB, lambda x: x * 2),
- # < 2G: swap = 2G
- (2 * GB, lambda x: 2 * GB),
- # < 4G: swap = memory
- (4 * GB, lambda x: x),
- # < 16G: 4G
- (16 * GB, lambda x: 4 * GB),
- # < 64G: 1/2 M up to max
- (64 * GB, lambda x: x / 2),
- ]
-
- size = None
- for top, func in formulas:
- if memsize <= top:
- size = min(func(memsize), maxsize)
- # if less than 1/2 memory and not much, return 0
- if size < (memsize / 2) and size < 4 * GB:
- size = 0
- break
- break
+ if memsize < 4 * GB:
+ minsize = memsize
+ elif memsize < 16 * GB:
+ minsize = 4 * GB
+ else:
+ minsize = round(math.sqrt(memsize / GB)) * GB
- if size is not None:
- size = maxsize
+ size = min(minsize, maxsize)
info["size"] = size
- MB = 2**20
pinfo = {}
for k, v in info.items():
if isinstance(v, int):
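The table of per-range formulas in suggested_swapsize() is replaced by a simpler rule: swap equals memory below 4 GB, a flat 4 GB up to 16 GB, and sqrt(memory in GB) GB beyond that, always capped by maxsize (which now defaults to twice memory rather than a flat 8 GB). A worked sketch of the new sizing:

    import math

    GB = 2**30

    def new_swapsize(memsize: int, maxsize: int) -> int:
        # Mirrors the replacement logic in suggested_swapsize() above.
        if memsize < 4 * GB:
            minsize = memsize
        elif memsize < 16 * GB:
            minsize = 4 * GB
        else:
            minsize = round(math.sqrt(memsize / GB)) * GB
        return min(minsize, maxsize)

    for mem_gb in (2, 8, 64):
        mem = mem_gb * GB
        print(mem_gb, "GB RAM ->", new_swapsize(mem, 2 * mem) // GB, "GB swap")
    # 2 GB RAM -> 2 GB swap; 8 GB -> 4 GB; 64 GB -> 8 GB (sqrt(64) = 8)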
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index ef1c02ca..20c0ad8e 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -218,6 +218,7 @@ meta: MetaSchema = {
),
],
"frequency": PER_INSTANCE,
+ "activate_by_schema_keys": ["ntp"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
index 5198305e..a8a3e9ff 100644
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ b/cloudinit/config/cc_package_update_upgrade_install.py
@@ -47,6 +47,13 @@ meta: MetaSchema = {
"""
)
],
+ "activate_by_schema_keys": [
+ "apt_update",
+ "package_update",
+ "apt_upgrade",
+ "package_upgrade",
+ "packages",
+ ],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 0534a83a..dee30e96 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -88,6 +88,7 @@ meta: MetaSchema = {
"""
),
],
+ "activate_by_schema_keys": ["phone_home"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 7fc4e5ca..39459bfe 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -72,6 +72,7 @@ meta: MetaSchema = {
"""
),
],
+ "activate_by_schema_keys": ["power_state"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 2e964dcf..14467e36 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -98,6 +98,7 @@ meta: MetaSchema = {
"""
),
],
+ "activate_by_schema_keys": ["puppet"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_refresh_rmc_and_interface.py b/cloudinit/config/cc_refresh_rmc_and_interface.py
index 3ed5612b..180a8873 100644
--- a/cloudinit/config/cc_refresh_rmc_and_interface.py
+++ b/cloudinit/config/cc_refresh_rmc_and_interface.py
@@ -42,6 +42,7 @@ meta: MetaSchema = {
"distros": [ALL_DISTROS],
"frequency": PER_ALWAYS,
"examples": [],
+ "activate_by_schema_keys": [],
}
# This module is undocumented in our schema docs
diff --git a/cloudinit/config/cc_reset_rmc.py b/cloudinit/config/cc_reset_rmc.py
index 57f024ef..9766c3a4 100644
--- a/cloudinit/config/cc_reset_rmc.py
+++ b/cloudinit/config/cc_reset_rmc.py
@@ -43,6 +43,7 @@ meta: MetaSchema = {
"distros": [ALL_DISTROS],
"frequency": PER_INSTANCE,
"examples": [],
+ "activate_by_schema_keys": [],
}
# This module is undocumented in our schema docs
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 39da1b5a..3372208f 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -42,6 +42,7 @@ meta: MetaSchema = {
"resize_rootfs: noblock # runs resize operation in the background",
],
"frequency": PER_ALWAYS,
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index bbf68079..545b22c3 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -75,6 +75,7 @@ meta: MetaSchema = {
"""
)
],
+ "activate_by_schema_keys": ["manage_resolv_conf"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index b742cb95..9dfe6a38 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -71,6 +71,7 @@ meta: MetaSchema = {
"""
),
],
+ "activate_by_schema_keys": ["rh_subscription"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index c1b0f8bd..5ebf359f 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -44,6 +44,7 @@ meta: MetaSchema = {
"distros": [ALL_DISTROS],
"frequency": PER_INSTANCE,
"examples": [],
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 5b55028c..5484691b 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -60,6 +60,7 @@ meta: MetaSchema = {
"""
),
],
+ "activate_by_schema_keys": ["rsyslog"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index b883e107..60e53298 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -67,6 +67,7 @@ meta: MetaSchema = {
"""
)
],
+ "activate_by_schema_keys": ["runcmd"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index df9d4205..ebab4e30 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -58,6 +58,7 @@ meta: MetaSchema = {
"""
)
],
+ "activate_by_schema_keys": ["salt_minion"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
index aa311d59..408c3bfd 100644
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ b/cloudinit/config/cc_scripts_per_boot.py
@@ -30,6 +30,7 @@ meta: MetaSchema = {
"distros": [ALL_DISTROS],
"frequency": frequency,
"examples": [],
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
index 1fb40717..c1360ae6 100644
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ b/cloudinit/config/cc_scripts_per_instance.py
@@ -31,6 +31,7 @@ meta: MetaSchema = {
"distros": [ALL_DISTROS],
"frequency": PER_INSTANCE,
"examples": [],
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
index d9f406b7..baf2214e 100644
--- a/cloudinit/config/cc_scripts_per_once.py
+++ b/cloudinit/config/cc_scripts_per_once.py
@@ -30,6 +30,7 @@ meta: MetaSchema = {
"distros": [ALL_DISTROS],
"frequency": frequency,
"examples": [],
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
index 85375dac..ffe610fd 100644
--- a/cloudinit/config/cc_scripts_user.py
+++ b/cloudinit/config/cc_scripts_user.py
@@ -31,6 +31,7 @@ meta: MetaSchema = {
"distros": [ALL_DISTROS],
"frequency": PER_INSTANCE,
"examples": [],
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
index 894404f8..8dc99e1e 100644
--- a/cloudinit/config/cc_scripts_vendor.py
+++ b/cloudinit/config/cc_scripts_vendor.py
@@ -52,6 +52,7 @@ meta: MetaSchema = {
"""
),
],
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index b0ffdd15..f829eaf4 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -69,6 +69,7 @@ meta: MetaSchema = {
"""
),
],
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index 4cf6e448..2a4c565f 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -41,7 +41,7 @@ if the hostname is set by metadata or user data on the local system.
This will occur on datasources like nocloud and ovf where metadata and user
data are available locally. This ensures that the desired hostname is applied
-before any DHCP requests are preformed on these platforms where dynamic DNS is
+before any DHCP requests are performed on these platforms where dynamic DNS is
based on initial hostname.
"""
@@ -62,6 +62,7 @@ meta: MetaSchema = {
"""
),
],
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 3c8b378b..6acbb69e 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -8,11 +8,15 @@
"""Set Passwords: Set user passwords and enable/disable SSH password auth"""
import re
+from logging import Logger
from string import ascii_letters, digits
from textwrap import dedent
+from typing import List
+from cloudinit import features
from cloudinit import log as logging
from cloudinit import subp, util
+from cloudinit.cloud import Cloud
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.distros import ALL_DISTROS, Distro, ug_util
from cloudinit.settings import PER_INSTANCE
@@ -26,13 +30,19 @@ The ``ssh_pwauth`` config key determines whether or not sshd will be configured
to accept password authentication.
The ``chpasswd`` config key accepts a dictionary containing either or both of
-``list`` and ``expire``. The ``list`` key is used to assign a password to a
-to a corresponding pre-existing user. The ``expire`` key is used to set
-whether to expire all user passwords such that a password will need to be reset
-on the user's next login.
+``users`` and ``expire``. The ``users`` key is used to assign a password to a
+corresponding pre-existing user. The ``expire`` key is used to set
+whether to expire all user passwords specified by this module,
+such that a password will need to be reset on the user's next login.
+
+.. note::
+ Prior to cloud-init 22.3, the ``expire`` key only applies to plain text
+ (including ``RANDOM``) passwords. Post 22.3, the ``expire`` key applies to
+ both plain text and hashed passwords.
``password`` config key is used to set the default user's password. It is
-ignored if the ``chpasswd`` ``list`` is used.
+ignored if the ``chpasswd`` ``users`` is used. Note: the ``list`` keyword is
+deprecated in favor of ``users``.
"""
meta: MetaSchema = {
@@ -56,19 +66,24 @@ meta: MetaSchema = {
# Disable ssh password authentication
# Don't require users to change their passwords on next login
# Set the password for user1 to be 'password1' (OS does hashing)
- # Set the password for user2 to be a randomly generated password,
+ # Set the password for user2 to a pre-hashed password
+ # Set the password for user3 to be a randomly generated password,
# which will be written to the system console
- # Set the password for user3 to a pre-hashed password
ssh_pwauth: false
chpasswd:
expire: false
- list:
- - user1:password1
- - user2:RANDOM
- - user3:$6$rounds=4096$5DJ8a9WMTEzIo5J4$Yms6imfeBvf3Yfu84mQBerh18l7OR1Wm1BJXZqFSpJ6BVas0AYJqIjP7czkOaAZHZi1kxQ5Y1IhgWN8K9NgxR1
+ users:
+ - name: user1
+ password: password1
+ type: text
+ - name: user2
+ password: $6$rounds=4096$5DJ8a9WMTEzIo5J4$Yms6imfeBvf3Yfu84mQBerh18l7OR1Wm1BJXZqFSpJ6BVas0AYJqIjP7czkOaAZHZi1kxQ5Y1IhgWN8K9NgxR1
+ - name: user3
+ type: RANDOM
""" # noqa
),
],
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
@@ -79,6 +94,19 @@ LOG = logging.getLogger(__name__)
PW_SET = "".join([x for x in ascii_letters + digits if x not in "loLOI01"])
+def get_users_by_type(users_list: list, pw_type: str) -> list:
+ """either password or type: RANDOM is required, user is always required"""
+ return (
+ []
+ if not users_list
+ else [
+ (item["name"], item.get("password", "RANDOM"))
+ for item in users_list
+ if item.get("type", "hash") == pw_type
+ ]
+ )
+
+
def handle_ssh_pwauth(pw_auth, distro: Distro):
"""Apply sshd PasswordAuthentication changes.
@@ -162,7 +190,8 @@ def handle_ssh_pwauth(pw_auth, distro: Distro):
LOG.debug("Not restarting SSH service: service is stopped.")
-def handle(_name, cfg, cloud, log, args):
+def handle(_name, cfg: dict, cloud: Cloud, log: Logger, args: list):
+ distro: Distro = cloud.distro
if args:
# if run from command line, and give args, wipe the chpasswd['list']
password = args[0]
@@ -172,11 +201,16 @@ def handle(_name, cfg, cloud, log, args):
password = util.get_cfg_option_str(cfg, "password", None)
expire = True
- plist = None
+ plist: List = []
+ users_list: List = []
if "chpasswd" in cfg:
chfg = cfg["chpasswd"]
+ users_list = util.get_cfg_option_list(chfg, "users", default=[])
if "list" in chfg and chfg["list"]:
+ log.warning(
+ "DEPRECATION: key 'lists' is now deprecated. Use 'users'."
+ )
if isinstance(chfg["list"], list):
log.debug("Handling input for chpasswd as list.")
plist = util.get_cfg_option_list(chfg, "list", plist)
@@ -187,14 +221,14 @@ def handle(_name, cfg, cloud, log, args):
"cloud-init. Use the list format instead."
)
log.debug("Handling input for chpasswd as multiline string.")
- plist = util.get_cfg_option_str(chfg, "list", plist)
- if plist:
- plist = plist.splitlines()
+ multiline = util.get_cfg_option_str(chfg, "list")
+ if multiline:
+ plist = multiline.splitlines()
expire = util.get_cfg_option_bool(chfg, "expire", expire)
- if not plist and password:
- (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
+ if not (users_list or plist) and password:
+ (users, _groups) = ug_util.normalize_users_groups(cfg, distro)
(user, _user_config) = ug_util.extract_default(users)
if user:
plist = ["%s:%s" % (user, password)]
@@ -202,19 +236,32 @@ def handle(_name, cfg, cloud, log, args):
log.warning("No default or defined user to change password for.")
errors = []
- if plist:
- plist_in = []
- hashed_plist_in = []
- hashed_users = []
+ if plist or users_list:
+ # This section is for parsing the data that arrives in the form of
+ # chpasswd:
+ # users:
+ plist_in = get_users_by_type(users_list, "text")
+ users = [user for user, _ in plist_in]
+ hashed_plist_in = get_users_by_type(users_list, "hash")
+ hashed_users = [user for user, _ in hashed_plist_in]
randlist = []
- users = []
- # N.B. This regex is included in the documentation (i.e. the module
+ for user, _ in get_users_by_type(users_list, "RANDOM"):
+ password = rand_user_password()
+ users.append(user)
+ plist_in.append((user, password))
+ randlist.append(f"{user}:{password}")
+
+ # This for loop is for parsing the data that arrives in the deprecated
+ # form of
+ # chpasswd:
+ # list:
+ # N.B. This regex is included in the documentation (i.e. the schema
# docstring), so any changes to it should be reflected there.
prog = re.compile(r"\$(1|2a|2y|5|6)(\$.+){2}")
for line in plist:
u, p = line.split(":", 1)
if prog.match(p) is not None and ":" not in p:
- hashed_plist_in.append(line)
+ hashed_plist_in.append((u, p))
hashed_users.append(u)
else:
# in this else branch, we potentially change the password
@@ -222,24 +269,22 @@ def handle(_name, cfg, cloud, log, args):
if p == "R" or p == "RANDOM":
p = rand_user_password()
randlist.append("%s:%s" % (u, p))
- plist_in.append("%s:%s" % (u, p))
+ plist_in.append((u, p))
users.append(u)
- ch_in = "\n".join(plist_in) + "\n"
if users:
try:
log.debug("Changing password for %s:", users)
- chpasswd(cloud.distro, ch_in)
+ distro.chpasswd(plist_in, hashed=False)
except Exception as e:
errors.append(e)
util.logexc(
log, "Failed to set passwords with chpasswd for %s", users
)
- hashed_ch_in = "\n".join(hashed_plist_in) + "\n"
if hashed_users:
try:
log.debug("Setting hashed password for %s:", hashed_users)
- chpasswd(cloud.distro, hashed_ch_in, hashed=True)
+ distro.chpasswd(hashed_plist_in, hashed=True)
except Exception as e:
errors.append(e)
util.logexc(
@@ -258,10 +303,13 @@ def handle(_name, cfg, cloud, log, args):
)
if expire:
+ users_to_expire = users
+ if features.EXPIRE_APPLIES_TO_HASHED_USERS:
+ users_to_expire += hashed_users
expired_users = []
- for u in users:
+ for u in users_to_expire:
try:
- cloud.distro.expire_passwd(u)
+ distro.expire_passwd(u)
expired_users.append(u)
except Exception as e:
errors.append(e)
@@ -269,7 +317,7 @@ def handle(_name, cfg, cloud, log, args):
if expired_users:
log.debug("Expired passwords for: %s users", expired_users)
- handle_ssh_pwauth(cfg.get("ssh_pwauth"), cloud.distro)
+ handle_ssh_pwauth(cfg.get("ssh_pwauth"), distro)
if len(errors):
log.debug("%s errors occurred, re-raising the last one", len(errors))
@@ -278,16 +326,3 @@ def handle(_name, cfg, cloud, log, args):
def rand_user_password(pwlen=20):
return util.rand_str(pwlen, select_from=PW_SET)
-
-
-def chpasswd(distro, plist_in, hashed=False):
- if util.is_BSD():
- for pentry in plist_in.splitlines():
- u, p = pentry.split(":")
- distro.set_passwd(u, p, hashed=hashed)
- else:
- cmd = ["chpasswd"] + (["-e"] if hashed else [])
- subp.subp(cmd, plist_in)
-
-
-# vi: ts=4 expandtab
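The chpasswd rework above replaces colon-delimited ``list`` entries with a structured ``users`` list, bucketed by get_users_by_type(); entries default to type ``hash``, and ``RANDOM`` entries receive a generated password later in handle(). A small usage sketch of the bucketing (function copied from the hunk above; the hash value is a truncated stand-in):

    def get_users_by_type(users_list: list, pw_type: str) -> list:
        """Return (name, password) pairs whose 'type' matches pw_type."""
        return (
            []
            if not users_list
            else [
                (item["name"], item.get("password", "RANDOM"))
                for item in users_list
                if item.get("type", "hash") == pw_type
            ]
        )

    users_list = [
        {"name": "user1", "password": "password1", "type": "text"},
        {"name": "user2", "password": "$6$rounds=4096$..."},  # type defaults to "hash"
        {"name": "user3", "type": "RANDOM"},
    ]
    print(get_users_by_type(users_list, "text"))    # [('user1', 'password1')]
    print(get_users_by_type(users_list, "hash"))    # [('user2', '$6$rounds=4096$...')]
    print(get_users_by_type(users_list, "RANDOM"))  # [('user3', 'RANDOM')]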
diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
index 2e595934..7bf22a52 100644
--- a/cloudinit/config/cc_snap.py
+++ b/cloudinit/config/cc_snap.py
@@ -105,6 +105,7 @@ meta: MetaSchema = {
),
],
"frequency": PER_INSTANCE,
+ "activate_by_schema_keys": ["snap"],
}
diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py
index a61ea209..991aa5ed 100644
--- a/cloudinit/config/cc_spacewalk.py
+++ b/cloudinit/config/cc_spacewalk.py
@@ -34,6 +34,7 @@ meta: MetaSchema = {
"""
)
],
+ "activate_by_schema_keys": ["spacewalk"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 9f71f273..ad4fcf80 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -165,6 +165,7 @@ meta: MetaSchema = {
""" # noqa: E501
)
],
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index db5c1454..40fb4ce5 100644
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -31,6 +31,7 @@ meta: MetaSchema = {
"no_ssh_fingerprints: true",
"authkey_hash: sha512",
],
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 86cf7254..358c571f 100644
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -43,6 +43,7 @@ meta: MetaSchema = {
"""
)
],
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_timezone.py b/cloudinit/config/cc_timezone.py
index 47da2d06..b9df31af 100644
--- a/cloudinit/config/cc_timezone.py
+++ b/cloudinit/config/cc_timezone.py
@@ -26,6 +26,7 @@ meta: MetaSchema = {
"examples": [
"timezone: US/Eastern",
],
+ "activate_by_schema_keys": ["timezone"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index 900db695..c05d6297 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -2,7 +2,9 @@
"""ubuntu_advantage: Configure Ubuntu Advantage support services"""
+import re
from textwrap import dedent
+from urllib.parse import urlparse
from cloudinit import log as logging
from cloudinit import subp, util
@@ -69,8 +71,26 @@ meta: MetaSchema = {
- fips
"""
),
+ dedent(
+ """\
+ # Set a http(s) proxy before attaching the machine to an
+ # Ubuntu Advantage support contract and enabling the FIPS service.
+ ubuntu_advantage:
+ token: <ua_contract_token>
+ config:
+ http_proxy: 'http://some-proxy:8088'
+ https_proxy: 'https://some-proxy:8088'
+ global_apt_https_proxy: 'http://some-global-apt-proxy:8088/'
+ global_apt_http_proxy: 'https://some-global-apt-proxy:8088/'
+ ua_apt_http_proxy: 'http://10.0.10.10:3128'
+ ua_apt_https_proxy: 'https://10.0.10.10:3128'
+ enable:
+ - fips
+ """
+ ),
],
"frequency": PER_INSTANCE,
+ "activate_by_schema_keys": ["ubuntu_advantage", "ubuntu-advantage"],
}
__doc__ = get_meta_doc(meta)
@@ -78,7 +98,46 @@ __doc__ = get_meta_doc(meta)
LOG = logging.getLogger(__name__)
-def configure_ua(token=None, enable=None):
+def supplemental_schema_validation(ua_config):
+ """Validate user-provided ua:config option values.
+
+ This function supplements flexible jsonschema validation with specific
+ value checks to aid in triage of invalid user-provided configuration.
+
+ @param ua_config: Dictionary of config value under 'ubuntu_advantage'.
+
+ @raises: ValueError describing invalid values provided.
+ """
+ errors = []
+ nl = "\n"
+ for key, value in sorted(ua_config.items()):
+ if key in (
+ "http_proxy",
+ "https_proxy",
+ "global_apt_http_proxy",
+ "global_apt_https_proxy",
+ "ua_apt_http_proxy",
+ "ua_apt_https_proxy",
+ ):
+ try:
+ parsed_url = urlparse(value)
+ if parsed_url.scheme not in ("http", "https"):
+ errors.append(
+ f"Expected URL scheme http/https for ua:config:{key}."
+ f" Found: {value}"
+ )
+ except (AttributeError, ValueError):
+ errors.append(
+ f"Expected a URL for ua:config:{key}. Found: {value}"
+ )
+
+ if errors:
+ raise ValueError(
+ f"Invalid ubuntu_advantage configuration:{nl}{nl.join(errors)}"
+ )
+
+
+def configure_ua(token=None, enable=None, config=None):
"""Call ua commandline client to attach or enable services."""
error = None
if not token:
@@ -102,6 +161,44 @@ def configure_ua(token=None, enable=None):
)
enable = []
+ if config is None:
+ config = dict()
+ elif not isinstance(config, dict):
+ LOG.warning(
+ "ubuntu_advantage: config should be a dict, not"
+ " a %s; skipping enabling config parameters",
+ type(config).__name__,
+ )
+ config = dict()
+
+ enable_errors = []
+
+ # UA Config
+ for key, value in sorted(config.items()):
+ if value is None:
+ LOG.debug("Unsetting UA config for %s", key)
+ config_cmd = ["ua", "config", "unset", key]
+ else:
+ LOG.debug("Setting UA config %s=%s", key, value)
+ if re.search(r"\s", value):
+ key_value = f"{key}={re.escape(value)}"
+ else:
+ key_value = f"{key}={value}"
+ config_cmd = ["ua", "config", "set", key_value]
+
+ try:
+ subp.subp(config_cmd)
+ except subp.ProcessExecutionError as e:
+ enable_errors.append((key, e))
+
+ if enable_errors:
+ for param, error in enable_errors:
+ LOG.warning('Failure enabling "%s":\n%s', param, error)
+ raise RuntimeError(
+ "Failure enabling Ubuntu Advantage config(s): {}".format(
+ ", ".join('"{}"'.format(param) for param, _ in enable_errors)
+ )
+ )
attach_cmd = ["ua", "attach", token]
LOG.debug("Attaching to Ubuntu Advantage. %s", " ".join(attach_cmd))
try:
@@ -176,9 +273,16 @@ def handle(name, cfg, cloud, log, args):
LOG.error(msg)
raise RuntimeError(msg)
+ config = ua_section.get("config")
+
+ if config is not None:
+ supplemental_schema_validation(config)
+
maybe_install_ua_tools(cloud)
configure_ua(
- token=ua_section.get("token"), enable=ua_section.get("enable")
+ token=ua_section.get("token"),
+ enable=ua_section.get("enable"),
+ config=config,
)
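supplemental_schema_validation() narrows the accepted ua:config values beyond what jsonschema checks: each proxy key must parse as a URL with an http or https scheme. A trimmed, runnable copy of that check (renamed here to check_proxies so the sketch needs no cloud-init imports):

    from urllib.parse import urlparse

    PROXY_KEYS = (
        "http_proxy", "https_proxy",
        "global_apt_http_proxy", "global_apt_https_proxy",
        "ua_apt_http_proxy", "ua_apt_https_proxy",
    )

    def check_proxies(ua_config: dict) -> None:
        errors = []
        for key, value in sorted(ua_config.items()):
            if key not in PROXY_KEYS:
                continue
            try:
                if urlparse(value).scheme not in ("http", "https"):
                    errors.append(
                        f"Expected URL scheme http/https for ua:config:{key}."
                        f" Found: {value}")
            except (AttributeError, ValueError):
                errors.append(f"Expected a URL for ua:config:{key}. Found: {value}")
        if errors:
            raise ValueError(
                "Invalid ubuntu_advantage configuration:\n" + "\n".join(errors))

    check_proxies({"http_proxy": "http://some-proxy:8088"})   # passes
    try:
        check_proxies({"https_proxy": "ftp://some-proxy"})    # wrong scheme
    except ValueError as e:
        print(e)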
diff --git a/cloudinit/config/cc_ubuntu_autoinstall.py b/cloudinit/config/cc_ubuntu_autoinstall.py
new file mode 100644
index 00000000..a6180fe6
--- /dev/null
+++ b/cloudinit/config/cc_ubuntu_autoinstall.py
@@ -0,0 +1,142 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Autoinstall: Support ubuntu live-server autoinstall syntax."""
+
+import re
+from textwrap import dedent
+
+from cloudinit import log as logging
+from cloudinit.config.schema import (
+ MetaSchema,
+ SchemaProblem,
+ SchemaValidationError,
+ get_meta_doc,
+)
+from cloudinit.settings import PER_ONCE
+from cloudinit.subp import subp
+
+LOG = logging.getLogger(__name__)
+
+distros = ["ubuntu"]
+
+meta: MetaSchema = {
+ "id": "cc_ubuntu_autoinstall",
+ "name": "Autoinstall",
+ "title": "Support Ubuntu live-server install syntax",
+ "description": dedent(
+ """\
+ Ubuntu's autoinstall syntax supports single-system automated installs
+ in either the live-server or live-desktop installers.
+ When "autoinstall" directives are provided in either
+ #cloud-config user-data or ``/etc/cloud/cloud.cfg.d``, validate
+ minimal autoinstall schema adherence and emit a warning if the
+ live-installer is not present.
+
+ The live-installer will use autoinstall directives to seed answers to
+ configuration prompts during system install to allow for a
+ "touchless" Ubuntu system install.
+
+ For more details on Ubuntu's autoinstaller:
+ https://ubuntu.com/server/docs/install/autoinstall
+ """
+ ),
+ "distros": distros,
+ "examples": [
+ dedent(
+ """\
+ # Tell the live-server installer to provide dhcp6 network config
+ # and LVM on a disk matching the serial number prefix CT
+ autoinstall:
+ version: 1
+ network:
+ version: 2
+ ethernets:
+ enp0s31f6:
+ dhcp6: yes
+ storage:
+ layout:
+ name: lvm
+ match:
+ serial: CT*
+ """
+ )
+ ],
+ "frequency": PER_ONCE,
+ "activate_by_schema_keys": ["autoinstall"],
+}
+
+__doc__ = get_meta_doc(meta)
+
+
+LIVE_INSTALLER_SNAPS = ("subiquity", "ubuntu-desktop-installer")
+
+
+def handle(name, cfg, cloud, log, _args):
+
+ if "autoinstall" not in cfg:
+ LOG.debug(
+ "Skipping module named %s, no 'autoinstall' key in configuration",
+ name,
+ )
+ return
+
+ snap_list, _ = subp(["snap", "list"])
+ installer_present = None
+ for snap_name in LIVE_INSTALLER_SNAPS:
+ if re.search(snap_name, snap_list):
+ installer_present = snap_name
+ if not installer_present:
+ LOG.warning(
+ "Skipping autoinstall module. Expected one of the Ubuntu"
+ " installer snap packages to be present: %s",
+ ", ".join(LIVE_INSTALLER_SNAPS),
+ )
+ return
+ validate_config_schema(cfg)
+ LOG.debug(
+ "Valid autoinstall schema. Config will be processed by %s",
+ installer_present,
+ )
+
+
+def validate_config_schema(cfg):
+ """Supplemental runtime schema validation for autoinstall yaml.
+
+ Schema validation issues currently result in a warning log, which
+ can be easily ignored because warnings do not bubble up to cloud-init
+ status output.
+
+ In the case of the live-installer, we want cloud-init to raise an error
+ to set overall cloud-init status to 'error' so it is more discoverable
+ in installer environments.
+
+ # TODO(Drop this validation when cloud-init schema is strict and errors)
+
+ :raise: SchemaValidationError if any known schema violations are present.
+ """
+ autoinstall_cfg = cfg["autoinstall"]
+ if not isinstance(autoinstall_cfg, dict):
+ raise SchemaValidationError(
+ [
+ SchemaProblem(
+ "autoinstall",
+ "Expected dict type but found:"
+ f" {type(autoinstall_cfg).__name__}",
+ )
+ ]
+ )
+
+ if "version" not in autoinstall_cfg:
+ raise SchemaValidationError(
+ [SchemaProblem("autoinstall", "Missing required 'version' key")]
+ )
+ elif not isinstance(autoinstall_cfg.get("version"), int):
+ raise SchemaValidationError(
+ [
+ SchemaProblem(
+ "autoinstall.version",
+ f"Expected int type but found:"
+ f" {type(autoinstall_cfg['version']).__name__}",
+ )
+ ]
+ )
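
A minimal sketch of how the supplemental validation above behaves, assuming
the module is importable as packaged; the config dicts are illustrative:

    # A well-formed "autoinstall" value passes silently; malformed values
    # raise SchemaValidationError carrying a SchemaProblem for the issue.
    from cloudinit.config.cc_ubuntu_autoinstall import validate_config_schema
    from cloudinit.config.schema import SchemaValidationError

    validate_config_schema({"autoinstall": {"version": 1}})  # passes

    for bad_cfg in (
        {"autoinstall": "not-a-dict"},      # wrong type
        {"autoinstall": {}},                # missing required 'version'
        {"autoinstall": {"version": "1"}},  # version must be an int
    ):
        try:
            validate_config_schema(bad_cfg)
        except SchemaValidationError as e:
            print(e)
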
diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py
index a962bce3..09e7badd 100644
--- a/cloudinit/config/cc_ubuntu_drivers.py
+++ b/cloudinit/config/cc_ubuntu_drivers.py
@@ -42,6 +42,7 @@ meta: MetaSchema = {
)
],
"frequency": PER_INSTANCE,
+ "activate_by_schema_keys": ["drivers"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index e0d15167..606b7860 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -88,6 +88,7 @@ meta: MetaSchema = {
),
],
"frequency": PER_ALWAYS,
+ "activate_by_schema_keys": ["manage_etc_hosts"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index aa769405..01d2078f 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -73,6 +73,7 @@ meta: MetaSchema = {
),
],
"frequency": PER_ALWAYS,
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index 96e63242..a84a6183 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -146,6 +146,7 @@ meta: MetaSchema = {
),
],
"frequency": PER_INSTANCE,
+ "activate_by_schema_keys": [],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index 7cc7f854..a020fac4 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -12,10 +12,10 @@ from textwrap import dedent
from cloudinit import log as logging
from cloudinit import util
+from cloudinit.cloud import Cloud
from cloudinit.config.schema import MetaSchema, get_meta_doc
from cloudinit.settings import PER_INSTANCE
-DEFAULT_OWNER = "root:root"
DEFAULT_PERMS = 0o644
DEFAULT_DEFER = False
TEXT_PLAIN_ENC = "text/plain"
@@ -108,12 +108,13 @@ meta: MetaSchema = {
),
],
"frequency": PER_INSTANCE,
+ "activate_by_schema_keys": ["write_files"],
}
__doc__ = get_meta_doc(meta)
-def handle(name, cfg, _cloud, log, _args):
+def handle(name, cfg, cloud: Cloud, log, _args):
file_list = cfg.get("write_files", [])
filtered_files = [
f
@@ -127,7 +128,7 @@ def handle(name, cfg, _cloud, log, _args):
name,
)
return
- write_files(name, filtered_files)
+ write_files(name, filtered_files, cloud.distro.default_owner)
def canonicalize_extraction(encoding_type):
@@ -155,7 +156,7 @@ def canonicalize_extraction(encoding_type):
return [TEXT_PLAIN_ENC]
-def write_files(name, files):
+def write_files(name, files, owner: str):
if not files:
return
@@ -171,7 +172,7 @@ def write_files(name, files):
path = os.path.abspath(path)
extractions = canonicalize_extraction(f_info.get("encoding"))
contents = extract_contents(f_info.get("content", ""), extractions)
- (u, g) = util.extract_usergroup(f_info.get("owner", DEFAULT_OWNER))
+ (u, g) = util.extract_usergroup(f_info.get("owner", owner))
perms = decode_perms(f_info.get("permissions"), DEFAULT_PERMS)
omode = "ab" if util.get_cfg_option_bool(f_info, "append") else "wb"
util.write_file(path, contents, omode=omode, mode=perms)
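
A short sketch of the owner fallback wired in above: a per-file ``owner``
wins, otherwise the distro-wide default applies ("root:root" on Linux
distros, "root:wheel" on BSD per the change further below). The helper name
is illustrative:

    # Resolve a file's owner: per-file "owner", then the distro default.
    from cloudinit import util

    def resolve_owner(f_info: dict, distro_default_owner: str):
        return util.extract_usergroup(
            f_info.get("owner", distro_default_owner)
        )

    print(resolve_owner({"path": "/etc/motd"}, "root:root"))  # ('root', 'root')
    print(resolve_owner({"owner": "www:www"}, "root:wheel"))  # ('www', 'www')
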
diff --git a/cloudinit/config/cc_write_files_deferred.py b/cloudinit/config/cc_write_files_deferred.py
index dbbe90f6..7cf5d593 100644
--- a/cloudinit/config/cc_write_files_deferred.py
+++ b/cloudinit/config/cc_write_files_deferred.py
@@ -5,6 +5,7 @@
"""Write Files Deferred: Defer writing certain files"""
from cloudinit import util
+from cloudinit.cloud import Cloud
from cloudinit.config.cc_write_files import DEFAULT_DEFER, write_files
from cloudinit.config.schema import MetaSchema
from cloudinit.distros import ALL_DISTROS
@@ -27,13 +28,14 @@ meta: MetaSchema = {
"distros": [ALL_DISTROS],
"frequency": PER_INSTANCE,
"examples": [],
+ "activate_by_schema_keys": ["write_files"],
}
# This module is undocumented in our schema docs
__doc__ = ""
-def handle(name, cfg, _cloud, log, _args):
+def handle(name, cfg, cloud: Cloud, log, _args):
file_list = cfg.get("write_files", [])
filtered_files = [
f
@@ -47,4 +49,4 @@ def handle(name, cfg, _cloud, log, _args):
name,
)
return
- write_files(name, filtered_files)
+ write_files(name, filtered_files, cloud.distro.default_owner)
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 405207ad..0e683de2 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -113,6 +113,7 @@ meta: MetaSchema = {
),
],
"frequency": PER_INSTANCE,
+ "activate_by_schema_keys": ["yum_repos"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py
index 9b682bc6..02867b8f 100644
--- a/cloudinit/config/cc_zypper_add_repo.py
+++ b/cloudinit/config/cc_zypper_add_repo.py
@@ -61,6 +61,7 @@ meta: MetaSchema = {
)
],
"frequency": PER_ALWAYS,
+ "activate_by_schema_keys": ["zypper"],
}
__doc__ = get_meta_doc(meta)
diff --git a/cloudinit/config/modules.py b/cloudinit/config/modules.py
index efb7a5a4..970343cd 100644
--- a/cloudinit/config/modules.py
+++ b/cloudinit/config/modules.py
@@ -7,8 +7,8 @@
# This file is part of cloud-init. See LICENSE file for license information.
import copy
-from collections import namedtuple
-from typing import List
+from types import ModuleType
+from typing import Dict, List, NamedTuple
from cloudinit import config, importer
from cloudinit import log as logging
@@ -26,9 +26,13 @@ LOG = logging.getLogger(__name__)
# we will not find something else with the same
# name in the lookup path...
MOD_PREFIX = "cc_"
-ModuleDetails = namedtuple(
- "ModuleDetails", ["module", "name", "frequency", "run_args"]
-)
+
+
+class ModuleDetails(NamedTuple):
+ module: ModuleType
+ name: str
+ frequency: str
+ run_args: List[str]
def form_module_name(name):
@@ -65,6 +69,17 @@ def validate_module(mod, name):
)
+def _is_active(module_details: ModuleDetails, cfg: dict) -> bool:
+    activate_by_schema_keys = frozenset(
+        module_details.module.meta.get("activate_by_schema_keys", {})
+    )
+    if not activate_by_schema_keys:
+        return True
+    if not activate_by_schema_keys.intersection(cfg.keys()):
+        return False
+    return True
+
+
class Modules(object):
def __init__(self, init: Init, cfg_files=None, reporter=None):
self.init = init
@@ -93,21 +108,21 @@ class Modules(object):
# Only give out a copy so that others can't modify this...
return copy.deepcopy(self._cached_cfg)
- def _read_modules(self, name):
+ def _read_modules(self, name) -> List[Dict]:
"""Read the modules from the config file given the specified name.
Returns a list of module definitions. E.g.,
[
{
"mod": "bootcmd",
- "freq": "always"
+ "freq": "always",
"args": "some_arg",
}
]
Note that in the default case, only "mod" will be set.
"""
- module_list = []
+ module_list: List[dict] = []
if name not in self.cfg:
return module_list
cfg_mods = self.cfg.get(name)
@@ -155,7 +170,7 @@ class Modules(object):
def _fixup_modules(self, raw_mods) -> List[ModuleDetails]:
"""Convert list of returned from _read_modules() into new format.
- Invalid modules and arguments are ingnored.
+ Invalid modules and arguments are ignored.
Also ensures that the module has the required meta fields.
"""
mostly_mods = []
@@ -269,12 +284,16 @@ class Modules(object):
skipped = []
forced = []
overridden = self.cfg.get("unverified_modules", [])
+ inapplicable_mods = []
active_mods = []
- for (mod, name, _freq, _args) in mostly_mods:
+ for module_details in mostly_mods:
+ (mod, name, _freq, _args) = module_details
if mod is None:
continue
worked_distros = mod.meta["distros"]
-
+ if not _is_active(module_details, self.cfg):
+ inapplicable_mods.append(name)
+ continue
# Skip only when the following conditions are all met:
# - distros are defined in the module != ALL_DISTROS
# - the current d_name isn't in distros
@@ -288,6 +307,12 @@ class Modules(object):
forced.append(name)
active_mods.append([mod, name, _freq, _args])
+ if inapplicable_mods:
+ LOG.info(
+ "Skipping modules '%s' because no applicable config "
+ "is provided.",
+ ",".join(inapplicable_mods),
+ )
if skipped:
LOG.info(
"Skipping modules '%s' because they are not verified "
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 1e29ae5a..1d95b858 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -8,11 +8,11 @@ import os
import re
import sys
import textwrap
-import typing
from collections import defaultdict
from copy import deepcopy
from functools import partial
-from typing import Optional, Tuple, cast
+from itertools import chain
+from typing import TYPE_CHECKING, List, NamedTuple, Optional, Type, Union, cast
import yaml
@@ -20,6 +20,14 @@ from cloudinit import importer, safeyaml
from cloudinit.cmd.devel import read_cfg_paths
from cloudinit.util import error, get_modules_from_dir, load_file
+try:
+ from jsonschema import ValidationError as _ValidationError
+
+ ValidationError = _ValidationError
+except ImportError:
+ ValidationError = Exception # type: ignore
+
+
error = partial(error, sys_exit=True)
LOG = logging.getLogger(__name__)
@@ -42,7 +50,7 @@ SCHEMA_DOC_TMPL = """
**Supported distros:** {distros}
-{property_header}
+{activate_by_schema_keys}{property_header}
{property_doc}
{examples}
@@ -54,14 +62,17 @@ SCHEMA_LIST_ITEM_TMPL = (
)
SCHEMA_EXAMPLES_HEADER = "**Examples**::\n\n"
SCHEMA_EXAMPLES_SPACER_TEMPLATE = "\n # --- Example{0} ---"
+DEPRECATED_KEY = "deprecated"
+
+# Type-annotate only when type-checking.
+# Consider adding `typing_extensions` as a dependency once Bionic is EOL.
+if TYPE_CHECKING:
+ import typing
-# annotations add value for development, but don't break old versions
-# pyver: 3.6 -> 3.8
-# pylint: disable=E1101
-if sys.version_info >= (3, 8):
+ from typing_extensions import NotRequired, TypedDict
- class MetaSchema(typing.TypedDict):
+ class MetaSchema(TypedDict):
name: str
id: str
title: str
@@ -69,30 +80,72 @@ if sys.version_info >= (3, 8):
distros: typing.List[str]
examples: typing.List[str]
frequency: str
+ activate_by_schema_keys: NotRequired[List[str]]
else:
MetaSchema = dict
-# pylint: enable=E1101
+
+
+class SchemaDeprecationError(ValidationError):
+ pass
+
+
+class SchemaProblem(NamedTuple):
+ path: str
+ message: str
+
+ def format(self) -> str:
+ return f"{self.path}: {self.message}"
+
+
+SchemaProblems = List[SchemaProblem]
+
+
+def _format_schema_problems(
+ schema_problems: SchemaProblems,
+ *,
+ prefix: Optional[str] = None,
+ separator: str = ", ",
+) -> str:
+ formatted = separator.join(map(lambda p: p.format(), schema_problems))
+ if prefix:
+ formatted = f"{prefix}{formatted}"
+ return formatted
class SchemaValidationError(ValueError):
"""Raised when validating a cloud-config file against a schema."""
- def __init__(self, schema_errors=()):
+ def __init__(
+ self,
+ schema_errors: Optional[SchemaProblems] = None,
+ schema_deprecations: Optional[SchemaProblems] = None,
+ ):
"""Init the exception an n-tuple of schema errors.
@param schema_errors: An n-tuple of the format:
((flat.config.key, msg),)
+ @param schema_deprecations: An n-tuple of the format:
+ ((flat.config.key, msg),)
"""
+ message = ""
+ if schema_errors:
+ message += _format_schema_problems(
+ schema_errors, prefix="Cloud config schema errors: "
+ )
+ if schema_deprecations:
+ if message:
+ message += "\n\n"
+ message += _format_schema_problems(
+ schema_deprecations,
+ prefix="Cloud config schema deprecations: ",
+ )
+ super().__init__(message)
self.schema_errors = schema_errors
- error_messages = [
- "{0}: {1}".format(config_key, message)
- for config_key, message in schema_errors
- ]
- message = "Cloud config schema errors: {0}".format(
- ", ".join(error_messages)
- )
- super(SchemaValidationError, self).__init__(message)
+ self.schema_deprecations = schema_deprecations
+
+ def has_errors(self) -> bool:
+ return bool(self.schema_errors)
def is_schema_byte_string(checker, instance):
@@ -109,6 +162,114 @@ def is_schema_byte_string(checker, instance):
) or isinstance(instance, (bytes,))
+def _add_deprecation_msg(description: Optional[str] = None):
+ msg = "DEPRECATED."
+ if description:
+ msg += f" {description}"
+ return msg
+
+
+def _validator_deprecated(
+ _validator,
+ deprecated: bool,
+ _instance,
+ schema: dict,
+ error_type: Type[Exception] = SchemaDeprecationError,
+):
+ """Jsonschema validator for `deprecated` items.
+
+    It yields an instance of `error_type` if the item is deprecated, which
+    must be handled; otherwise the instance is considered faulty.
+ """
+ if deprecated:
+ description = schema.get("description")
+ msg = _add_deprecation_msg(description)
+ yield error_type(msg)
+
+
+def _anyOf(
+ validator,
+ anyOf,
+ instance,
+ _schema,
+ error_type: Type[Exception] = SchemaDeprecationError,
+):
+ """Jsonschema validator for `anyOf`.
+
+    It treats occurrences of `error_type` as non-errors, but yields them for
+    external processing. Useful to process schema annotations, such as
+    `deprecated`.
+ """
+ from jsonschema import ValidationError
+
+ all_errors = []
+ all_deprecations = []
+ for index, subschema in enumerate(anyOf):
+ all_errs = list(
+ validator.descend(instance, subschema, schema_path=index)
+ )
+ errs = list(filter(lambda e: not isinstance(e, error_type), all_errs))
+ deprecations = list(
+ filter(lambda e: isinstance(e, error_type), all_errs)
+ )
+ if not errs:
+ all_deprecations.extend(deprecations)
+ break
+ all_errors.extend(errs)
+ else:
+ yield ValidationError(
+ "%r is not valid under any of the given schemas" % (instance,),
+ context=all_errors,
+ )
+ yield from all_deprecations
+
+
+def _oneOf(
+ validator,
+ oneOf,
+ instance,
+ _schema,
+ error_type: Type[Exception] = SchemaDeprecationError,
+):
+ """Jsonschema validator for `oneOf`.
+
+    It treats occurrences of `error_type` as non-errors, but yields them for
+    external processing. Useful to process schema annotations, such as
+    `deprecated`.
+ """
+ from jsonschema import ValidationError
+
+ subschemas = enumerate(oneOf)
+ all_errors = []
+ all_deprecations = []
+ for index, subschema in subschemas:
+ all_errs = list(
+ validator.descend(instance, subschema, schema_path=index)
+ )
+ errs = list(filter(lambda e: not isinstance(e, error_type), all_errs))
+ deprecations = list(
+ filter(lambda e: isinstance(e, error_type), all_errs)
+ )
+ if not errs:
+ first_valid = subschema
+ all_deprecations.extend(deprecations)
+ break
+ all_errors.extend(errs)
+ else:
+ yield ValidationError(
+ "%r is not valid under any of the given schemas" % (instance,),
+ context=all_errors,
+ )
+
+ more_valid = [s for i, s in subschemas if validator.is_valid(instance, s)]
+ if more_valid:
+ more_valid.append(first_valid)
+ reprs = ", ".join(repr(schema) for schema in more_valid)
+ yield ValidationError(
+ "%r is valid under each of %s" % (instance, reprs)
+ )
+ else:
+ yield from all_deprecations
+
+
def get_jsonschema_validator():
"""Get metaschema validator and format checker
@@ -135,28 +296,51 @@ def get_jsonschema_validator():
# http://json-schema.org/understanding-json-schema/reference/object.html#pattern-properties
strict_metaschema["properties"]["label"] = {"type": "string"}
+ validator_kwargs = {}
if hasattr(Draft4Validator, "TYPE_CHECKER"): # jsonschema 3.0+
type_checker = Draft4Validator.TYPE_CHECKER.redefine(
"string", is_schema_byte_string
)
- cloudinitValidator = create(
- meta_schema=strict_metaschema,
- validators=Draft4Validator.VALIDATORS,
- version="draft4",
- type_checker=type_checker,
- )
+ validator_kwargs = {
+ "type_checker": type_checker,
+ }
else: # jsonschema 2.6 workaround
types = Draft4Validator.DEFAULT_TYPES # pylint: disable=E1101
# Allow bytes as well as string (and disable a spurious unsupported
# assignment-operation pylint warning which appears because this
# code path isn't written against the latest jsonschema).
types["string"] = (str, bytes) # pylint: disable=E1137
- cloudinitValidator = create( # pylint: disable=E1123
- meta_schema=strict_metaschema,
- validators=Draft4Validator.VALIDATORS,
- version="draft4",
- default_types=types,
+ validator_kwargs = {"default_types": types}
+
+ # Add deprecation handling
+ validators = dict(Draft4Validator.VALIDATORS)
+ validators[DEPRECATED_KEY] = _validator_deprecated
+ validators["oneOf"] = _oneOf
+ validators["anyOf"] = _anyOf
+
+ cloudinitValidator = create(
+ meta_schema=strict_metaschema,
+ validators=validators,
+ version="draft4",
+ **validator_kwargs,
+ )
+
+ # Add deprecation handling
+ def is_valid(self, instance, _schema=None, **__):
+ """Override version of `is_valid`.
+
+        It ignores instances of `SchemaDeprecationError`.
+ """
+ errors = filter(
+ lambda e: not isinstance( # pylint: disable=W1116
+ e, SchemaDeprecationError
+ ),
+ self.iter_errors(instance, _schema),
)
+ return next(errors, None) is None
+
+ cloudinitValidator.is_valid = is_valid
+
return (cloudinitValidator, FormatChecker)
@@ -182,9 +366,11 @@ def validate_cloudconfig_metaschema(validator, schema: dict, throw=True):
# sites
if throw:
raise SchemaValidationError(
- schema_errors=(
- (".".join([str(p) for p in err.path]), err.message),
- )
+ schema_errors=[
+ SchemaProblem(
+ ".".join([str(p) for p in err.path]), err.message
+ )
+ ]
) from err
LOG.warning(
"Meta-schema validation failed, attempting to validate config "
@@ -199,6 +385,7 @@ def validate_cloudconfig_schema(
strict: bool = False,
strict_metaschema: bool = False,
log_details: bool = True,
+ log_deprecations: bool = False,
):
"""Validate provided config meets the schema definition.
@@ -214,6 +401,7 @@ def validate_cloudconfig_schema(
@param log_details: Boolean, when True logs details of validation errors.
If there are concerns about logging sensitive userdata, this should
be set to False.
+ @param log_deprecations: Controls whether to log deprecations or not.
@raises: SchemaValidationError when provided config does not validate
against the provided schema.
@@ -232,76 +420,181 @@ def validate_cloudconfig_schema(
return
validator = cloudinitValidator(schema, format_checker=FormatChecker())
- errors: Tuple[Tuple[str, str], ...] = ()
+
+ errors: SchemaProblems = []
+ deprecations: SchemaProblems = []
for error in sorted(validator.iter_errors(config), key=lambda e: e.path):
path = ".".join([str(p) for p in error.path])
- errors += ((path, error.message),)
+ problem = (SchemaProblem(path, error.message),)
+ if isinstance(error, SchemaDeprecationError): # pylint: disable=W1116
+ deprecations += problem
+ else:
+ errors += problem
+
+ if log_deprecations and deprecations:
+ message = _format_schema_problems(
+ deprecations,
+ prefix="Deprecated cloud-config provided:\n",
+ separator="\n",
+ )
+ LOG.warning(message)
+ if strict and (errors or deprecations):
+ raise SchemaValidationError(errors, deprecations)
if errors:
- if strict:
- # This could output/log sensitive data
- raise SchemaValidationError(errors)
if log_details:
- messages = ["{0}: {1}".format(k, msg) for k, msg in errors]
- details = "\n" + "\n".join(messages)
+ details = _format_schema_problems(
+ errors,
+ prefix="Invalid cloud-config provided:\n",
+ separator="\n",
+ )
else:
details = (
+ "Invalid cloud-config provided: "
"Please run 'sudo cloud-init schema --system' to "
"see the schema errors."
)
- LOG.warning("Invalid cloud-config provided: %s", details)
+ LOG.warning(details)
+
+
+class _Annotator:
+ def __init__(
+ self,
+ cloudconfig: dict,
+ original_content: bytes,
+ schemamarks: dict,
+ ):
+ self._cloudconfig = cloudconfig
+ self._original_content = original_content
+ self._schemamarks = schemamarks
+
+ @staticmethod
+ def _build_footer(title: str, content: List[str]) -> str:
+ body = "\n".join(content)
+ return f"# {title}: -------------\n{body}\n\n"
+
+ def _build_errors_by_line(self, schema_problems: SchemaProblems):
+ errors_by_line = defaultdict(list)
+ for (path, msg) in schema_problems:
+ match = re.match(r"format-l(?P<line>\d+)\.c(?P<col>\d+).*", path)
+ if match:
+ line, col = match.groups()
+ errors_by_line[int(line)].append(msg)
+ else:
+ col = None
+ errors_by_line[self._schemamarks[path]].append(msg)
+ if col is not None:
+ msg = "Line {line} column {col}: {msg}".format(
+ line=line, col=col, msg=msg
+ )
+ return errors_by_line
+
+ @staticmethod
+ def _add_problems(
+ problems: List[str],
+ labels: List[str],
+ footer: List[str],
+ index: int,
+ label_prefix: str = "",
+ ) -> int:
+ for problem in problems:
+ label = f"{label_prefix}{index}"
+ labels.append(label)
+ footer.append(f"# {label}: {problem}")
+ index += 1
+ return index
+
+ def _annotate_content(
+ self,
+ lines: List[str],
+ errors_by_line: dict,
+ deprecations_by_line: dict,
+ ) -> List[str]:
+ annotated_content = []
+ error_footer: List[str] = []
+ deprecation_footer: List[str] = []
+ error_index = 1
+ deprecation_index = 1
+ for line_number, line in enumerate(lines, 1):
+ errors = errors_by_line[line_number]
+ deprecations = deprecations_by_line[line_number]
+ if errors or deprecations:
+ labels: List[str] = []
+ error_index = self._add_problems(
+ errors, labels, error_footer, error_index, label_prefix="E"
+ )
+ deprecation_index = self._add_problems(
+ deprecations,
+ labels,
+ deprecation_footer,
+ deprecation_index,
+ label_prefix="D",
+ )
+ annotated_content.append(line + "\t\t# " + ",".join(labels))
+ else:
+ annotated_content.append(line)
+
+ annotated_content.extend(
+ map(
+ lambda seq: self._build_footer(*seq),
+ filter(
+ lambda seq: bool(seq[1]),
+ (
+ ("Errors", error_footer),
+ ("Deprecations", deprecation_footer),
+ ),
+ ),
+ )
+ )
+ return annotated_content
+
+ def annotate(
+ self,
+ schema_errors: SchemaProblems,
+ schema_deprecations: SchemaProblems,
+ ) -> Union[str, bytes]:
+ if not schema_errors and not schema_deprecations:
+ return self._original_content
+ lines = self._original_content.decode().split("\n")
+ if not isinstance(self._cloudconfig, dict):
+ # Return a meaningful message on empty cloud-config
+ return "\n".join(
+ lines
+ + [
+ self._build_footer(
+ "Errors", ["# E1: Cloud-config is not a YAML dict."]
+ )
+ ]
+ )
+ errors_by_line = self._build_errors_by_line(schema_errors)
+ deprecations_by_line = self._build_errors_by_line(schema_deprecations)
+ annotated_content = self._annotate_content(
+ lines, errors_by_line, deprecations_by_line
+ )
+ return "\n".join(annotated_content)
def annotated_cloudconfig_file(
- cloudconfig, original_content, schema_errors, schemamarks
-):
+ cloudconfig: dict,
+ original_content: bytes,
+ schemamarks: dict,
+ *,
+ schema_errors: Optional[SchemaProblems] = None,
+ schema_deprecations: Optional[SchemaProblems] = None,
+) -> Union[str, bytes]:
"""Return contents of the cloud-config file annotated with schema errors.
@param cloudconfig: YAML-loaded dict from the original_content or empty
dict if unparseable.
@param original_content: The contents of a cloud-config file
- @param schema_errors: List of tuples from a JSONSchemaValidationError. The
- tuples consist of (schemapath, error_message).
- """
- if not schema_errors:
- return original_content
- errors_by_line = defaultdict(list)
- error_footer = []
- error_header = "# Errors: -------------\n{0}\n\n"
- annotated_content = []
- lines = original_content.decode().split("\n")
- if not isinstance(cloudconfig, dict):
- # Return a meaningful message on empty cloud-config
- return "\n".join(
- lines
- + [error_header.format("# E1: Cloud-config is not a YAML dict.")]
- )
- for path, msg in schema_errors:
- match = re.match(r"format-l(?P<line>\d+)\.c(?P<col>\d+).*", path)
- if match:
- line, col = match.groups()
- errors_by_line[int(line)].append(msg)
- else:
- col = None
- errors_by_line[schemamarks[path]].append(msg)
- if col is not None:
- msg = "Line {line} column {col}: {msg}".format(
- line=line, col=col, msg=msg
- )
- error_index = 1
- for line_number, line in enumerate(lines, 1):
- errors = errors_by_line[line_number]
- if errors:
- error_label = []
- for error in errors:
- error_label.append("E{0}".format(error_index))
- error_footer.append("# E{0}: {1}".format(error_index, error))
- error_index += 1
- annotated_content.append(line + "\t\t# " + ",".join(error_label))
+    @param schemamarks: Dict of config key paths to line numbers.
+ @param schema_errors: Instance of `SchemaProblems`.
+ @param schema_deprecations: Instance of `SchemaProblems`.
- else:
- annotated_content.append(line)
- annotated_content.append(error_header.format("\n".join(error_footer)))
- return "\n".join(annotated_content)
+    @return: Annotated cloud-config content, or the original content when
+        there are no problems to report.
+ """
+ return _Annotator(cloudconfig, original_content, schemamarks).annotate(
+ schema_errors or [], schema_deprecations or []
+ )
def validate_cloudconfig_file(config_path, schema, annotate=False):
@@ -333,19 +626,19 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
)
content = load_file(config_path, decode=False)
if not content.startswith(CLOUD_CONFIG_HEADER):
- errors = (
- (
+ errors = [
+ SchemaProblem(
"format-l1.c1",
'File {0} needs to begin with "{1}"'.format(
config_path, CLOUD_CONFIG_HEADER.decode()
),
),
- )
+ ]
error = SchemaValidationError(errors)
if annotate:
print(
annotated_cloudconfig_file(
- {}, content, error.schema_errors, {}
+ {}, content, {}, schema_errors=error.schema_errors
)
)
raise error
@@ -365,17 +658,17 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
if mark:
line = mark.line + 1
column = mark.column + 1
- errors = (
- (
+ errors = [
+ SchemaProblem(
"format-l{line}.c{col}".format(line=line, col=column),
"File {0} is not valid yaml. {1}".format(config_path, str(e)),
),
- )
+ ]
error = SchemaValidationError(errors)
if annotate:
print(
annotated_cloudconfig_file(
- {}, content, error.schema_errors, {}
+ {}, content, {}, schema_errors=error.schema_errors
)
)
raise error from e
@@ -384,15 +677,29 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
if not annotate:
raise RuntimeError("Cloud-config is not a YAML dict.")
try:
- validate_cloudconfig_schema(cloudconfig, schema, strict=True)
+ validate_cloudconfig_schema(
+ cloudconfig, schema, strict=True, log_deprecations=False
+ )
except SchemaValidationError as e:
if annotate:
print(
annotated_cloudconfig_file(
- cloudconfig, content, e.schema_errors, marks
+ cloudconfig,
+ content,
+ marks,
+ schema_errors=e.schema_errors,
+ schema_deprecations=e.schema_deprecations,
)
)
- raise
+ else:
+ message = _format_schema_problems(
+ e.schema_deprecations,
+ prefix="Cloud config schema deprecations: ",
+ separator=", ",
+ )
+ print(message)
+            if e.has_errors():  # We do not consider deprecations as errors
+ raise
def _sort_property_order(value):
@@ -483,14 +790,31 @@ def _flatten_schema_refs(src_cfg: dict, defs: dict):
# Update the references in subschema for doc rendering
src_cfg["items"].update(defs[reference])
if "oneOf" in src_cfg["items"]:
- for alt_schema in src_cfg["items"]["oneOf"]:
- if "$ref" in alt_schema:
- reference = alt_schema.pop("$ref").replace("#/$defs/", "")
- alt_schema.update(defs[reference])
- for alt_schema in src_cfg.get("oneOf", []):
- if "$ref" in alt_schema:
- reference = alt_schema.pop("$ref").replace("#/$defs/", "")
- alt_schema.update(defs[reference])
+ for sub_schema in src_cfg["items"]["oneOf"]:
+ if "$ref" in sub_schema:
+ reference = sub_schema.pop("$ref").replace("#/$defs/", "")
+ sub_schema.update(defs[reference])
+ for sub_schema in chain(
+ src_cfg.get("oneOf", []),
+ src_cfg.get("anyOf", []),
+ src_cfg.get("allOf", []),
+ ):
+ if "$ref" in sub_schema:
+ reference = sub_schema.pop("$ref").replace("#/$defs/", "")
+ sub_schema.update(defs[reference])
+
+
+def _flatten_schema_all_of(src_cfg: dict):
+ """Flatten schema: Merge allOf.
+
+    If a schema has allOf, then all of the sub-schemas must hold. Therefore
+    it is safe to merge them.
+ """
+ sub_schemas = src_cfg.pop("allOf", None)
+ if not sub_schemas:
+ return
+ for sub_schema in sub_schemas:
+ src_cfg.update(sub_schema)
def _get_property_doc(schema: dict, defs: dict, prefix=" ") -> str:
@@ -509,10 +833,14 @@ def _get_property_doc(schema: dict, defs: dict, prefix=" ") -> str:
for prop_schema in property_schemas:
for prop_key, prop_config in prop_schema.items():
_flatten_schema_refs(prop_config, defs)
+ _flatten_schema_all_of(prop_config)
if prop_config.get("hidden") is True:
continue # document nothing for this property
- # Define prop_name and description for SCHEMA_PROPERTY_TMPL
+
+ deprecated = bool(prop_config.get(DEPRECATED_KEY))
description = prop_config.get("description", "")
+ if deprecated:
+ description = _add_deprecation_msg(description)
if description:
description = " " + description
@@ -582,6 +910,15 @@ def _get_examples(meta: MetaSchema) -> str:
return rst_content
+def _get_activate_by_schema_keys_doc(meta: MetaSchema) -> str:
+ if not meta.get("activate_by_schema_keys"):
+ return ""
+ schema_keys = ", ".join(
+ f"``{k}``" for k in meta["activate_by_schema_keys"]
+ )
+ return f"**Activate only on keys:** {schema_keys}\n\n"
+
+
def get_meta_doc(meta: MetaSchema, schema: Optional[dict] = None) -> str:
"""Return reStructured text rendering the provided metadata.
@@ -595,26 +932,25 @@ def get_meta_doc(meta: MetaSchema, schema: Optional[dict] = None) -> str:
if not meta or not schema:
raise ValueError("Expected non-empty meta and schema")
keys = set(meta.keys())
- expected = set(
- {
- "id",
- "title",
- "examples",
- "frequency",
- "distros",
- "description",
- "name",
- }
- )
+ required_keys = {
+ "id",
+ "title",
+ "examples",
+ "frequency",
+ "distros",
+ "description",
+ "name",
+ }
+ optional_keys = {"activate_by_schema_keys"}
error_message = ""
- if expected - keys:
- error_message = "Missing expected keys in module meta: {}".format(
- expected - keys
+ if required_keys - keys:
+ error_message = "Missing required keys in module meta: {}".format(
+ required_keys - keys
)
- elif keys - expected:
+ elif keys - required_keys - optional_keys:
error_message = (
"Additional unexpected keys found in module meta: {}".format(
- keys - expected
+                keys - required_keys - optional_keys
)
)
if error_message:
@@ -638,6 +974,9 @@ def get_meta_doc(meta: MetaSchema, schema: Optional[dict] = None) -> str:
meta_copy["distros"] = ", ".join(meta["distros"])
# Need an underbar of the same length as the name
meta_copy["title_underbar"] = re.sub(r".", "-", meta["name"])
+ meta_copy["activate_by_schema_keys"] = _get_activate_by_schema_keys_doc(
+ meta
+ )
template = SCHEMA_DOC_TMPL.format(**meta_copy)
return template
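
A brief sketch of the reworked exception with illustrative SchemaProblem
values: errors and deprecations travel separately, and ``has_errors()`` lets
callers treat deprecation-only results as non-fatal:

    from cloudinit.config.schema import SchemaProblem, SchemaValidationError

    e = SchemaValidationError(
        schema_errors=[SchemaProblem("ntp.pools", "3 is not of type 'array'")],
        schema_deprecations=[
            SchemaProblem("apt_update", "DEPRECATED. Use ``package_update``.")
        ],
    )
    print(e)               # errors section, blank line, deprecations section
    print(e.has_errors())  # True; a deprecation-only instance returns False
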
diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json
index 7bbfcb1a..598d1c3c 100644
--- a/cloudinit/config/schemas/schema-cloud-config-v1.json
+++ b/cloudinit/config/schemas/schema-cloud-config-v1.json
@@ -2,6 +2,7 @@
"$schema": "http://json-schema.org/draft-04/schema#",
"$defs": {
"users_groups.groups_by_groupname": {
+ "additionalProperties": false,
"patternProperties": {
"^.+$": {
"label": "<group_name>",
@@ -19,6 +20,7 @@
{"required": ["name"]},
{"required": ["snapuser"]}
],
+ "additionalProperties": false,
"properties": {
"name": {
"description": "The user's login name. Required otherwise user creation will be skipped for this user.",
@@ -34,10 +36,30 @@
"type": "string"
},
"groups": {
- "description": "Optional comma-separated string or list of groups to add the user to.",
- "type": ["string", "array"],
- "items": {"type": "string"},
- "minItems": 1
+ "description": "Optional comma-separated string of groups to add the user to.",
+ "oneOf": [
+ {"type": "string"},
+ {
+ "type": "array",
+ "items": {
+ "type": ["string"]
+ },
+ "minItems": 1
+ },
+ {
+ "type": "object",
+ "patternProperties": {
+ "^.+$": {
+ "label": "<group_name>",
+ "description": "When providing an object for users.groups the ``<group_name>`` keys are the groups to add this user to",
+ "deprecated": true,
+ "type": ["null"],
+ "minItems": 1
+ }
+ },
+ "hidden": ["patternProperties"]
+ }
+ ]
},
"homedir": {
"description": "Optional home dir for user. Default: ``/home/<username>``",
@@ -48,6 +70,12 @@
"description": "Optional string representing the number of days until the user is disabled. ",
"type": "string"
},
+ "lock-passwd": {
+ "default": true,
+ "description": "Dropped after April 2027. Use ``lock_passwd``. Default: ``true``",
+ "type": "boolean",
+ "deprecated": true
+ },
"lock_passwd": {
"default": true,
"description": "Disable password login. Default: ``true``",
@@ -69,15 +97,15 @@
"type": "boolean"
},
"passwd": {
- "description": "Hash of user password applied when user does not exist. To generate this hash, run: mkpasswd --method=SHA-512 --rounds=4096. **Note:** While hashed password is better than plain text, using ``passwd`` in user-data represents a security risk as user-data could be accessible by third-parties depending on your cloud platform.",
+ "description": "Hash of user password applied when user does not exist. This will NOT be applied if the user already exists. To generate this hash, run: mkpasswd --method=SHA-512 --rounds=4096. **Note:** While hashed password is better than plain text, using ``passwd`` in user-data represents a security risk as user-data could be accessible by third-parties depending on your cloud platform.",
"type": "string"
},
"hashed_passwd": {
- "description": "Hash of user password applied to new or existing users. To generate this hash, run: mkpasswd --method=SHA-512 --rounds=4096. **Note:** While ``hashed_password`` is better than ``plain_text_passwd``, using ``passwd`` in user-data represents a security risk as user-data could be accessible by third-parties depending on your cloud platform.",
+ "description": "Hash of user password to be applied. This will be applied even if the user is pre-existing. To generate this hash, run: mkpasswd --method=SHA-512 --rounds=4096. **Note:** While ``hashed_password`` is better than ``plain_text_passwd``, using ``passwd`` in user-data represents a security risk as user-data could be accessible by third-parties depending on your cloud platform.",
"type": "string"
},
"plain_text_passwd": {
- "description": "Clear text of user password applied to new or existing users. There are many more secure options than using plain text passwords, such as ``ssh_import_id`` or ``hashed_passwd``. Do not use this in production as user-data and your password can be exposed.",
+ "description": "Clear text of user password to be applied. This will be applied even if the user is pre-existing. There are many more secure options than using plain text passwords, such as ``ssh_import_id`` or ``hashed_passwd``. Do not use this in production as user-data and your password can be exposed.",
"type": "string"
},
"create_groups": {
@@ -132,15 +160,14 @@
"description": "The user's ID. Default is next available value.",
"type": "integer"
}
- },
- "additionalProperties": false
+ }
},
"apt_configure.mirror": {
"type": "array",
"items": {
"type": "object",
- "additionalProperties": false,
"required": ["arches"],
+ "additionalProperties": false,
"properties": {
"arches": {
"type": "array",
@@ -165,12 +192,13 @@
},
"ca_certs.properties": {
"type": "object",
+ "additionalProperties": false,
"properties": {
"remove-defaults": {
- "description": "DEPRECATED. Use ``remove_defaults``. ",
- "deprecated": true,
+ "description": "Dropped after April 2027. Use ``remove_defaults``.",
"type": "boolean",
- "default": false
+ "default": false,
+ "deprecated": true
},
"remove_defaults": {
"description": "Remove default CA certificates if true. Default: false",
@@ -184,14 +212,31 @@
"minItems": 1
}
},
- "additionalProperties": false,
"minProperties": 1
},
+ "cc_ubuntu_autoinstall": {
+ "type": "object",
+ "properties": {
+ "autoinstall": {
+ "description": "Opaque autoinstall schema definition for Ubuntu autoinstall. Full schema processed by live-installer. See: https://ubuntu.com/server/docs/install/autoinstall-reference",
+ "type": "object",
+ "properties": {
+ "version": {
+ "type": "integer"
+ }
+ },
+ "required": ["version"]
+ }
+ },
+ "additionalProperties": true
+ },
"cc_apk_configure": {
"type": "object",
"properties": {
"apk_repos": {
"type": "object",
+ "minProperties": 1,
+ "additionalProperties": false,
"properties": {
"preserve_repositories": {
"type": "boolean",
@@ -200,6 +245,7 @@
},
"alpine_repo": {
"type": ["object", "null"],
+ "additionalProperties": false,
"properties": {
"base_url": {
"type": "string",
@@ -222,16 +268,13 @@
}
},
"required": ["version"],
- "minProperties": 1,
- "additionalProperties": false
+ "minProperties": 1
},
"local_repo_base_url": {
"type": "string",
"description": "The base URL of an Alpine repository containing unofficial packages"
}
- },
- "minProperties": 1,
- "additionalProperties": false
+ }
}
}
},
@@ -239,8 +282,8 @@
"properties": {
"apt": {
"type": "object",
- "additionalProperties": false,
"minProperties": 1,
+ "additionalProperties": false,
"properties": {
"preserve_sources_list": {
"type": "boolean",
@@ -270,6 +313,7 @@
"debconf_selections": {
"type": "object",
"minProperties": 1,
+ "additionalProperties": false,
"patternProperties": {
"^.+$": {
"type": "string"
@@ -303,9 +347,11 @@
},
"sources": {
"type": "object",
+ "additionalProperties": false,
"patternProperties": {
"^.+$": {
"type": "object",
+ "additionalProperties": false,
"properties": {
"source": {
"type": "string"
@@ -323,7 +369,6 @@
"type": "string"
}
},
- "additionalProperties": false,
"minProperties": 1
}
},
@@ -386,7 +431,13 @@
"$ref": "#/$defs/ca_certs.properties"
},
"ca-certs": {
- "$ref": "#/$defs/ca_certs.properties"
+ "allOf": [
+ {"$ref": "#/$defs/ca_certs.properties"},
+ {
+ "deprecated": true,
+ "description": "Dropped after April 2027. Use ``ca_certs``."
+ }
+ ]
}
}
},
@@ -395,8 +446,8 @@
"properties": {
"chef": {
"type": "object",
- "additionalProperties": false,
"minProperties": 1,
+ "additionalProperties": false,
"properties": {
"directories": {
"type": "array",
@@ -542,9 +593,9 @@
"type": "object",
"properties": {
"debug": {
- "additionalProperties": false,
"minProperties": 1,
"type": "object",
+ "additionalProperties": false,
"properties": {
"verbose": {
"description": "Should always be true for this module",
@@ -573,6 +624,7 @@
"properties": {
"device_aliases": {
"type": "object",
+ "additionalProperties": false,
"patternProperties": {
"^.+$": {
"label": "<alias_name>",
@@ -583,6 +635,7 @@
},
"disk_setup": {
"type": "object",
+ "additionalProperties": false,
"patternProperties": {
"^.+$": {
"label": "<alias name/path>",
@@ -683,8 +736,8 @@
"properties": {
"fan": {
"type": "object",
- "additionalProperties": false,
"required": ["config"],
+ "additionalProperties": false,
"properties": {
"config": {
"type": "string",
@@ -754,14 +807,24 @@
"description": "Device to use as target for grub installation. If unspecified, ``grub-probe`` of ``/boot`` will be used to find the device"
},
"grub-pc/install_devices_empty": {
- "type": ["string", "boolean"],
- "description": "Sets values for ``grub-pc/install_devices_empty``. If unspecified, will be set to ``true`` if ``grub-pc/install_devices`` is empty, otherwise ``false``. Using a non-boolean value for this field is deprecated."
+ "description": "Sets values for ``grub-pc/install_devices_empty``. If unspecified, will be set to ``true`` if ``grub-pc/install_devices`` is empty, otherwise ``false``. Using a non-boolean value for this field is deprecated.",
+ "oneOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "string",
+ "description": "Use a boolean value instead.",
+ "deprecated": true
+ }
+ ]
}
}
},
"grub-dpkg": {
"type": "object",
- "description": "DEPRECATED: Use ``grub_dpkg`` instead"
+ "description": "Use ``grub_dpkg`` instead",
+ "deprecated": true
}
}
},
@@ -779,10 +842,8 @@
"properties": {
"when": {
"type": "array",
- "additionalProperties": false,
"items": {
"type": "string",
- "additionalProperties": false,
"enum": [
"boot-new-instance",
"boot-legacy",
@@ -802,6 +863,7 @@
"properties": {
"keyboard": {
"type": "object",
+ "additionalProperties": false,
"properties": {
"layout": {
"type": "string",
@@ -821,8 +883,7 @@
"description": "Optional. Keyboard options. Corresponds to XKBOPTIONS."
}
},
- "required": ["layout"],
- "additionalProperties": false
+ "required": ["layout"]
}
}
},
@@ -831,6 +892,7 @@
"properties": {
"ssh": {
"type": "object",
+ "additionalProperties": false,
"properties": {
"emit_keys_to_console": {
"type": "boolean",
@@ -838,7 +900,6 @@
"description": "Set false to avoid printing SSH keys to system console. Default: ``true``."
}
},
- "additionalProperties": false,
"required": ["emit_keys_to_console"]
},
"ssh_key_console_blacklist": {
@@ -862,9 +923,11 @@
"landscape": {
"type": "object",
"required": ["client"],
+ "additionalProperties": false,
"properties": {
"client": {
"type": "object",
+ "additionalProperties": true,
"properties": {
"url": {
"type": "string",
@@ -887,7 +950,7 @@
"enum": ["debug", "info", "warning", "error", "critical"],
"description": "The log level for the client. Default: ``info``."
},
- "computer_tite": {
+ "computer_title": {
"type": "string",
"description": "The title of this computer."
},
@@ -936,9 +999,11 @@
"lxd": {
"type": "object",
"minProperties": 1,
+ "additionalProperties": false,
"properties": {
"init": {
"type": "object",
+ "additionalProperties": false,
"properties": {
"network_address": {
"type": "string",
@@ -950,7 +1015,7 @@
},
"storage_backend": {
"type": "string",
- "enum": ["zfs", "dir"],
+ "enum": ["zfs", "dir", "lvm", "btrfs"],
"default": "dir",
"description": "Storage backend to use. Default: ``dir``."
},
@@ -975,6 +1040,7 @@
"bridge": {
"type": "object",
"required": ["mode"],
+ "additionalProperties": false,
"properties": {
"mode": {
"type": "string",
@@ -986,6 +1052,12 @@
"description": "Name of the LXD network bridge to attach or create. Default: ``lxdbr0``.",
"default": "lxdbr0"
},
+ "mtu": {
+ "type": "integer",
+ "description": "Bridge MTU, defaults to LXD's default value",
+ "default": -1,
+ "minimum": -1
+ },
"ipv4_address": {
"type": "string",
"description": "IPv4 address for the bridge. If set, ``ipv4_netmask`` key required."
@@ -1030,8 +1102,7 @@
}
}
}
- },
- "additionalProperties": false
+ }
}
}
},
@@ -1040,9 +1111,11 @@
"properties": {
"mcollective": {
"type": "object",
+ "additionalProperties": false,
"properties": {
"conf": {
"type": "object",
+ "additionalProperties": false,
"properties": {
"public-cert": {
"type": "string",
@@ -1064,8 +1137,7 @@
}
}
}
- },
- "additionalProperties": false
+ }
}
}
},
@@ -1108,6 +1180,7 @@
},
"swap": {
"type": "object",
+ "additionalProperties": false,
"properties": {
"filename": {
"type": "string",
@@ -1128,8 +1201,7 @@
],
"description": "The maxsize in bytes of the swap file"
}
- },
- "additionalProperties": false
+ }
}
}
},
@@ -1138,6 +1210,7 @@
"properties": {
"ntp": {
"type": ["null", "object"],
+ "additionalProperties": false,
"properties": {
"pools": {
"type": "array",
@@ -1170,6 +1243,8 @@
"config": {
"description": "Configuration settings or overrides for the\n``ntp_client`` specified.",
"type": "object",
+ "minProperties": 1,
+ "additionalProperties": false,
"properties": {
"confpath": {
"type": "string",
@@ -1195,12 +1270,9 @@
"type": "string",
"description": "Inline template allowing users to define their\nown ``ntp_client`` configuration template.\nThe value must start with '## template:jinja'\nto enable use of templating support.\n"
}
- },
- "minProperties": 1,
- "additionalProperties": false
+ }
}
- },
- "additionalProperties": false
+ }
}
}
},
@@ -1234,22 +1306,22 @@
"description": "Set ``true`` to reboot the system if required by presence of `/var/run/reboot-required`. Default: ``false``"
},
"apt_update": {
- "type": "boolean",
- "default": false,
- "description": "DEPRECATED. Use ``package_update``. Default: ``false``",
- "deprecated": true
+ "type": "boolean",
+ "default": false,
+ "description": "Dropped after April 2027. Use ``package_update``. Default: ``false``",
+ "deprecated": true
},
"apt_upgrade": {
- "type": "boolean",
- "default": false,
- "description": "DEPRECATED. Use ``package_upgrade``. Default: ``false``",
- "deprecated": true
+ "type": "boolean",
+ "default": false,
+ "description": "Dropped after April 2027. Use ``package_upgrade``. Default: ``false``",
+ "deprecated": true
},
"apt_reboot_if_required": {
- "type": "boolean",
- "default": false,
- "description": "DEPRECATED. Use ``package_reboot_if_required``. Default: ``false``",
- "deprecated": true
+ "type": "boolean",
+ "default": false,
+ "description": "Dropped after April 2027. Use ``package_reboot_if_required``. Default: ``false``",
+ "deprecated": true
}
}
},
@@ -1258,8 +1330,8 @@
"properties": {
"phone_home": {
"type": "object",
- "additionalProperties": false,
"required": ["url"],
+ "additionalProperties": false,
"properties": {
"url": {
"type": "string",
@@ -1301,15 +1373,20 @@
"properties": {
"power_state": {
"type": "object",
- "additionalProperties": false,
"required": ["mode"],
+ "additionalProperties": false,
"properties": {
"delay": {
"description": "Time in minutes to delay after cloud-init has finished. Can be ``now`` or an integer specifying the number of minutes to delay. Default: ``now``",
"default": "now",
"oneOf": [
{"type": "integer", "minimum": 0},
- {"type": "string", "pattern": "^\\+?[0-9]+$"},
+ {
+ "type": "string",
+ "pattern": "^\\+?[0-9]+$",
+ "deprecated": true,
+ "description": "Dropped after April 2027. Use ``now`` or integer type."
+ },
{"enum": ["now"]}
]
},
@@ -1579,8 +1656,8 @@
{"type": "string"},
{
"type": "object",
- "additionalProperties": false,
"required": ["content"],
+ "additionalProperties": false,
"properties": {
"filename": {
"type": "string"
@@ -1764,6 +1841,46 @@
"default": true,
"description": "Whether to expire all user passwords such that a password will need to be reset on the user's next login. Default: ``true``"
},
+ "users": {
+ "description": "Replaces the deprecated ``list`` key. This key represents a list of existing users to set passwords for. Each item under users contains the following required keys: ``name`` and ``password`` or in the case of a randomly generated password, ``name`` and ``type``. The ``type`` key has a default value of ``hash``, and may alternatively be set to ``text`` or ``RANDOM``.",
+ "type": "array",
+ "items": {
+ "minItems": 1,
+ "type": "object",
+ "anyOf": [
+ {
+ "required": ["name", "type"],
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "type": {
+ "enum": ["RANDOM"],
+ "type": "string"
+ }
+ }
+ },
+ {
+ "required": ["name", "password"],
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "type": {
+ "enum": ["hash", "text"],
+ "default": "hash",
+ "type": "string"
+ },
+ "password": {
+ "type": "string"
+ }
+ }
+ }
+ ]
+ }
+ },
"list": {
"oneOf": [
{"type": "string"},
@@ -1775,7 +1892,8 @@
}}
],
"minItems": 1,
- "description": "List of ``username:password`` pairs. Each user will have the corresponding password set. A password can be randomly generated by specifying ``RANDOM`` or ``R`` as a user's password. A hashed password, created by a tool like ``mkpasswd``, can be specified. A regex (``r'\\$(1|2a|2y|5|6)(\\$.+){2}'``) is used to determine if a password value should be treated as a hash.\n\nUse of a multiline string for this field is DEPRECATED and will result in an error in a future version of cloud-init."
+ "description": "List of ``username:password`` pairs. Each user will have the corresponding password set. A password can be randomly generated by specifying ``RANDOM`` or ``R`` as a user's password. A hashed password, created by a tool like ``mkpasswd``, can be specified. A regex (``r'\\$(1|2a|2y|5|6)(\\$.+){2}'``) is used to determine if a password value should be treated as a hash.\n\nUse of a multiline string for this field is DEPRECATED and will result in an error in a future version of cloud-init.",
+ "deprecated": true
}
}
},
@@ -1790,12 +1908,12 @@
"properties": {
"snap": {
"type": "object",
- "additionalProperties": false,
"minProperties": 1,
+ "additionalProperties": false,
"properties": {
"assertions": {
- "type": ["object", "array"],
"description": "Properly-signed snap assertions which will run before and snap ``commands``.",
+ "type": ["object", "array"],
"items": {"type": "string"},
"additionalItems": false,
"minItems": 1,
@@ -1882,13 +2000,13 @@
"ssh_keys": {
"type": "object",
"description": "A dictionary entries for the public and private host keys of each desired key type. Entries in the ``ssh_keys`` config dict should have keys in the format ``<key type>_private``, ``<key type>_public``, and, optionally, ``<key type>_certificate``, e.g. ``rsa_private: <key>``, ``rsa_public: <key>``, and ``rsa_certificate: <key>``. Not all key types have to be specified, ones left unspecified will not be used. If this config option is used, then separate keys will not be automatically generated. In order to specify multiline private host keys and certificates, use yaml multiline syntax.",
+ "additionalProperties": false,
"patternProperties": {
"^(dsa|ecdsa|ed25519|rsa)_(public|private|certificate)$": {
"label": "<key_type>",
"type": "string"
}
- },
- "additionalProperties": false
+ }
},
"ssh_authorized_keys": {
"type": "array",
@@ -1967,6 +2085,8 @@
"properties": {
"ubuntu_advantage": {
"type": "object",
+ "required": ["token"],
+ "additionalProperties": false,
"properties": {
"enable": {
"type": "array",
@@ -1976,10 +2096,44 @@
"token": {
"type": "string",
"description": "Required contract token obtained from https://ubuntu.com/advantage to attach."
+ },
+ "config": {
+ "type": "object",
+ "description": "Configuration settings or override Ubuntu Advantage config",
+ "properties": {
+ "http_proxy": {
+ "type": "string",
+ "format": "uri",
+ "description": "Ubuntu Advantage HTTP Proxy URL"
+ },
+ "https_proxy": {
+ "type": "string",
+ "format": "uri",
+ "description": "Ubuntu Advantage HTTPS Proxy URL"
+ },
+ "global_apt_http_proxy": {
+ "type": "string",
+ "format": "uri",
+ "description": "HTTP Proxy URL used for all APT repositories on a system. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``"
+ },
+ "global_apt_https_proxy": {
+ "type": "string",
+ "format": "uri",
+ "description": "HTTPS Proxy URL used for all APT repositories on a system. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``"
+ },
+ "ua_apt_http_proxy": {
+ "type": "string",
+ "format": "uri",
+ "description": "HTTP Proxy URL used only for Ubuntu Advantage APT repositories. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``"
+ },
+ "ua_apt_https_proxy": {
+ "type": "string",
+ "format": "uri",
+ "description": "HTTPS Proxy URL used only for Ubuntu Advantage APT repositories. Stored at ``/etc/apt/apt.conf.d/90ubuntu-advantage-aptproxy``"
+ }
+ }
}
- },
- "required": ["token"],
- "additionalProperties": false
+ }
}
}
},
@@ -1992,10 +2146,10 @@
"properties": {
"nvidia": {
"type": "object",
- "additionalProperties": false,
"required": [
"license-accepted"
],
+ "additionalProperties": false,
"properties": {
"license-accepted": {
"type": "boolean",
@@ -2062,7 +2216,7 @@
{"type": "string"},
{"type": "object", "$ref": "#/$defs/users_groups.user"}
],
- "description": "The ``user`` dictionary values override the ``default_user`` configuration from ``/etc/cloud/cloud.cfg``. The `user` dictionary keys supported for the default_user are the same as the ``users`` schema. DEPRECATED: string and types will be removed in a future release. Use ``users`` instead."
+ "description": "The ``user`` dictionary values override the ``default_user`` configuration from ``/etc/cloud/cloud.cfg``. The `user` dictionary keys supported for the default_user are the same as the ``users`` schema."
},
"users": {
"type": ["string", "array", "object"],
@@ -2084,6 +2238,8 @@
"type": "array",
"items": {
"type": "object",
+ "required": ["path"],
+ "additionalProperties": false,
"properties": {
"path": {
"type": "string",
@@ -2120,9 +2276,7 @@
"default": false,
"description": "Defer writing the file until 'final' stage, after users were created, and packages were installed. Default: ``false``."
}
- },
- "required": ["path"],
- "additionalProperties": false
+ }
},
"minItems": 1
}
@@ -2139,11 +2293,13 @@
"yum_repos": {
"type": "object",
"minProperties": 1,
+ "additionalProperties": false,
"patternProperties": {
"^[0-9a-zA-Z -_]+$": {
"label": "<repo_name>",
"type": "object",
"description": "Object keyed on unique yum repo IDs. The key used will be used to write yum repo config files in ``yum_repo_dir``/<repo_key_id>.repo.",
+ "additionalProperties": false,
"properties": {
"baseurl": {
"type": "string",
@@ -2173,8 +2329,7 @@
},
"required": ["baseurl"]
}
- },
- "additionalProperties": false
+ }
}
}
},
@@ -2183,11 +2338,14 @@
"properties": {
"zypper": {
"type": "object",
+ "minProperties": 1,
+ "additionalProperties": true,
"properties": {
"repos": {
"type": "array",
"items": {
"type": "object",
+ "additionalProperties": true,
"properties": {
"id": {
"type": "string",
@@ -2202,8 +2360,7 @@
"required": [
"id",
"baseurl"
- ],
- "additionalProperties": true
+ ]
},
"minItems": 1
},
@@ -2211,9 +2368,7 @@
"type": "object",
"description": "Any supported zypo.conf key is written to ``/etc/zypp/zypp.conf``"
}
- },
- "minProperties": 1,
- "additionalProperties": false
+ }
}
}
},
@@ -2326,6 +2481,7 @@
{ "$ref": "#/$defs/cc_apk_configure" },
{ "$ref": "#/$defs/cc_apt_configure" },
{ "$ref": "#/$defs/cc_apt_pipelining" },
+ { "$ref": "#/$defs/cc_ubuntu_autoinstall"},
{ "$ref": "#/$defs/cc_bootcmd" },
{ "$ref": "#/$defs/cc_byobu" },
{ "$ref": "#/$defs/cc_ca_certs" },
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 3d771c2a..ffa41093 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -16,7 +16,7 @@ import stat
import string
import urllib.parse
from io import StringIO
-from typing import Any, Mapping, Type
+from typing import Any, Mapping, Optional, Type
from cloudinit import importer
from cloudinit import log as logging
@@ -76,6 +76,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
ci_sudoers_fn = "/etc/sudoers.d/90-cloud-init-users"
hostname_conf_fn = "/etc/hostname"
tz_zone_dir = "/usr/share/zoneinfo"
+ default_owner = "root:root"
init_cmd = ["service"] # systemctl, service etc
renderer_configs: Mapping[str, Mapping[str, Any]] = {}
_preferred_ntp_clients = None
@@ -117,6 +118,17 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
"_write_network_config needs implementation.\n" % self.name
)
+ @property
+ def network_activator(self) -> Optional[Type[activators.NetworkActivator]]:
+ """Return the configured network activator for this environment."""
+ priority = util.get_cfg_by_path(
+ self._cfg, ("network", "activators"), None
+ )
+ try:
+ return activators.select_activator(priority=priority)
+ except activators.NoActivatorException:
+ return None
+
def _write_network_state(self, network_state):
priority = util.get_cfg_by_path(
self._cfg, ("network", "renderers"), None
@@ -241,9 +253,8 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
# Now try to bring them up
if bring_up:
LOG.debug("Bringing up newly configured network interfaces")
- try:
- network_activator = activators.select_activator()
- except activators.NoActivatorException:
+ network_activator = self.network_activator
+ if not network_activator:
LOG.warning(
"No network activator found, not bringing up "
"network interfaces"
@@ -510,6 +521,15 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
if isinstance(groups, str):
groups = groups.split(",")
+ if isinstance(groups, dict):
+ LOG.warning(
+ "DEPRECATED: The user %s has a 'groups' config value of"
+ " type dict which is deprecated and will be removed in a"
+ " future version of cloud-init. Use a comma-delimited"
+ " string or array instead: group1,group2.",
+ name,
+ )
+
# remove any white spaces in group names, most likely
# that came in as a string like: groups: group1, group2
groups = [g.strip() for g in groups]
@@ -717,6 +737,16 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
return True
+ def chpasswd(self, plist_in: list, hashed: bool):
+ payload = (
+ "\n".join(
+ (":".join([name, password]) for name, password in plist_in)
+ )
+ + "\n"
+ )
+ cmd = ["chpasswd"] + (["-e"] if hashed else [])
+ subp.subp(cmd, payload)
+
def ensure_sudo_dir(self, path, sudo_base="/etc/sudoers"):
# Ensure the dir is included and that
# it actually exists as a directory
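
The generic ``chpasswd`` helper above builds one newline-joined payload and
shells out a single time; a sketch of the payload construction with
illustrative users (the BSD override further below sets each password via
``set_passwd`` instead):

    # chpasswd payload: one "name:password" pair per line with a trailing
    # newline; "-e" is appended when the passwords are pre-hashed.
    plist_in = [("alice", "$6$rounds=4096$salt$hash"), ("bob", "hunter2")]
    hashed = True  # illustrative
    payload = "\n".join(":".join([n, p]) for n, p in plist_in) + "\n"
    cmd = ["chpasswd"] + (["-e"] if hashed else [])
    print(cmd, repr(payload))
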
diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py
index fca9f9fa..77e9bf11 100644
--- a/cloudinit/distros/bsd.py
+++ b/cloudinit/distros/bsd.py
@@ -15,6 +15,7 @@ class BSD(distros.Distro):
networking_cls = BSDNetworking
hostname_conf_fn = "/etc/rc.conf"
rc_conf_fn = "/etc/rc.conf"
+ default_owner = "root:wheel"
# This differs from the parent Distro class, which has -P for
# poweroff.
@@ -134,3 +135,7 @@ class BSD(distros.Distro):
def apply_locale(self, locale, out_fn=None):
LOG.debug("Cannot set the locale.")
+
+ def chpasswd(self, plist_in: list, hashed: bool):
+ for name, password in plist_in:
+ self.set_passwd(name, password, hashed=hashed)
diff --git a/cloudinit/features.py b/cloudinit/features.py
index e1116a17..ac586f6b 100644
--- a/cloudinit/features.py
+++ b/cloudinit/features.py
@@ -49,6 +49,16 @@ mirrors via :py:mod:`apt: <cloudinit.config.cc_apt_configure>`
directives in cloud-config.
"""
+
+EXPIRE_APPLIES_TO_HASHED_USERS = True
+"""
+If ``EXPIRE_APPLIES_TO_HASHED_USERS`` is True, then when expire is set true
+in cc_set_passwords, hashed passwords will be expired. Prior to 22.3,
+only non-hashed passwords were expired.
+
+(This flag can be removed after Jammy is no longer supported.)
+"""
+
try:
# pylint: disable=wildcard-import
from cloudinit.feature_overrides import * # noqa
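Because feature flags are re-exported through the wildcard import above, a downstream build wanting the pre-22.3 behavior could ship a hypothetical cloudinit/feature_overrides.py module:

    # Hypothetical override module: cloudinit/feature_overrides.py
    # Restores the pre-22.3 behavior of not expiring hashed passwords.
    EXPIRE_APPLIES_TO_HASHED_USERS = False
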
diff --git a/cloudinit/importer.py b/cloudinit/importer.py
index c9fa9dc5..ce25fe9a 100644
--- a/cloudinit/importer.py
+++ b/cloudinit/importer.py
@@ -8,16 +8,34 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import sys
+import importlib
+from types import ModuleType
+from typing import Optional, Sequence
-def import_module(module_name):
- __import__(module_name)
- return sys.modules[module_name]
+def import_module(module_name: str) -> ModuleType:
+ return importlib.import_module(module_name)
-def find_module(base_name: str, search_paths, required_attrs=None) -> tuple:
- """Finds and imports specified modules"""
+def _count_attrs(
+ module_name: str, attrs: Optional[Sequence[str]] = None
+) -> int:
+ found_attrs = 0
+ if not attrs:
+ return found_attrs
+ mod = importlib.import_module(module_name)
+ for attr in attrs:
+ if hasattr(mod, attr):
+ found_attrs += 1
+ return found_attrs
+
+
+def find_module(
+ base_name: str,
+ search_paths: Sequence[str],
+ required_attrs: Optional[Sequence[str]] = None,
+) -> tuple:
+ """Finds specified modules"""
if not required_attrs:
required_attrs = []
# NOTE(harlowja): translate the search paths to include the base name.
@@ -31,18 +49,10 @@ def find_module(base_name: str, search_paths, required_attrs=None) -> tuple:
lookup_paths.append(full_path)
found_paths = []
for full_path in lookup_paths:
- mod = None
- try:
- mod = import_module(full_path)
- except ImportError:
- pass
- if not mod:
+ if not importlib.util.find_spec(full_path):
continue
- found_attrs = 0
- for attr in required_attrs:
- if hasattr(mod, attr):
- found_attrs += 1
- if found_attrs == len(required_attrs):
+ # Check that required_attrs are all present within the module.
+ if _count_attrs(full_path, required_attrs) == len(required_attrs):
found_paths.append(full_path)
return (found_paths, lookup_paths)
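The rework probes for a module spec before importing, so a missing candidate no longer triggers (and swallows) an import attempt; a module is imported only when required attributes must be checked. A quick illustration of the underlying stdlib call, with example module names:

    import importlib.util

    # A spec lookup locates the module without executing it.
    print(importlib.util.find_spec("json") is not None)   # True
    print(importlib.util.find_spec("not_a_real_module"))  # None
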
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index f5545fc1..4bc48676 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -928,6 +928,13 @@ def get_interfaces_by_mac(blacklist_drivers=None) -> dict:
)
+def find_interface_name_from_mac(mac: str) -> Optional[str]:
+ for interface_mac, interface_name in get_interfaces_by_mac().items():
+ if mac.lower() == interface_mac.lower():
+ return interface_name
+ return None
+
+
def get_interfaces_by_mac_on_freebsd(blacklist_drivers=None) -> dict:
(out, _) = subp.subp(["ifconfig", "-a", "ether"])
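A hedged usage sketch for the new helper; the MAC address is an arbitrary example and the result depends on the NICs present on the host:

    from cloudinit.net import find_interface_name_from_mac

    # Case-insensitive match over get_interfaces_by_mac(); None when absent.
    print(find_interface_name_from_mac("AA:BB:CC:DD:EE:FF"))
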
diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py
index f2cc078f..b6af3770 100644
--- a/cloudinit/net/activators.py
+++ b/cloudinit/net/activators.py
@@ -1,7 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import logging
from abc import ABC, abstractmethod
-from typing import Iterable, List, Type
+from typing import Dict, Iterable, List, Optional, Type, Union
from cloudinit import subp, util
from cloudinit.net.eni import available as eni_available
@@ -32,7 +32,7 @@ def _alter_interface(cmd, device_name) -> bool:
class NetworkActivator(ABC):
@staticmethod
@abstractmethod
- def available() -> bool:
+ def available(target: Optional[str] = None) -> bool:
"""Return True if activator is available, otherwise return False."""
raise NotImplementedError()
@@ -97,7 +97,7 @@ class IfUpDownActivator(NetworkActivator):
# E.g., NetworkManager has a ifupdown plugin that requires the name
# of a specific connection.
@staticmethod
- def available(target=None) -> bool:
+    def available(target: Optional[str] = None) -> bool:
"""Return true if ifupdown can be used on this system."""
return eni_available(target=target)
@@ -254,33 +254,43 @@ class NetworkdActivator(NetworkActivator):
# This section is mostly copied and pasted from renderers.py. An abstract
# version to encompass both seems overkill at this point
DEFAULT_PRIORITY = [
- IfUpDownActivator,
- NetplanActivator,
- NetworkManagerActivator,
- NetworkdActivator,
+ "eni",
+ "netplan",
+ "network-manager",
+ "networkd",
]
+NAME_TO_ACTIVATOR: Dict[str, Type[NetworkActivator]] = {
+ "eni": IfUpDownActivator,
+ "netplan": NetplanActivator,
+ "network-manager": NetworkManagerActivator,
+ "networkd": NetworkdActivator,
+}
+
def search_activator(
- priority=None, target=None
+ priority: List[str], target: Union[str, None]
) -> List[Type[NetworkActivator]]:
- if priority is None:
- priority = DEFAULT_PRIORITY
-
unknown = [i for i in priority if i not in DEFAULT_PRIORITY]
if unknown:
raise ValueError(
"Unknown activators provided in priority list: %s" % unknown
)
-
- return [activator for activator in priority if activator.available(target)]
+ activator_classes = [NAME_TO_ACTIVATOR[name] for name in priority]
+ return [
+ activator_cls
+ for activator_cls in activator_classes
+ if activator_cls.available(target)
+ ]
-def select_activator(priority=None, target=None) -> Type[NetworkActivator]:
+def select_activator(
+ priority: Optional[List[str]] = None, target: Optional[str] = None
+) -> Type[NetworkActivator]:
+ if priority is None:
+ priority = DEFAULT_PRIORITY
found = search_activator(priority, target)
if not found:
- if priority is None:
- priority = DEFAULT_PRIORITY
tmsg = ""
if target and target != "/":
tmsg = " in target=%s" % target
@@ -289,5 +299,7 @@ def select_activator(priority=None, target=None) -> Type[NetworkActivator]:
"through list: %s" % (tmsg, priority)
)
selected = found[0]
- LOG.debug("Using selected activator: %s", selected)
+ LOG.debug(
+ "Using selected activator: %s from priority: %s", selected, priority
+ )
return selected
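With activators now keyed by name, the system_info.network.activators list from cloud.cfg can be passed through unchanged. A minimal sketch, assuming a host where the cloudinit package is importable:

    from cloudinit.net import activators

    try:
        cls = activators.select_activator(
            priority=["netplan", "eni", "network-manager", "networkd"]
        )
        print("Selected activator:", cls.__name__)
    except activators.NoActivatorException:
        print("No supported network activator found on this system")
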
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 2c64e492..80f2b108 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -11,6 +11,7 @@ from typing import Any, Dict
from cloudinit import safeyaml, util
from cloudinit.net import (
+ find_interface_name_from_mac,
get_interfaces_by_mac,
ipv4_mask_to_net_prefix,
ipv6_mask_to_net_prefix,
@@ -700,15 +701,14 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
# * interface name looked up by mac
# * value of "eth" key from this loop
name = eth
- set_name = cfg.get("set-name", None)
+ set_name = cfg.get("set-name")
if set_name:
name = set_name
elif mac_address and ifaces_by_mac:
lcase_mac_address = mac_address.lower()
- for iface_mac, iface_name in ifaces_by_mac.items():
- if lcase_mac_address == iface_mac.lower():
- name = iface_name
- break
+            iface_name = find_interface_name_from_mac(lcase_mac_address)
+            if iface_name:
+                name = iface_name
phy_cmd["name"] = name
driver = match.get("driver", None)
@@ -780,6 +780,12 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
if len(dns) > 0:
name_cmd.update({"address": dns})
self.handle_nameserver(name_cmd)
+
+            mac_address = dev_cfg.get("match", {}).get("macaddress")
+            if mac_address:
+                real_if_name = find_interface_name_from_mac(mac_address)
+                if real_if_name:
+                    iface = real_if_name
+
self._handle_individual_nameserver(name_cmd, iface)
def _handle_bond_bridge(self, command, cmd_type=None):
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index ecc1403b..32844e71 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -14,6 +14,8 @@ CFG_ENV_NAME = "CLOUD_CFG"
# This is expected to be a yaml formatted file
CLOUD_CONFIG = "/etc/cloud/cloud.cfg"
+CLEAN_RUNPARTS_DIR = "/etc/cloud/clean.d"
+
RUN_CLOUD_CONFIG = "/run/cloud-init/cloud.cfg"
# What u get if no config is provided
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index e7a0407c..5aea0c5c 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -12,9 +12,9 @@ import os.path
import re
import xml.etree.ElementTree as ET
from enum import Enum
+from pathlib import Path
from time import sleep, time
from typing import Any, Dict, List, Optional
-from xml.dom import minidom
import requests
@@ -34,6 +34,10 @@ from cloudinit.sources.helpers import netlink
from cloudinit.sources.helpers.azure import (
DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE,
DEFAULT_WIRESERVER_ENDPOINT,
+ BrokenAzureDataSource,
+ ChassisAssetTag,
+ NonAzureDataSource,
+ OvfEnvXml,
azure_ds_reporter,
azure_ds_telemetry_reporter,
build_minimal_ovf,
@@ -58,8 +62,6 @@ DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
# ensures that it gets linked to this path.
RESOURCE_DISK_PATH = "/dev/disk/cloud/azure_resource"
DEFAULT_FS = "ext4"
-# DMI chassis-asset-tag is set static for all azure instances
-AZURE_CHASSIS_ASSET_TAG = "7783-7084-3265-9085-8269-3286-77"
AGENT_SEED_DIR = "/var/lib/waagent"
DEFAULT_PROVISIONING_ISO_DEV = "/dev/sr0"
@@ -305,6 +307,20 @@ DEF_EPHEMERAL_LABEL = "Temporary Storage"
DEF_PASSWD_REDACTION = "REDACTED"
+@azure_ds_telemetry_reporter
+def is_platform_viable(seed_dir: Optional[Path]) -> bool:
+ """Check platform environment to report if this datasource may run."""
+ chassis_tag = ChassisAssetTag.query_system()
+ if chassis_tag is not None:
+ return True
+
+ # If no valid chassis tag, check for seeded ovf-env.xml.
+ if seed_dir is None:
+ return False
+
+ return (seed_dir / "ovf-env.xml").exists()
+
+
class DataSourceAzure(sources.DataSource):
dsname = "Azure"
@@ -665,10 +681,6 @@ class DataSourceAzure(sources.DataSource):
return crawled_data
- def _is_platform_viable(self):
- """Check platform environment to report if this datasource may run."""
- return _is_platform_viable(self.seed_dir)
-
def clear_cached_attrs(self, attr_defaults=()):
"""Reset any cached class attributes to defaults."""
super(DataSourceAzure, self).clear_cached_attrs(attr_defaults)
@@ -681,7 +693,7 @@ class DataSourceAzure(sources.DataSource):
@return: True on success, False on error, invalid or disabled
datasource.
"""
- if not self._is_platform_viable():
+ if not is_platform_viable(Path(self.seed_dir)):
return False
try:
get_boot_telemetry()
@@ -1803,264 +1815,54 @@ def write_files(datadir, files, dirmode=None):
util.write_file(filename=fname, content=content, mode=0o600)
-def find_child(node, filter_func):
- ret = []
- if not node.hasChildNodes():
- return ret
- for child in node.childNodes:
- if filter_func(child):
- ret.append(child)
- return ret
-
-
-@azure_ds_telemetry_reporter
-def load_azure_ovf_pubkeys(sshnode):
- # This parses a 'SSH' node formatted like below, and returns
- # an array of dicts.
- # [{'fingerprint': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7',
- # 'path': '/where/to/go'}]
- #
- # <SSH><PublicKeys>
- # <PublicKey><Fingerprint>ABC</FingerPrint><Path>/x/y/z</Path>
- # ...
- # </PublicKeys></SSH>
- # Under some circumstances, there may be a <Value> element along with the
- # Fingerprint and Path. Pass those along if they appear.
- results = find_child(sshnode, lambda n: n.localName == "PublicKeys")
- if len(results) == 0:
- return []
- if len(results) > 1:
- raise BrokenAzureDataSource(
- "Multiple 'PublicKeys'(%s) in SSH node" % len(results)
- )
-
- pubkeys_node = results[0]
- pubkeys = find_child(pubkeys_node, lambda n: n.localName == "PublicKey")
-
- if len(pubkeys) == 0:
- return []
-
- found = []
- text_node = minidom.Document.TEXT_NODE
-
- for pk_node in pubkeys:
- if not pk_node.hasChildNodes():
- continue
-
- cur = {"fingerprint": "", "path": "", "value": ""}
- for child in pk_node.childNodes:
- if child.nodeType == text_node or not child.localName:
- continue
-
- name = child.localName.lower()
-
- if name not in cur.keys():
- continue
-
- if (
- len(child.childNodes) != 1
- or child.childNodes[0].nodeType != text_node
- ):
- continue
-
- cur[name] = child.childNodes[0].wholeText.strip()
- found.append(cur)
-
- return found
-
-
@azure_ds_telemetry_reporter
def read_azure_ovf(contents):
- try:
- dom = minidom.parseString(contents)
- except Exception as e:
- error_str = "Invalid ovf-env.xml: %s" % e
- report_diagnostic_event(error_str, logger_func=LOG.warning)
- raise BrokenAzureDataSource(error_str) from e
-
- results = find_child(
- dom.documentElement, lambda n: n.localName == "ProvisioningSection"
- )
-
- if len(results) == 0:
- raise NonAzureDataSource("No ProvisioningSection")
- if len(results) > 1:
- raise BrokenAzureDataSource(
- "found '%d' ProvisioningSection items" % len(results)
- )
- provSection = results[0]
+ """Parse OVF XML contents.
- lpcs_nodes = find_child(
- provSection,
- lambda n: n.localName == "LinuxProvisioningConfigurationSet",
- )
-
- if len(lpcs_nodes) == 0:
- raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
- if len(lpcs_nodes) > 1:
- raise BrokenAzureDataSource(
- "found '%d' %ss"
- % (len(lpcs_nodes), "LinuxProvisioningConfigurationSet")
- )
- lpcs = lpcs_nodes[0]
-
- if not lpcs.hasChildNodes():
- raise BrokenAzureDataSource("no child nodes of configuration set")
+    :return: Tuple of (metadata dict, userdata, config dict).
+ :raises NonAzureDataSource: if XML is not in Azure's format.
+ :raises BrokenAzureDataSource: if XML is unparseable or invalid.
+ """
+ ovf_env = OvfEnvXml.parse_text(contents)
md: Dict[str, Any] = {}
cfg = {}
- ud = ""
- password = None
- username = None
+ ud = ovf_env.custom_data or ""
- for child in lpcs.childNodes:
- if child.nodeType == dom.TEXT_NODE or not child.localName:
- continue
+ if ovf_env.hostname:
+ md["local-hostname"] = ovf_env.hostname
- name = child.localName.lower()
+ if ovf_env.public_keys:
+ cfg["_pubkeys"] = ovf_env.public_keys
- value = ""
- if (
- len(child.childNodes) == 1
- and child.childNodes[0].nodeType == dom.TEXT_NODE
- ):
- value = child.childNodes[0].wholeText
-
- if name == "customdata":
- ud = base64.b64decode("".join(value.split()))
- elif name == "username":
- username = value
- elif name == "userpassword":
- password = value
- elif name == "hostname":
- md["local-hostname"] = value
- elif name == "ssh":
- cfg["_pubkeys"] = load_azure_ovf_pubkeys(child)
- elif name == "disablesshpasswordauthentication":
- cfg["ssh_pwauth"] = util.is_false(value)
+ if ovf_env.disable_ssh_password_auth is not None:
+ cfg["ssh_pwauth"] = not ovf_env.disable_ssh_password_auth
+ elif ovf_env.password:
+ cfg["ssh_pwauth"] = True
defuser = {}
- if username:
- defuser["name"] = username
- if password:
+ if ovf_env.username:
+ defuser["name"] = ovf_env.username
+ if ovf_env.password:
defuser["lock_passwd"] = False
- if DEF_PASSWD_REDACTION != password:
- defuser["passwd"] = cfg["password"] = encrypt_pass(password)
+ if DEF_PASSWD_REDACTION != ovf_env.password:
+ defuser["hashed_passwd"] = encrypt_pass(ovf_env.password)
if defuser:
cfg["system_info"] = {"default_user": defuser}
- if "ssh_pwauth" not in cfg and password:
- cfg["ssh_pwauth"] = True
-
- preprovisioning_cfg = _get_preprovisioning_cfgs(dom)
- cfg = util.mergemanydict([cfg, preprovisioning_cfg])
-
- return (md, ud, cfg)
-
-
-@azure_ds_telemetry_reporter
-def _get_preprovisioning_cfgs(dom):
- """Read the preprovisioning related flags from ovf and populates a dict
- with the info.
-
- Two flags are in use today: PreprovisionedVm bool and
- PreprovisionedVMType enum. In the long term, the PreprovisionedVm bool
- will be deprecated in favor of PreprovisionedVMType string/enum.
-
- Only these combinations of values are possible today:
- - PreprovisionedVm=True and PreprovisionedVMType=Running
- - PreprovisionedVm=False and PreprovisionedVMType=Savable
- - PreprovisionedVm is missing and PreprovisionedVMType=Running/Savable
- - PreprovisionedVm=False and PreprovisionedVMType is missing
-
- More specifically, this will never happen:
- - PreprovisionedVm=True and PreprovisionedVMType=Savable
- """
- cfg = {"PreprovisionedVm": False, "PreprovisionedVMType": None}
-
- platform_settings_section = find_child(
- dom.documentElement, lambda n: n.localName == "PlatformSettingsSection"
- )
- if not platform_settings_section or len(platform_settings_section) == 0:
- LOG.debug("PlatformSettingsSection not found")
- return cfg
- platform_settings = find_child(
- platform_settings_section[0],
- lambda n: n.localName == "PlatformSettings",
- )
- if not platform_settings or len(platform_settings) == 0:
- LOG.debug("PlatformSettings not found")
- return cfg
-
- # Read the PreprovisionedVm bool flag. This should be deprecated when the
- # platform has removed PreprovisionedVm and only surfaces
- # PreprovisionedVMType.
- cfg["PreprovisionedVm"] = _get_preprovisionedvm_cfg_value(
- platform_settings
- )
-
- cfg["PreprovisionedVMType"] = _get_preprovisionedvmtype_cfg_value(
- platform_settings
- )
- return cfg
-
-
-@azure_ds_telemetry_reporter
-def _get_preprovisionedvm_cfg_value(platform_settings):
- preprovisionedVm = False
-
- # Read the PreprovisionedVm bool flag. This should be deprecated when the
- # platform has removed PreprovisionedVm and only surfaces
- # PreprovisionedVMType.
- preprovisionedVmVal = find_child(
- platform_settings[0], lambda n: n.localName == "PreprovisionedVm"
- )
- if not preprovisionedVmVal or len(preprovisionedVmVal) == 0:
- LOG.debug("PreprovisionedVm not found")
- return preprovisionedVm
- preprovisionedVm = util.translate_bool(
- preprovisionedVmVal[0].firstChild.nodeValue
- )
-
+ cfg["PreprovisionedVm"] = ovf_env.preprovisioned_vm
report_diagnostic_event(
- "PreprovisionedVm: %s" % preprovisionedVm, logger_func=LOG.info
+ "PreprovisionedVm: %s" % ovf_env.preprovisioned_vm,
+ logger_func=LOG.info,
)
- return preprovisionedVm
-
-
-@azure_ds_telemetry_reporter
-def _get_preprovisionedvmtype_cfg_value(platform_settings):
- preprovisionedVMType = None
-
- # Read the PreprovisionedVMType value from the ovf. It can be
- # 'Running' or 'Savable' or not exist. This enum value is intended to
- # replace PreprovisionedVm bool flag in the long term.
- # A Running VM is the same as preprovisioned VMs of today. This is
- # equivalent to having PreprovisionedVm=True.
- # A Savable VM is one whose nic is hot-detached immediately after it
- # reports ready the first time to free up the network resources.
- # Once assigned to customer, the customer-requested nics are
- # hot-attached to it and reprovision happens like today.
- preprovisionedVMTypeVal = find_child(
- platform_settings[0], lambda n: n.localName == "PreprovisionedVMType"
- )
- if (
- not preprovisionedVMTypeVal
- or len(preprovisionedVMTypeVal) == 0
- or preprovisionedVMTypeVal[0].firstChild is None
- ):
- LOG.debug("PreprovisionedVMType not found")
- return preprovisionedVMType
-
- preprovisionedVMType = preprovisionedVMTypeVal[0].firstChild.nodeValue
-
+ cfg["PreprovisionedVMType"] = ovf_env.preprovisioned_vm_type
report_diagnostic_event(
- "PreprovisionedVMType: %s" % preprovisionedVMType, logger_func=LOG.info
+ "PreprovisionedVMType: %s" % ovf_env.preprovisioned_vm_type,
+ logger_func=LOG.info,
)
-
- return preprovisionedVMType
+ return (md, ud, cfg)
def encrypt_pass(password, salt_id="$6$"):
@@ -2346,32 +2148,6 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
util.del_file(path)
-def _is_platform_viable(seed_dir):
- """Check platform environment to report if this datasource may run."""
- with events.ReportEventStack(
- name="check-platform-viability",
- description="found azure asset tag",
- parent=azure_ds_reporter,
- ) as evt:
- asset_tag = dmi.read_dmi_data("chassis-asset-tag")
- if asset_tag == AZURE_CHASSIS_ASSET_TAG:
- return True
- msg = "Non-Azure DMI asset tag '%s' discovered." % asset_tag
- evt.description = msg
- report_diagnostic_event(msg, logger_func=LOG.debug)
- if os.path.exists(os.path.join(seed_dir, "ovf-env.xml")):
- return True
- return False
-
-
-class BrokenAzureDataSource(Exception):
- pass
-
-
-class NonAzureDataSource(Exception):
- pass
-
-
# Legacy: Must be present in case we load an old pkl object
DataSourceAzureNet = DataSourceAzure
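A hedged usage sketch for the now module-level viability check; it reads DMI data, so the answer is only meaningful on a real instance, and the seed path shown is this snapshot's AGENT_SEED_DIR:

    from pathlib import Path
    from cloudinit.sources.DataSourceAzure import is_platform_viable

    # True when the Azure chassis asset tag matches, or when a seeded
    # ovf-env.xml exists under the given directory.
    print(is_platform_viable(Path("/var/lib/waagent")))
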
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 4bb8b8db..a2afbaad 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -1,5 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
import base64
+import enum
import json
import logging
import os
@@ -7,16 +8,16 @@ import re
import socket
import struct
import textwrap
-import time
import zlib
from contextlib import contextmanager
from datetime import datetime
from errno import ENOENT
+from time import sleep, time
from typing import List, Optional
from xml.etree import ElementTree
from xml.sax.saxutils import escape
-from cloudinit import distros, subp, temp_utils, url_helper, util, version
+from cloudinit import distros, dmi, subp, temp_utils, url_helper, util, version
from cloudinit.reporting import events
from cloudinit.settings import CFG_BUILTIN
@@ -99,7 +100,7 @@ def get_boot_telemetry():
LOG.debug("Collecting boot telemetry")
try:
- kernel_start = float(time.time()) - float(util.uptime())
+ kernel_start = float(time()) - float(util.uptime())
except ValueError as e:
raise RuntimeError("Failed to determine kernel start timestamp") from e
@@ -331,45 +332,49 @@ def get_ip_from_lease_value(fallback_lease_value):
@azure_ds_telemetry_reporter
def http_with_retries(
- url: str, *, headers: dict, data: Optional[str] = None
+ url: str,
+ *,
+ headers: dict,
+ data: Optional[str] = None,
+ retry_sleep: int = 5,
+ timeout_minutes: int = 20,
) -> url_helper.UrlResponse:
"""Readurl wrapper for querying wireserver.
- Retries up to 40 minutes:
- 240 attempts * (5s timeout + 5s sleep)
+ :param retry_sleep: Time to sleep before retrying.
+ :param timeout_minutes: Retry up to specified number of minutes.
+ :raises UrlError: on error fetching data.
"""
- max_readurl_attempts = 240
- readurl_timeout = 5
- sleep_duration_between_retries = 5
- periodic_logging_attempts = 12
+ timeout = timeout_minutes * 60 + time()
- for attempt in range(1, max_readurl_attempts + 1):
+ attempt = 0
+ response = None
+ while not response:
+ attempt += 1
try:
- ret = url_helper.readurl(
- url, headers=headers, data=data, timeout=readurl_timeout
+ response = url_helper.readurl(
+ url, headers=headers, data=data, timeout=(5, 60)
)
-
+ break
+ except url_helper.UrlError as e:
report_diagnostic_event(
- "Successful HTTP request with Azure endpoint %s after "
- "%d attempts" % (url, attempt),
+ "Failed HTTP request with Azure endpoint %s during "
+ "attempt %d with exception: %s (code=%r headers=%r)"
+ % (url, attempt, e, e.code, e.headers),
logger_func=LOG.debug,
)
-
- return ret
-
- except Exception as e:
- if attempt % periodic_logging_attempts == 0:
- report_diagnostic_event(
- "Failed HTTP request with Azure endpoint %s during "
- "attempt %d with exception: %s" % (url, attempt, e),
- logger_func=LOG.debug,
- )
- if attempt == max_readurl_attempts:
+ # Raise exception if we're out of time.
+ if time() + retry_sleep >= timeout:
raise
- time.sleep(sleep_duration_between_retries)
+ sleep(retry_sleep)
- raise RuntimeError("Failed to return in http_with_retries")
+ report_diagnostic_event(
+ "Successful HTTP request with Azure endpoint %s after "
+ "%d attempts" % (url, attempt),
+ logger_func=LOG.debug,
+ )
+ return response
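The shape of the new deadline-based retry loop, extracted as a standalone sketch; fetch and OSError stand in for the readurl call and url_helper.UrlError:

    from time import sleep, time

    def retry_until(fetch, retry_sleep=5, timeout_minutes=20):
        deadline = timeout_minutes * 60 + time()
        attempt = 0
        while True:
            attempt += 1
            try:
                return fetch()
            except OSError:
                # Give up once one more sleep would pass the deadline.
                if time() + retry_sleep >= deadline:
                    raise
                sleep(retry_sleep)
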
def build_minimal_ovf(
@@ -777,11 +782,11 @@ class GoalStateHealthReporter:
# KVP messages that are published after the Azure Host receives the
# signal are ignored and unprocessed, so yield this thread to the
# Hyper-V KVP Reporting thread so that they are written.
- # time.sleep(0) is a low-cost and proven method to yield the scheduler
+ # sleep(0) is a low-cost and proven method to yield the scheduler
# and ensure that events are flushed.
# See HyperVKvpReportingHandler class, which is a multi-threaded
# reporting handler that writes to the special KVP files.
- time.sleep(0)
+ sleep(0)
LOG.debug("Sending health report to Azure fabric.")
url = "http://{}/machine?comp=health".format(self._endpoint)
@@ -1055,4 +1060,239 @@ def dhcp_log_cb(out, err):
)
+class BrokenAzureDataSource(Exception):
+ pass
+
+
+class NonAzureDataSource(Exception):
+ pass
+
+
+class ChassisAssetTag(enum.Enum):
+ AZURE_CLOUD = "7783-7084-3265-9085-8269-3286-77"
+
+ @classmethod
+ def query_system(cls) -> Optional["ChassisAssetTag"]:
+ """Check platform environment to report if this datasource may run.
+
+ :returns: ChassisAssetTag if matching tag found, else None.
+ """
+ asset_tag = dmi.read_dmi_data("chassis-asset-tag")
+ try:
+ tag = cls(asset_tag)
+ except ValueError:
+ report_diagnostic_event(
+ "Non-Azure chassis asset tag: %r" % asset_tag,
+ logger_func=LOG.debug,
+ )
+ return None
+
+ report_diagnostic_event(
+ "Azure chassis asset tag: %r (%s)" % (asset_tag, tag.name),
+ logger_func=LOG.debug,
+ )
+ return tag
+
+
+class OvfEnvXml:
+ NAMESPACES = {
+ "ovf": "http://schemas.dmtf.org/ovf/environment/1",
+ "wa": "http://schemas.microsoft.com/windowsazure",
+ }
+
+ def __init__(
+ self,
+ *,
+ username: Optional[str] = None,
+ password: Optional[str] = None,
+ hostname: Optional[str] = None,
+ custom_data: Optional[bytes] = None,
+ disable_ssh_password_auth: Optional[bool] = None,
+ public_keys: Optional[List[dict]] = None,
+ preprovisioned_vm: bool = False,
+ preprovisioned_vm_type: Optional[str] = None,
+ ) -> None:
+ self.username = username
+ self.password = password
+ self.hostname = hostname
+ self.custom_data = custom_data
+ self.disable_ssh_password_auth = disable_ssh_password_auth
+ self.public_keys: List[dict] = public_keys or []
+ self.preprovisioned_vm = preprovisioned_vm
+ self.preprovisioned_vm_type = preprovisioned_vm_type
+
+ def __eq__(self, other) -> bool:
+ return self.__dict__ == other.__dict__
+
+ @classmethod
+ def parse_text(cls, ovf_env_xml: str) -> "OvfEnvXml":
+ """Parser for ovf-env.xml data.
+
+ :raises NonAzureDataSource: if XML is not in Azure's format.
+ :raises BrokenAzureDataSource: if XML is unparseable or invalid.
+ """
+ try:
+ root = ElementTree.fromstring(ovf_env_xml)
+ except ElementTree.ParseError as e:
+ error_str = "Invalid ovf-env.xml: %s" % e
+ raise BrokenAzureDataSource(error_str) from e
+
+ # If there's no provisioning section, it's not Azure ovf-env.xml.
+        if root.find("./wa:ProvisioningSection", cls.NAMESPACES) is None:
+ raise NonAzureDataSource(
+ "Ignoring non-Azure ovf-env.xml: ProvisioningSection not found"
+ )
+
+ instance = OvfEnvXml()
+ instance._parse_linux_configuration_set_section(root)
+ instance._parse_platform_settings_section(root)
+
+ return instance
+
+ def _find(
+ self,
+ node,
+ name: str,
+ required: bool,
+ namespace: str = "wa",
+ ):
+ matches = node.findall(
+ "./%s:%s" % (namespace, name), OvfEnvXml.NAMESPACES
+ )
+ if len(matches) == 0:
+ msg = "No ovf-env.xml configuration for %r" % name
+ LOG.debug(msg)
+ if required:
+ raise BrokenAzureDataSource(msg)
+ return None
+ elif len(matches) > 1:
+ raise BrokenAzureDataSource(
+ "Multiple configuration matches in ovf-exml.xml for %r (%d)"
+ % (name, len(matches))
+ )
+
+ return matches[0]
+
+ def _parse_property(
+ self,
+ node,
+ name: str,
+ required: bool,
+ decode_base64: bool = False,
+ parse_bool: bool = False,
+ default=None,
+ ):
+ matches = node.findall("./wa:" + name, OvfEnvXml.NAMESPACES)
+ if len(matches) == 0:
+ msg = "No ovf-env.xml configuration for %r" % name
+ LOG.debug(msg)
+ if required:
+ raise BrokenAzureDataSource(msg)
+ return default
+ elif len(matches) > 1:
+ raise BrokenAzureDataSource(
+ "Multiple configuration matches in ovf-exml.xml for %r (%d)"
+ % (name, len(matches))
+ )
+
+ value = matches[0].text
+
+ # Empty string may be None.
+ if value is None:
+ value = default
+
+ if decode_base64 and value is not None:
+ value = base64.b64decode("".join(value.split()))
+
+ if parse_bool:
+ value = util.translate_bool(value)
+
+ return value
+
+ def _parse_linux_configuration_set_section(self, root):
+ provisioning_section = self._find(
+ root, "ProvisioningSection", required=True
+ )
+ config_set = self._find(
+ provisioning_section,
+ "LinuxProvisioningConfigurationSet",
+ required=True,
+ )
+
+ self.custom_data = self._parse_property(
+ config_set,
+ "CustomData",
+ decode_base64=True,
+ required=False,
+ )
+ self.username = self._parse_property(
+ config_set, "UserName", required=True
+ )
+ self.password = self._parse_property(
+ config_set, "UserPassword", required=False
+ )
+ self.hostname = self._parse_property(
+ config_set, "HostName", required=True
+ )
+ self.disable_ssh_password_auth = self._parse_property(
+ config_set,
+ "DisableSshPasswordAuthentication",
+ parse_bool=True,
+ required=False,
+ )
+
+ self._parse_ssh_section(config_set)
+
+ def _parse_platform_settings_section(self, root):
+ platform_settings_section = self._find(
+ root, "PlatformSettingsSection", required=True
+ )
+ platform_settings = self._find(
+ platform_settings_section, "PlatformSettings", required=True
+ )
+
+ self.preprovisioned_vm = self._parse_property(
+ platform_settings,
+ "PreprovisionedVm",
+ parse_bool=True,
+ default=False,
+ required=False,
+ )
+ self.preprovisioned_vm_type = self._parse_property(
+ platform_settings,
+ "PreprovisionedVMType",
+ required=False,
+ )
+
+ def _parse_ssh_section(self, config_set):
+ self.public_keys = []
+
+ ssh_section = self._find(config_set, "SSH", required=False)
+ if ssh_section is None:
+ return
+
+ public_keys_section = self._find(
+ ssh_section, "PublicKeys", required=False
+ )
+ if public_keys_section is None:
+ return
+
+ for public_key in public_keys_section.findall(
+ "./wa:PublicKey", OvfEnvXml.NAMESPACES
+ ):
+ fingerprint = self._parse_property(
+ public_key, "Fingerprint", required=False
+ )
+ path = self._parse_property(public_key, "Path", required=False)
+ value = self._parse_property(
+ public_key, "Value", default="", required=False
+ )
+ ssh_key = {
+ "fingerprint": fingerprint,
+ "path": path,
+ "value": value,
+ }
+ self.public_keys.append(ssh_key)
+
+
# vi: ts=4 expandtab
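A rough usage sketch for the new parser against a minimal ovf-env.xml; the values are examples and only the fields the parser treats as required are included:

    from cloudinit.sources.helpers.azure import OvfEnvXml

    MINIMAL_OVF = """\
    <Environment xmlns:wa="http://schemas.microsoft.com/windowsazure">
     <wa:ProvisioningSection>
      <wa:LinuxProvisioningConfigurationSet>
       <wa:UserName>azureuser</wa:UserName>
       <wa:HostName>example-vm</wa:HostName>
      </wa:LinuxProvisioningConfigurationSet>
     </wa:ProvisioningSection>
     <wa:PlatformSettingsSection>
      <wa:PlatformSettings>
       <wa:PreprovisionedVm>false</wa:PreprovisionedVm>
      </wa:PlatformSettings>
     </wa:PlatformSettingsSection>
    </Environment>
    """

    ovf = OvfEnvXml.parse_text(MINIMAL_OVF)
    print(ovf.username, ovf.hostname, ovf.preprovisioned_vm)
    # azureuser example-vm False
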
diff --git a/config/clean.d/README b/config/clean.d/README
new file mode 100644
index 00000000..9b0feebe
--- /dev/null
+++ b/config/clean.d/README
@@ -0,0 +1,18 @@
+-- cloud-init's clean.d run-parts directory --
+
+This directory is provided for third party applications which need
+additional configuration artifact cleanup from the filesystem when
+the command `cloud-init clean` is invoked.
+
+The `cloud-init clean` operation is typically performed by image creators
+when preparing a golden image for clone and redeployment. The clean command
+removes any cloud-init semaphores, allowing cloud-init to treat the next
+boot of this image as the "first boot". When the image is next booted,
+cloud-init will perform all initial configuration based on any valid
+datasource meta-data and user-data.
+
+Any executable scripts in this subdirectory will be invoked in lexicographical
+order with run-parts by the command: sudo cloud-init clean.
+
+The typical name format for such scripts is ##-<some-app>, for example:
+ /etc/cloud/clean.d/99-live-installer
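A hypothetical example of such a script; the path, name, and the directory it removes are illustrative only:

    #!/usr/bin/env python3
    # Hypothetical /etc/cloud/clean.d/99-example-app: remove this
    # application's provisioning artifacts during `cloud-init clean`.
    import shutil

    shutil.rmtree("/var/lib/example-app/provisioning", ignore_errors=True)

As the README notes, the script must be marked executable for run-parts to invoke it.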
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index 8c9b8398..5be80f53 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -114,6 +114,9 @@ cloud_config_modules:
{% if variant in ["ubuntu", "unknown", "debian"] %}
- snap
{% endif %}
+{% if variant in ["ubuntu"] %}
+ - ubuntu_autoinstall
+{% endif %}
{% if variant not in ["photon"] %}
- ssh-import-id
{% if variant not in ["rhel"] %}
@@ -212,6 +215,7 @@ system_info:
{# SRU_BLOCKER: do not ship network renderers on Xenial, Bionic or Eoan #}
network:
renderers: ['netplan', 'eni', 'sysconfig']
+ activators: ['netplan', 'eni', 'network-manager', 'networkd']
# Automatically discover the best ntp_client
ntp_client: auto
# Other config here will be given to the distro class and/or path classes
@@ -256,6 +260,10 @@ system_info:
name: cloud-user
lock_passwd: true
gecos: Cloud User
+{% elif variant == "openmandriva" %}
+ name: omv
+ lock_passwd: True
+ gecos: OpenMandriva admin
{% else %}
name: {{ variant }}
lock_passwd: True
diff --git a/debian/changelog b/debian/changelog
index 74fcfcef..5cccfb9f 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,56 @@
+cloud-init (22.2-115-g6e498773-0ubuntu1~22.10.1) kinetic; urgency=medium
+
+ * d/control: lintian fixes:
+ + Drop deprecated keys: XS-Python-Version and XB-Python-Version.
+ * d/cloud-init.lintian-overrides: lintian fixes:
+ + Fix systemd-service-file-refers-to-unusual-wantedby-target format.
+ * d/cloud-init.postinst: lintian fixes:
+ + Fix command-with-path-in-maintainer-script for grub-install.
+ * d/source/lintian-overrides: lintian fixes:
+ + silence binary-nmu-debian-revision-in-source bug:
+ https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1014584
+ * d/gbp_format_changelog: add alberto as filtered contributor
+ * New upstream snapshot.
+ + network: add system_info network activator cloud.cfg overrides (#1619)
+ (LP: #1982857)
+ + lxd: lvm avoid thinpool when kernel module absent
+ + lxd: enable MTU configuration in cloud-init
+ + cc_set_passwords fixes (#1590)
+ + Modernise importer.py and type ModuleDetails (#1605)
+ + config: Def activate_by_schema_keys for t-z (#1613)
+ + config: define activate_by_schema_keys for p-r mods (#1611)
+ + clean: add param to remove /etc/machine-id for golden image creation
+ + config: define `activate_by_schema_keys` for a-f mods (#1608)
+ + config: define activate_by_schema_keys for s mods (#1612)
+ + config: Define activate_by_schema_keys for g-n mods (#1610)
+ + meta-schema: add infra to skip inapplicable modules
+ + sources/azure: don't set cfg["password"] for default user pw (#1592)
+ [Chris Patterson]
+ + schema: activate grub-dpkg deprecations (#1600)
+ + cc_lxd: Add btrfs and lvm lxd storage options (SC-1026) (#1585)
+ + cc_ubuntu_autoinstall: support live-installer autoinstall config
+ + clean: allow third party cleanup scripts in /etc/cloud/clean.d (#1581)
+ + sources/azure: refactor chassis asset tag handling (#1574)
+ [Chris Patterson]
+ + bsd: Don't assume that root user is in root group (#1587)
+ + Update govc command in VMWare walkthrough (#1576) [manioo8]
+ + sources/azure: increase read-timeout to 60 seconds for wireserver
+ (#1571) [Chris Patterson]
+ + Resource leak cleanup (#1556)
+ + Fix expire passwords for hashed passwords (#1577) (LP: #1979065)
+ [Sadegh Hayeri]
+ + mounts: fix suggested_swapsize for > 64GB hosts (#1569)
+ [Steven Stallion]
+ + Update chpasswd schema to deprecate password parsing (#1517)
+ + Schema deprecation handling (#1549)
+ + sources/azure: refactor ovf-env.xml parsing (#1550) [Chris Patterson]
+ + schema: Force stricter validation (#1547)
+ + ubuntu advantage config: http_proxy, https_proxy (#1512)
+ [Fabian Lichtenegger-Lukas]
+ + net: fix interface matching support (#1552) (LP: #1979877)
+
+ -- Alberto Contreras <alberto.contreras@canonical.com> Fri, 29 Jul 2022 18:51:43 +0200
+
cloud-init (22.2-64-g1fcd55d6-0ubuntu1~22.10.1) kinetic; urgency=medium
* d/control: add python3-debconf as Depends and Build-Depends
diff --git a/debian/cloud-init.lintian-overrides b/debian/cloud-init.lintian-overrides
index 45037735..2176cba2 100644
--- a/debian/cloud-init.lintian-overrides
+++ b/debian/cloud-init.lintian-overrides
@@ -1,5 +1,7 @@
-cloud-init binary: systemd-service-file-refers-to-unusual-wantedby-target lib/systemd/system/cloud-config.service cloud-init.target
-cloud-init binary: systemd-service-file-refers-to-unusual-wantedby-target lib/systemd/system/cloud-final.service cloud-init.target
-cloud-init binary: systemd-service-file-refers-to-unusual-wantedby-target lib/systemd/system/cloud-init-local.service cloud-init.target
-cloud-init binary: systemd-service-file-refers-to-unusual-wantedby-target lib/systemd/system/cloud-init.service cloud-init.target
+# cloud-init.target should not be considered as unusual
+cloud-init binary: systemd-service-file-refers-to-unusual-wantedby-target cloud-init.target [lib/systemd/system/cloud-config.service]
+cloud-init binary: systemd-service-file-refers-to-unusual-wantedby-target cloud-init.target [lib/systemd/system/cloud-final.service]
+cloud-init binary: systemd-service-file-refers-to-unusual-wantedby-target cloud-init.target [lib/systemd/system/cloud-init-local.service]
+cloud-init binary: systemd-service-file-refers-to-unusual-wantedby-target cloud-init.target [lib/systemd/system/cloud-init.service]
+
cloud-init binary: package-supports-alternative-init-but-no-init.d-script
diff --git a/debian/cloud-init.postinst b/debian/cloud-init.postinst
index f7926c91..268f3ec8 100644
--- a/debian/cloud-init.postinst
+++ b/debian/cloud-init.postinst
@@ -140,7 +140,7 @@ fix_1336855() {
[ -r /proc/cmdline ] || return 0
# Don't do anything unless we have grub
- command -v grub-install > /dev/null 2>&1 || return 0
+ command -v grub-install > /dev/null || return 0
# First, identify the kernel device for the parent.
for parm in $(cat /proc/cmdline); do
@@ -309,7 +309,7 @@ fix_lp1889555() {
[ -f /var/lib/cloud/instance/sem/config_grub_dpkg ] || return 0
# Don't do anything unless we have grub
- [ -x /usr/sbin/grub-install ] || return 0
+ command -v grub-install > /dev/null || return 0
# Make sure that we are not chrooted.
[ "$(stat -c %d:%i /)" != "$(stat -c %d:%i /proc/1/root/.)" ] && return 0
diff --git a/debian/control b/debian/control
index ead20100..a4c9e9a7 100644
--- a/debian/control
+++ b/debian/control
@@ -25,7 +25,6 @@ Build-Depends: debhelper-compat (= 13),
python3-setuptools,
python3-yaml,
python3-responses
-XS-Python-Version: all
Vcs-Browser: https://github.com/canonical/cloud-init/tree/ubuntu/devel
Vcs-Git: https://github.com/canonical/cloud-init -b ubuntu/devel
Standards-Version: 4.5.0
@@ -47,7 +46,6 @@ Depends: cloud-guest-utils | cloud-utils,
${python3:Depends}
Recommends: eatmydata, gdisk, gnupg, software-properties-common
Suggests: openssh-server, ssh-import-id
-XB-Python-Version: ${python:Versions}
Description: initialization and customization tool for cloud instances
Cloud-init is the industry standard multi-distribution method for
cross-platform cloud instance initialization. It is supported across all major
diff --git a/debian/gbp_format_changelog b/debian/gbp_format_changelog
index 62a871a7..42b89da1 100755
--- a/debian/gbp_format_changelog
+++ b/debian/gbp_format_changelog
@@ -22,6 +22,7 @@ FILTER_UPSTREAM_COMMITERS = ( # cloud-init upstream author names
"Chad Smith",
"James Falcon",
"Brett Holman",
+ "Alberto Contreras",
)
FILTER_NOISY_COMMIT_REGEX = (
diff --git a/debian/source/lintian-overrides b/debian/source/lintian-overrides
new file mode 100644
index 00000000..5b8bf5cb
--- /dev/null
+++ b/debian/source/lintian-overrides
@@ -0,0 +1,2 @@
+# Silence lintian bug: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1014584
+cloud-init source: binary-nmu-debian-revision-in-source
diff --git a/doc-requirements.txt b/doc-requirements.txt
index 38c943e1..3207e6c6 100644
--- a/doc-requirements.txt
+++ b/doc-requirements.txt
@@ -1,4 +1,5 @@
-doc8
+# doc8 1.0.0 depends on docutils 0.18.1 or later which added Node.findall()
+doc8==0.11.2
m2r2
sphinx==4.3.0
sphinx_rtd_theme==1.0.0
diff --git a/doc/examples/cloud-config-apt.txt b/doc/examples/cloud-config-apt.txt
index efeae625..dd6a0f6a 100644
--- a/doc/examples/cloud-config-apt.txt
+++ b/doc/examples/cloud-config-apt.txt
@@ -35,7 +35,7 @@ apt_pipelining: False
#
# Default: none
#
-# if packages are specified, this package_update will be set to true
+# if packages are specified, then package_update will be set to true
packages: ['pastebinit']
diff --git a/doc/examples/cloud-config-install-packages.txt b/doc/examples/cloud-config-install-packages.txt
index 8bd9b74f..ea1e5743 100644
--- a/doc/examples/cloud-config-install-packages.txt
+++ b/doc/examples/cloud-config-install-packages.txt
@@ -4,7 +4,7 @@
#
# Default: none
#
-# if packages are specified, this package_update will be set to true
+# if packages are specified, then package_update will be set to true
#
# packages may be supplied as a single package name or as a list
# with the format [<package>, <version>] wherein the specific
diff --git a/doc/examples/cloud-config-lxd.txt b/doc/examples/cloud-config-lxd.txt
index e96f314b..512b3f08 100644
--- a/doc/examples/cloud-config-lxd.txt
+++ b/doc/examples/cloud-config-lxd.txt
@@ -7,7 +7,7 @@
# init: dict of options for lxd init, see 'man lxd'
# network_address: address for lxd to listen on
# network_port: port for lxd to listen on
-# storage_backend: either 'zfs' or 'dir'
+# storage_backend: 'zfs', 'dir', 'lvm', or 'btrfs'
# storage_create_device: device based storage using specified device
# storage_create_loop: set up loop based storage with size in GB
# storage_pool: name of storage pool to use or create
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index 177c5600..7f4ded8c 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -428,10 +428,15 @@ syslog_fix_perms: syslog:root
# to set hashed password, here account 'user3' has a password it set to
# 'cloud-init', hashed with SHA-256:
# chpasswd:
-# list: |
-# user1:password1
-# user2:RANDOM
-# user3:$5$eriogqzq$Dg7PxHsKGzziuEGkZgkLvacjuEFeljJ.rLf.hZqKQLA
+# users:
+# - name: user1
+# password: password1
+# type: text
+#   - name: user2
+#     type: RANDOM
+#   - name: user3
+#     password: $5$eriogqzq$Dg7PxHsKGzziuEGkZgkLvacjuEFeljJ.rLf.hZqKQLA
+# type: hash
# expire: True
# ssh_pwauth: [ True, False, "" or "unchanged" ]
#
diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst
index d8ca9d16..aa978237 100644
--- a/doc/rtd/topics/availability.rst
+++ b/doc/rtd/topics/availability.rst
@@ -18,7 +18,7 @@ Cloud-init has support across all major Linux distributions, FreeBSD, NetBSD,
OpenBSD and DragonFlyBSD:
- Alpine Linux
-- ArchLinux
+- Arch Linux
- Debian
- DragonFlyBSD
- Fedora
diff --git a/doc/rtd/topics/bugs.rst b/doc/rtd/topics/bugs.rst
index ee3828de..c66048e2 100644
--- a/doc/rtd/topics/bugs.rst
+++ b/doc/rtd/topics/bugs.rst
@@ -88,11 +88,11 @@ SUSE & openSUSE
To file a bug against the SuSE packages of cloud-init please use the
`SUSE bugzilla`_.
-Arch
-----
+Arch Linux
+----------
To file a bug against the Arch package of cloud-init please use the
-`Arch Linux Bugtracker`_. See the `Arch bug reporting wiki`_ for more
+`Arch Linux Bugtracker`_. See the `Arch Linux bug reporting wiki`_ for more
details.
.. _Create a Launchpad account: https://help.launchpad.net/YourAccount/NewAccount
@@ -103,6 +103,6 @@ details.
.. _Red Hat bugzilla: https://bugzilla.redhat.com/
.. _SUSE bugzilla: https://bugzilla.suse.com/index.cgi
.. _Arch Linux Bugtracker: https://bugs.archlinux.org/
-.. _Arch bug reporting wiki: https://wiki.archlinux.org/index.php/Bug_reporting_guidelines
+.. _Arch Linux bug reporting wiki: https://wiki.archlinux.org/index.php/Bug_reporting_guidelines
.. vi: textwidth=79
diff --git a/doc/rtd/topics/cli.rst b/doc/rtd/topics/cli.rst
index 2e209bb4..1a5f5e2d 100644
--- a/doc/rtd/topics/cli.rst
+++ b/doc/rtd/topics/cli.rst
@@ -67,6 +67,9 @@ instance. On reboot, cloud-init will re-run all stages as it did on first boot.
* ``--logs``: optionally remove all cloud-init log files in ``/var/log/``
* ``--reboot``: reboot the system after removing artifacts
+* ``--machine-id``: Remove ``/etc/machine-id`` on this image. Best practice
+ when cloning a golden image to ensure that the next boot of that image
+  auto-generates a unique machine ID. `More details on machine-id`_.
.. _cli_collect_logs:
@@ -323,3 +326,5 @@ the currently running modules, as well as when it is done.
time: Wed, 17 Jan 2018 20:41:59 +0000
detail:
DataSourceNoCloud [seed=/var/lib/cloud/seed/nocloud-net][dsmode=net]
+
+.. _More details on machine-id: https://www.freedesktop.org/software/systemd/man/machine-id.html
diff --git a/doc/rtd/topics/datasources/vmware.rst b/doc/rtd/topics/datasources/vmware.rst
index 59cfc2f8..de3de6af 100644
--- a/doc/rtd/topics/datasources/vmware.rst
+++ b/doc/rtd/topics/datasources/vmware.rst
@@ -263,7 +263,7 @@ this datasource:
.. code-block:: bash
- cloud-init clean
+ cloud-init clean --logs --machine-id
Otherwise cloud-init may not run in first-boot mode. For more information
on how the boot mode is determined, please see the
@@ -311,7 +311,7 @@ this datasource:
.. code-block:: shell
- govc vm.power -vm "${VM}" -on
+ govc vm.power -on "${VM}"
If all went according to plan, the CentOS box is:
diff --git a/doc/rtd/topics/module_creation.rst b/doc/rtd/topics/module_creation.rst
index 070d411f..12cfdb00 100644
--- a/doc/rtd/topics/module_creation.rst
+++ b/doc/rtd/topics/module_creation.rst
@@ -34,6 +34,7 @@ Example
"description": MODULE_DESCRIPTION,
"distros": [ALL_DISTROS],
"frequency": PER_INSTANCE,
+ "activate_by_schema_keys": ["example_key, example_other_key"],
"examples": [
"example_key: example_value",
"example_other_key: ['value', 2]",
@@ -82,6 +83,10 @@ Guidelines
would be a significant change to the instance metadata. An example
could be an instance being moved to a different subnet.
+ * ``activate_by_schema_keys``: (Optional) List of cloud-config keys that will
+    activate this module. When this list is not empty, the config module will
+    be skipped unless one of the ``activate_by_schema_keys`` is present in merged
+ cloud-config instance-data.
* ``examples``: Lists examples of any cloud-config keys this module reacts
to. These examples will be rendered in the module reference documentation
and will automatically be tested against the defined schema
diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
index 682637c4..3e48555f 100644
--- a/doc/rtd/topics/network-config.rst
+++ b/doc/rtd/topics/network-config.rst
@@ -80,7 +80,8 @@ Disabling Network Activation
Some datasources may not be initialized until after network has been brought
up. In this case, cloud-init will attempt to bring up the interfaces specified
-by the datasource metadata.
+by the datasource metadata using a network activator discovered by
+`cloudinit.net.activators.select_activator`_.
This behavior can be disabled in the cloud-init configuration dictionary,
merged from ``/etc/cloud/cloud.cfg`` and ``/etc/cloud/cloud.cfg.d/*``::
@@ -215,6 +216,15 @@ network configuration for supported backends such as ``systemd-networkd`` and
Sysconfig format is used by RHEL, CentOS, Fedora and other derivatives.
+- **NetBSD, OpenBSD, FreeBSD**
+
+Network renderers supporting BSD releases, which typically write configuration
+to ``/etc/rc.conf``. Unique to the BSD renderers is that each one also calls
+something akin to `FreeBSD.start_services`_, which invokes the applicable
+network services to set up the network, making network activators unnecessary
+for BSD flavors at the moment.
+
+
Network Output Policy
=====================
@@ -225,6 +235,18 @@ is as follows:
- Sysconfig
- Netplan
- NetworkManager
+- FreeBSD
+- NetBSD
+- OpenBSD
+- Networkd
+
+The default policy for selecting a network ``activator`` in order of preference
+is as follows:
+
+- ENI: using ``ifup`` and ``ifdown`` to manage device setup/teardown
+- Netplan: using ``netplan apply`` to manage device setup/teardown
+- NetworkManager: using ``nmcli`` to manage device setup/teardown
+- Networkd: using ``ip`` to manage device setup/teardown
+
When applying the policy, `Cloud-init`_ checks if the current instance has the
correct binaries and paths to support the renderer. The first renderer that
@@ -234,6 +256,7 @@ supplying an updated configuration in cloud-config. ::
system_info:
network:
renderers: ['netplan', 'network-manager', 'eni', 'sysconfig', 'freebsd', 'netbsd', 'openbsd']
+ activators: ['eni', 'netplan', 'network-manager', 'networkd']
Network Configuration Tools
@@ -295,5 +318,7 @@ Example output converting V2 to sysconfig:
.. _SmartOS JSON Metadata: https://eng.joyent.com/mdata/datadict.html
.. _UpCloud JSON metadata: https://developers.upcloud.com/1.3/8-servers/#metadata-service
.. _Vultr JSON metadata: https://www.vultr.com/metadata/
+.. _cloudinit.net.activators.select_activator: https://github.com/canonical/cloud-init/blob/main/cloudinit/net/activators.py#L279
+.. _FreeBSD.start_services: https://github.com/canonical/cloud-init/blob/main/cloudinit/net/freebsd.py#L28
.. vi: textwidth=79
diff --git a/setup.py b/setup.py
index 53ebcb9b..470dd774 100644
--- a/setup.py
+++ b/setup.py
@@ -276,6 +276,7 @@ if not in_virtualenv():
data_files = [
(ETC + "/cloud", [render_tmpl("config/cloud.cfg.tmpl")]),
+ (ETC + "/cloud/clean.d", glob("config/clean.d/*")),
(ETC + "/cloud/cloud.cfg.d", glob("config/cloud.cfg.d/*")),
(ETC + "/cloud/templates", glob("templates/*")),
(
diff --git a/tests/hypothesis.py b/tests/hypothesis.py
new file mode 100644
index 00000000..def9de29
--- /dev/null
+++ b/tests/hypothesis.py
@@ -0,0 +1,20 @@
+try:
+ from hypothesis import given
+
+ HAS_HYPOTHESIS = True
+except ImportError:
+ HAS_HYPOTHESIS = False
+
+ from unittest import mock
+
+ def given(*_, **__): # type: ignore
+ """Dummy implementation to make pytest collection pass"""
+
+ @mock.Mock # Add mock to fulfill the expected hypothesis value
+ def run_test(item):
+ return item
+
+ return run_test
+
+
+__all__ = ["given", "HAS_HYPOTHESIS"]
diff --git a/tests/hypothesis_jsonschema.py b/tests/hypothesis_jsonschema.py
new file mode 100644
index 00000000..cce7a9da
--- /dev/null
+++ b/tests/hypothesis_jsonschema.py
@@ -0,0 +1,12 @@
+try:
+ from hypothesis_jsonschema import from_schema
+
+ HAS_HYPOTHESIS_JSONSCHEMA = True
+except ImportError:
+ HAS_HYPOTHESIS_JSONSCHEMA = False
+
+ def from_schema(*_, **__): # type: ignore
+ pass
+
+
+__all__ = ["from_schema", "HAS_HYPOTHESIS_JSONSCHEMA"]
diff --git a/tests/integration_tests/cmd/test_schema.py b/tests/integration_tests/cmd/test_schema.py
new file mode 100644
index 00000000..73adc2ac
--- /dev/null
+++ b/tests/integration_tests/cmd/test_schema.py
@@ -0,0 +1,66 @@
+"""Tests for `cloud-init status`"""
+from textwrap import dedent
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import verify_clean_log
+
+USER_DATA = """\
+#cloud-config
+apt_update: false
+apt_upgrade: false
+apt_reboot_if_required: false
+"""
+
+
+@pytest.mark.user_data(USER_DATA)
+class TestSchemaDeprecations:
+ def test_clean_log(self, class_client: IntegrationInstance):
+ log = class_client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log, ignore_deprecations=True)
+ assert "WARNING]: Deprecated cloud-config provided:" in log
+ assert "apt_reboot_if_required: DEPRECATED." in log
+ assert "apt_update: DEPRECATED." in log
+ assert "apt_upgrade: DEPRECATED." in log
+
+ def test_schema_deprecations(self, class_client: IntegrationInstance):
+ """Test schema behavior with deprecated configs."""
+ user_data_fn = "/root/user-data"
+ class_client.write_to_file(user_data_fn, USER_DATA)
+
+ result = class_client.execute(
+ f"cloud-init schema --config-file {user_data_fn}"
+ )
+ assert (
+ result.ok
+ ), "`schema` cmd must return 0 even with deprecated configs"
+ assert not result.stderr
+ assert "Cloud config schema deprecations:" in result.stdout
+ assert "apt_update: DEPRECATED" in result.stdout
+ assert "apt_upgrade: DEPRECATED" in result.stdout
+ assert "apt_reboot_if_required: DEPRECATED" in result.stdout
+
+ annotated_result = class_client.execute(
+ f"cloud-init schema --annotate --config-file {user_data_fn}"
+ )
+ assert (
+ annotated_result.ok
+ ), "`schema` cmd must return 0 even with deprecated configs"
+ assert not annotated_result.stderr
+ expected_output = dedent(
+ """\
+ #cloud-config
+ apt_update: false\t\t# D1
+ apt_upgrade: false\t\t# D2
+ apt_reboot_if_required: false\t\t# D3
+
+ # Deprecations: -------------
+ # D1: DEPRECATED. Dropped after April 2027. Use ``package_update``. Default: ``false``
+ # D2: DEPRECATED. Dropped after April 2027. Use ``package_upgrade``. Default: ``false``
+ # D3: DEPRECATED. Dropped after April 2027. Use ``package_reboot_if_required``. Default: ``false``
+
+
+ Valid cloud-config: /root/user-data""" # noqa: E501
+ )
+ assert expected_output in annotated_result.stdout
diff --git a/tests/integration_tests/cmd/test_status.py b/tests/integration_tests/cmd/test_status.py
index f5a2d39c..2582855d 100644
--- a/tests/integration_tests/cmd/test_status.py
+++ b/tests/integration_tests/cmd/test_status.py
@@ -32,7 +32,9 @@ def _remove_nocloud_dir_and_reboot(client: IntegrationInstance):
# On Impish and below, NoCloud will be detected on an LXD container.
# If we remove this directory, it will no longer be detected.
client.execute("rm -rf /var/lib/cloud/seed/nocloud-net")
+ old_boot_id = client.instance.get_boot_id()
client.execute("cloud-init clean --logs --reboot")
+ client.instance._wait_for_execute(old_boot_id=old_boot_id)
@pytest.mark.ubuntu
@@ -60,7 +62,6 @@ def test_wait_when_no_datasource(session_cloud: IntegrationCloud, setup_image):
if ImageSpecification.from_os_image().release in [
"bionic",
"focal",
- "impish",
]:
_remove_nocloud_dir_and_reboot(client)
status_out = _wait_for_cloud_init(client).stdout.strip()
diff --git a/tests/integration_tests/datasources/test_lxd_discovery.py b/tests/integration_tests/datasources/test_lxd_discovery.py
index feae52a9..899ea935 100644
--- a/tests/integration_tests/datasources/test_lxd_discovery.py
+++ b/tests/integration_tests/datasources/test_lxd_discovery.py
@@ -113,7 +113,6 @@ def test_lxd_datasource_discovery(client: IntegrationInstance):
if ImageSpecification.from_os_image().release in [
"bionic",
"focal",
- "impish",
]:
# Assert NoCloud seed files are still present in non-Jammy images
# and that NoCloud seed files provide the same content as LXD socket.
diff --git a/tests/integration_tests/modules/test_ca_certs.py b/tests/integration_tests/modules/test_ca_certs.py
index 7247fd7d..37e5910b 100644
--- a/tests/integration_tests/modules/test_ca_certs.py
+++ b/tests/integration_tests/modules/test_ca_certs.py
@@ -10,6 +10,9 @@ import os.path
import pytest
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import get_inactive_modules, verify_clean_log
+
USER_DATA = """\
#cloud-config
ca_certs:
@@ -57,7 +60,7 @@ ca_certs:
@pytest.mark.ubuntu
@pytest.mark.user_data(USER_DATA)
class TestCaCerts:
- def test_certs_updated(self, class_client):
+ def test_certs_updated(self, class_client: IntegrationInstance):
"""Test that /etc/ssl/certs is updated as we expect."""
root = "/etc/ssl/certs"
filenames = class_client.execute(["ls", "-1", root]).splitlines()
@@ -79,7 +82,7 @@ class TestCaCerts:
== links["cloud-init-ca-certs.pem"]
)
- def test_cert_installed(self, class_client):
+ def test_cert_installed(self, class_client: IntegrationInstance):
"""Test that our specified cert has been installed"""
checksum = class_client.execute(
"sha256sum /etc/ssl/certs/ca-certificates.crt"
@@ -88,3 +91,38 @@ class TestCaCerts:
"78e875f18c73c1aab9167ae0bd323391e52222cc2dbcda42d129537219300062"
in checksum
)
+
+ def test_clean_logs(self, class_client: IntegrationInstance):
+ log = class_client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log, ignore_deprecations=False)
+ diff = {
+ "apt-pipelining",
+ "bootcmd",
+ "chef",
+ "disable-ec2-metadata",
+ "disk_setup",
+ "fan",
+ "keyboard",
+ "landscape",
+ "lxd",
+ "mcollective",
+ "ntp",
+ "package-update-upgrade-install",
+ "phone-home",
+ "power-state-change",
+ "puppet",
+ "rsyslog",
+ "runcmd",
+ "salt-minion",
+ "snap",
+ "timezone",
+ "ubuntu_autoinstall",
+ "ubuntu-advantage",
+ "ubuntu-drivers",
+ "update_etc_hosts",
+ "write-files",
+ "write-files-deferred",
+ }.symmetric_difference(get_inactive_modules(log))
+ assert (
+ not diff
+ ), f"Expected inactive modules do not match, diff: {diff}"
diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py
index 70850fd9..7e84626f 100644
--- a/tests/integration_tests/modules/test_combined.py
+++ b/tests/integration_tests/modules/test_combined.py
@@ -19,6 +19,7 @@ from tests.integration_tests.clouds import ImageSpecification
from tests.integration_tests.decorators import retry
from tests.integration_tests.instances import IntegrationInstance
from tests.integration_tests.util import (
+ get_inactive_modules,
verify_clean_log,
verify_ordered_items_in_text,
)
@@ -173,7 +174,9 @@ class TestCombined:
assert timezone_output.strip() == "HDT"
def test_no_problems(self, class_client: IntegrationInstance):
- """Test no errors, warnings, or tracebacks"""
+ """Test no errors, warnings, deprecations, tracebacks or
+ inactive modules.
+ """
client = class_client
status_file = client.read_from_file("/run/cloud-init/status.json")
status_json = json.loads(status_file)["v1"]
@@ -184,7 +187,26 @@ class TestCombined:
assert result_json["errors"] == []
log = client.read_from_file("/var/log/cloud-init.log")
- verify_clean_log(log)
+ verify_clean_log(log, ignore_deprecations=False)
+ requested_modules = {
+ "apt_configure",
+ "apt_pipelining",
+ "byobu",
+ "final_message",
+ "locale",
+ "ntp",
+ "seed_random",
+ "rsyslog",
+ "runcmd",
+ "snap",
+ "ssh_import_id",
+ "timezone",
+ }
+ inactive_modules = get_inactive_modules(log)
+ assert not requested_modules.intersection(inactive_modules), (
+ f"Expected active modules:"
+ f" {requested_modules.intersection(inactive_modules)}"
+ )
def test_correct_datasource_detected(
self, class_client: IntegrationInstance
@@ -198,7 +220,6 @@ class TestCombined:
if ImageSpecification.from_os_image().release in [
"bionic",
"focal",
- "impish",
]:
datasource = "DataSourceNoCloud"
else:
@@ -276,7 +297,6 @@ class TestCombined:
if ImageSpecification.from_os_image().release not in [
"bionic",
"focal",
- "impish",
]:
cloud_name = "lxd"
subplatform = "LXD socket API v. 1.0 (/dev/lxd/sock)"
@@ -316,7 +336,6 @@ class TestCombined:
if ImageSpecification.from_os_image().release not in [
"bionic",
"focal",
- "impish",
]:
cloud_name = "lxd"
subplatform = "LXD socket API v. 1.0 (/dev/lxd/sock)"
diff --git a/tests/integration_tests/modules/test_lxd.py b/tests/integration_tests/modules/test_lxd.py
new file mode 100644
index 00000000..f4045425
--- /dev/null
+++ b/tests/integration_tests/modules/test_lxd.py
@@ -0,0 +1,101 @@
+"""Integration tests for LXD bridge creation.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/lxd_bridge.yaml``.)
+"""
+import re
+import warnings
+
+import pytest
+import yaml
+
+from tests.integration_tests.util import verify_clean_log
+
+BRIDGE_USER_DATA = """\
+#cloud-config
+lxd:
+ init:
+ storage_backend: btrfs
+ bridge:
+ mode: new
+ name: lxdbr0
+ ipv4_address: 10.100.100.1
+ ipv4_netmask: 24
+ ipv4_dhcp_first: 10.100.100.100
+ ipv4_dhcp_last: 10.100.100.200
+ ipv4_nat: true
+ domain: lxd
+ mtu: 9000
+"""
+
+STORAGE_USER_DATA = """\
+#cloud-config
+bootcmd: [ "apt-get --yes remove {0}", "! command -v {2}", "{3}" ]
+lxd:
+ init:
+ storage_backend: {1}
+"""
+
+
+@pytest.mark.no_container
+@pytest.mark.user_data(BRIDGE_USER_DATA)
+class TestLxdBridge:
+ @pytest.mark.parametrize("binary_name", ["lxc", "lxd"])
+ def test_binaries_installed(self, class_client, binary_name):
+ """Check that the expected LXD binaries are installed"""
+ assert class_client.execute(["which", binary_name]).ok
+
+ def test_bridge(self, class_client):
+ """Check that the given bridge is configured"""
+ cloud_init_log = class_client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(cloud_init_log)
+
+ # The bridge should exist
+ assert class_client.execute("ip addr show lxdbr0")
+
+ raw_network_config = class_client.execute("lxc network show lxdbr0")
+ network_config = yaml.safe_load(raw_network_config)
+ assert "10.100.100.1/24" == network_config["config"]["ipv4.address"]
+
+
+def validate_storage(validate_client, pkg_name, command):
+ log = validate_client.read_from_file("/var/log/cloud-init.log")
+ assert re.search(f"apt-get.*install.*{pkg_name}", log) is not None
+ verify_clean_log(log, ignore_deprecations=False)
+ return log
+
+
+@pytest.mark.no_container
+@pytest.mark.user_data(
+ STORAGE_USER_DATA.format("btrfs-progs", "btrfs", "mkfs.btrfs", "true")
+)
+def test_storage_btrfs(client):
+ validate_storage(client, "btrfs-progs", "mkfs.btrfs")
+
+
+@pytest.mark.no_container
+@pytest.mark.user_data(
+ STORAGE_USER_DATA.format(
+ "lvm2",
+ "lvm",
+ "lvcreate",
+ "apt-get install "
+ "thin-provisioning-tools && systemctl unmask lvm2-lvmpolld.socket",
+ )
+)
+def test_storage_lvm(client):
+ log = client.read_from_file("/var/log/cloud-init.log")
+
+    # Reminder: revisit once LP #1982780 is fixed to enable thinpool coverage
+ if "doesn't use thinpool by default on Ubuntu due to LP" not in log:
+ warnings.warn("LP 1982780 has been fixed, update to allow thinpools")
+
+ validate_storage(client, "lvm2", "lvcreate")
+
+
+@pytest.mark.no_container
+@pytest.mark.user_data(
+ STORAGE_USER_DATA.format("zfsutils-linux", "zfs", "zpool", "true")
+)
+def test_storage_zfs(client):
+ validate_storage(client, "zfsutils-linux", "zpool")
diff --git a/tests/integration_tests/modules/test_lxd_bridge.py b/tests/integration_tests/modules/test_lxd_bridge.py
deleted file mode 100644
index 3292a833..00000000
--- a/tests/integration_tests/modules/test_lxd_bridge.py
+++ /dev/null
@@ -1,46 +0,0 @@
-"""Integration tests for LXD bridge creation.
-
-(This is ported from
-``tests/cloud_tests/testcases/modules/lxd_bridge.yaml``.)
-"""
-import pytest
-import yaml
-
-from tests.integration_tests.util import verify_clean_log
-
-USER_DATA = """\
-#cloud-config
-lxd:
- init:
- storage_backend: dir
- bridge:
- mode: new
- name: lxdbr0
- ipv4_address: 10.100.100.1
- ipv4_netmask: 24
- ipv4_dhcp_first: 10.100.100.100
- ipv4_dhcp_last: 10.100.100.200
- ipv4_nat: true
- domain: lxd
-"""
-
-
-@pytest.mark.no_container
-@pytest.mark.user_data(USER_DATA)
-class TestLxdBridge:
- @pytest.mark.parametrize("binary_name", ["lxc", "lxd"])
- def test_binaries_installed(self, class_client, binary_name):
- """Check that the expected LXD binaries are installed"""
- assert class_client.execute(["which", binary_name]).ok
-
- def test_bridge(self, class_client):
- """Check that the given bridge is configured"""
- cloud_init_log = class_client.read_from_file("/var/log/cloud-init.log")
- verify_clean_log(cloud_init_log)
-
- # The bridge should exist
- assert class_client.execute("ip addr show lxdbr0")
-
- raw_network_config = class_client.execute("lxc network show lxdbr0")
- network_config = yaml.safe_load(raw_network_config)
- assert "10.100.100.1/24" == network_config["config"]["ipv4.address"]
diff --git a/tests/integration_tests/modules/test_set_password.py b/tests/integration_tests/modules/test_set_password.py
index 66ea52dd..1ac131d9 100644
--- a/tests/integration_tests/modules/test_set_password.py
+++ b/tests/integration_tests/modules/test_set_password.py
@@ -64,6 +64,23 @@ chpasswd:
"""
)
+USERS_USER_DATA = (
+ COMMON_USER_DATA
+ + """
+chpasswd:
+ users:
+ - name: tom
+ password: mypassword123!
+ type: text
+ - name: dick
+ type: RANDOM
+ - name: harry
+ type: RANDOM
+ - name: mikey
+ password: $5$xZ$B2YGGEx2AOf4PeW48KC6.QyT1W2B4rZ9Qbltudtha89
+"""
+)
+
USERS_DICTS = yaml.safe_load(COMMON_USER_DATA)["users"]
USERS_PASSWD_VALUES = {
user_dict["name"]: user_dict["passwd"]
@@ -160,7 +177,7 @@ class Mixin:
shadow = class_client.read_from_file("/etc/shadow")
for user_dict in USERS_DICTS:
if "name" in user_dict:
- assert "{}:".format(user_dict["name"]) in shadow
+ assert f'{user_dict["name"]}:' in shadow
def test_sshd_config(self, class_client):
"""Test that SSH password auth is enabled."""
@@ -169,13 +186,17 @@ class Mixin:
assert "PasswordAuthentication yes" in sshd_config.splitlines()
-@pytest.mark.ci
@pytest.mark.user_data(LIST_USER_DATA)
class TestPasswordList(Mixin):
"""Launch an instance with LIST_USER_DATA, ensure Mixin tests pass."""
-@pytest.mark.ci
@pytest.mark.user_data(STRING_USER_DATA)
class TestPasswordListString(Mixin):
"""Launch an instance with STRING_USER_DATA, ensure Mixin tests pass."""
+
+
+@pytest.mark.ci
+@pytest.mark.user_data(USERS_USER_DATA)
+class TestPasswordUsersList(Mixin):
+ """Launch an instance with USERS_USER_DATA, ensure Mixin tests pass."""
diff --git a/tests/integration_tests/modules/test_ubuntu_autoinstall.py b/tests/integration_tests/modules/test_ubuntu_autoinstall.py
new file mode 100644
index 00000000..d340afc5
--- /dev/null
+++ b/tests/integration_tests/modules/test_ubuntu_autoinstall.py
@@ -0,0 +1,26 @@
+"""Integration tests for cc_ubuntu_autoinstall happy path"""
+
+import pytest
+
+USER_DATA = """\
+#cloud-config
+autoinstall:
+ version: 1
+ cloudinitdoesnotvalidateotherkeyschema: true
+snap:
+ commands:
+ - snap install subiquity --classic
+"""
+
+
+LOG_MSG = "Valid autoinstall schema. Config will be processed by subiquity"
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(USER_DATA)
+class TestUbuntuAutoinstall:
+ def test_autoinstall_schema_valid_when_snap_present(self, class_client):
+ """autoinstall directives will pass when snap is present"""
+ assert "subiquity" in class_client.execute(["snap", "list"]).stdout
+ log = class_client.read_from_file("/var/log/cloud-init.log")
+ assert LOG_MSG in log
diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py
index ec6b1434..18ca1917 100644
--- a/tests/integration_tests/util.py
+++ b/tests/integration_tests/util.py
@@ -5,7 +5,9 @@ import re
import time
from collections import namedtuple
from contextlib import contextmanager
+from itertools import chain
from pathlib import Path
+from typing import Set
import pytest
@@ -35,8 +37,16 @@ def verify_ordered_items_in_text(to_verify: list, text: str):
index = matched.start()
-def verify_clean_log(log):
+def verify_clean_log(log: str, ignore_deprecations: bool = True):
"""Assert no unexpected tracebacks or warnings in logs"""
+ if ignore_deprecations:
+ is_deprecated = re.compile("deprecat", flags=re.IGNORECASE)
+ log_lines = log.split("\n")
+ log_lines = list(
+ filter(lambda line: not is_deprecated.search(line), log_lines)
+ )
+ log = "\n".join(log_lines)
+
warning_count = log.count("WARN")
expected_warnings = 0
traceback_count = log.count("Traceback")
@@ -45,7 +55,9 @@ def verify_clean_log(log):
warning_texts = [
# Consistently on all Azure launches:
# azure.py[WARNING]: No lease found; using default endpoint
- "No lease found; using default endpoint"
+ "No lease found; using default endpoint",
+ # Ubuntu lxd storage
+ "thinpool by default on Ubuntu due to LP #1982780",
]
traceback_texts = []
if "oracle" in log:
@@ -82,6 +94,19 @@ def verify_clean_log(log):
assert traceback_count == expected_tracebacks
+def get_inactive_modules(log: str) -> Set[str]:
+ matches = re.findall(
+ r"Skipping modules '(.*)' because no applicable config is provided.",
+ log,
+ )
+ return set(
+ map(
+ lambda module: module.strip(),
+ chain(*map(lambda match: match.split(","), matches)),
+ )
+ )
+
+
@contextmanager
def emit_dots_on_travis():
"""emit a dot every 60 seconds if running on Travis.
diff --git a/tests/unittests/analyze/test_boot.py b/tests/unittests/analyze/test_boot.py
index 68db69ec..261f4c4e 100644
--- a/tests/unittests/analyze/test_boot.py
+++ b/tests/unittests/analyze/test_boot.py
@@ -112,19 +112,19 @@ class TestAnalyzeBoot(CiTestCase):
analyze_boot(name_default, args)
# now args have been tested, go into outfile and make sure error
# message is in the outfile
- outfh = open(args.outfile, "r")
- data = outfh.read()
- err_string = (
- "Your Linux distro or container does not support this "
- "functionality.\nYou must be running a Kernel "
- "Telemetry supported distro.\nPlease check "
- "https://cloudinit.readthedocs.io/en/latest/topics"
- "/analyze.html for more information on supported "
- "distros.\n"
- )
-
- self.remove_dummy_file(path, log_path)
- self.assertEqual(err_string, data)
+ with open(args.outfile, "r") as outfh:
+ data = outfh.read()
+ err_string = (
+ "Your Linux distro or container does not support this "
+ "functionality.\nYou must be running a Kernel "
+ "Telemetry supported distro.\nPlease check "
+ "https://cloudinit.readthedocs.io/en/latest/topics"
+ "/analyze.html for more information on supported "
+ "distros.\n"
+ )
+
+ self.remove_dummy_file(path, log_path)
+ self.assertEqual(err_string, data)
@mock.patch("cloudinit.util.is_container", return_value=True)
@mock.patch("cloudinit.subp.subp", return_value=("U=1000000", None))
diff --git a/tests/unittests/analyze/test_dump.py b/tests/unittests/analyze/test_dump.py
index 56bbf97f..1b4ce820 100644
--- a/tests/unittests/analyze/test_dump.py
+++ b/tests/unittests/analyze/test_dump.py
@@ -216,8 +216,8 @@ class TestDumpEvents(CiTestCase):
tmpfile = self.tmp_path("logfile")
write_file(tmpfile, SAMPLE_LOGS)
m_parse_from_date.return_value = 1472594005.972
-
- events, data = dump_events(cisource=open(tmpfile))
+ with open(tmpfile) as file:
+ events, data = dump_events(cisource=file)
year = datetime.now().year
dt1 = datetime.strptime(
"Nov 03 06:51:06.074410 %d" % year, "%b %d %H:%M:%S.%f %Y"
diff --git a/tests/unittests/cmd/devel/test_hotplug_hook.py b/tests/unittests/cmd/devel/test_hotplug_hook.py
index 5ecb5969..d2ef82b1 100644
--- a/tests/unittests/cmd/devel/test_hotplug_hook.py
+++ b/tests/unittests/cmd/devel/test_hotplug_hook.py
@@ -19,7 +19,9 @@ FAKE_MAC = "11:22:33:44:55:66"
@pytest.fixture
def mocks():
m_init = mock.MagicMock(spec=Init)
+ m_activator = mock.MagicMock(spec=NetworkActivator)
m_distro = mock.MagicMock(spec=Distro)
+ m_distro.network_activator = mock.PropertyMock(return_value=m_activator)
m_datasource = mock.MagicMock(spec=DataSource)
m_datasource.distro = m_distro
m_init.datasource = m_datasource
@@ -41,18 +43,11 @@ def mocks():
return_value=m_network_state,
)
- m_activator = mock.MagicMock(spec=NetworkActivator)
- select_activator = mock.patch(
- "cloudinit.cmd.devel.hotplug_hook.activators.select_activator",
- return_value=m_activator,
- )
-
sleep = mock.patch("time.sleep")
read_sys_net.start()
update_event_enabled.start()
parse_net.start()
- select_activator.start()
m_sleep = sleep.start()
yield namedtuple("mocks", "m_init m_network_state m_activator m_sleep")(
@@ -65,7 +60,6 @@ def mocks():
read_sys_net.stop()
update_event_enabled.stop()
parse_net.stop()
- select_activator.stop()
sleep.stop()
diff --git a/tests/unittests/cmd/test_clean.py b/tests/unittests/cmd/test_clean.py
index 71f541c6..b859b83b 100644
--- a/tests/unittests/cmd/test_clean.py
+++ b/tests/unittests/cmd/test_clean.py
@@ -2,170 +2,215 @@
import os
from collections import namedtuple
-from io import StringIO
+import pytest
+
+import cloudinit.settings
from cloudinit.cmd import clean
-from cloudinit.util import ensure_dir, sym_link, write_file
-from tests.unittests.helpers import CiTestCase, mock, wrap_and_call
+from cloudinit.util import ensure_dir, sym_link
+from tests.unittests.helpers import mock, wrap_and_call
MyPaths = namedtuple("MyPaths", "cloud_dir")
+CleanPaths = namedtuple(
+ "CleanPaths", ["tmpdir", "cloud_dir", "clean_dir", "log", "output_log"]
+)
+
+
+@pytest.fixture(scope="function")
+def clean_paths(tmpdir):
+ return CleanPaths(
+ tmpdir=tmpdir,
+ cloud_dir=tmpdir.join("varlibcloud"),
+ clean_dir=tmpdir.join("clean.d"),
+ log=tmpdir.join("cloud-init.log"),
+ output_log=tmpdir.join("cloud-init-output.log"),
+ )
-class TestClean(CiTestCase):
- def setUp(self):
- super(TestClean, self).setUp()
- self.new_root = self.tmp_dir()
- self.artifact_dir = self.tmp_path("artifacts", self.new_root)
- self.log1 = self.tmp_path("cloud-init.log", self.new_root)
- self.log2 = self.tmp_path("cloud-init-output.log", self.new_root)
+@pytest.fixture(scope="function")
+def init_class(clean_paths):
+ class FakeInit(object):
+ cfg = {
+ "def_log_file": clean_paths.log,
+ "output": {"all": f"|tee -a {clean_paths.output_log}"},
+ }
+ # Ensure cloud_dir has a trailing slash, to match real behaviour
+ paths = MyPaths(cloud_dir=f"{clean_paths.cloud_dir}/")
- class FakeInit(object):
- cfg = {
- "def_log_file": self.log1,
- "output": {"all": "|tee -a {0}".format(self.log2)},
- }
- # Ensure cloud_dir has a trailing slash, to match real behaviour
- paths = MyPaths(cloud_dir="{}/".format(self.artifact_dir))
+ def __init__(self, ds_deps):
+ pass
- def __init__(self, ds_deps):
- pass
+ def read_cfg(self):
+ pass
- def read_cfg(self):
- pass
+ return FakeInit
- self.init_class = FakeInit
- def test_remove_artifacts_removes_logs(self):
+class TestClean:
+ def test_remove_artifacts_removes_logs(self, clean_paths, init_class):
"""remove_artifacts removes logs when remove_logs is True."""
- write_file(self.log1, "cloud-init-log")
- write_file(self.log2, "cloud-init-output-log")
+ clean_paths.log.write("cloud-init-log")
+ clean_paths.output_log.write("cloud-init-output-log")
- self.assertFalse(
- os.path.exists(self.artifact_dir), "Unexpected artifacts dir"
- )
+ assert (
+ os.path.exists(clean_paths.cloud_dir) is False
+ ), "Unexpected cloud_dir"
retcode = wrap_and_call(
"cloudinit.cmd.clean",
- {"Init": {"side_effect": self.init_class}},
+ {"Init": {"side_effect": init_class}},
clean.remove_artifacts,
remove_logs=True,
)
- self.assertFalse(os.path.exists(self.log1), "Unexpected file")
- self.assertFalse(os.path.exists(self.log2), "Unexpected file")
- self.assertEqual(0, retcode)
+ assert (
+ clean_paths.log.exists() is False
+ ), f"Unexpected file {clean_paths.log}"
+ assert (
+ clean_paths.output_log.exists() is False
+ ), f"Unexpected file {clean_paths.output_log}"
+ assert 0 == retcode
+
+ @pytest.mark.allow_all_subp
+ def test_remove_artifacts_runparts_clean_d(self, clean_paths, init_class):
+ """remove_artifacts performs runparts on CLEAN_RUNPARTS_DIR"""
+ ensure_dir(clean_paths.cloud_dir)
+ artifact_file = clean_paths.tmpdir.join("didit")
+ ensure_dir(clean_paths.clean_dir)
+ assert artifact_file.exists() is False, f"Unexpected {artifact_file}"
+ clean_script = clean_paths.clean_dir.join("1.sh")
+ clean_script.write(f"#!/bin/bash\ntouch {artifact_file}\n")
+ clean_script.chmod(mode=0o755)
+ with mock.patch.object(
+ cloudinit.settings, "CLEAN_RUNPARTS_DIR", clean_paths.clean_dir
+ ):
+ retcode = wrap_and_call(
+ "cloudinit.cmd.clean",
+ {
+ "Init": {"side_effect": init_class},
+ },
+ clean.remove_artifacts,
+ remove_logs=False,
+ )
+ assert (
+ artifact_file.exists() is True
+ ), f"Missing expected {artifact_file}"
+ assert 0 == retcode
- def test_remove_artifacts_preserves_logs(self):
+ def test_remove_artifacts_preserves_logs(self, clean_paths, init_class):
"""remove_artifacts leaves logs when remove_logs is False."""
- write_file(self.log1, "cloud-init-log")
- write_file(self.log2, "cloud-init-output-log")
+ clean_paths.log.write("cloud-init-log")
+ clean_paths.output_log.write("cloud-init-output-log")
retcode = wrap_and_call(
"cloudinit.cmd.clean",
- {"Init": {"side_effect": self.init_class}},
+ {"Init": {"side_effect": init_class}},
clean.remove_artifacts,
remove_logs=False,
)
- self.assertTrue(os.path.exists(self.log1), "Missing expected file")
- self.assertTrue(os.path.exists(self.log2), "Missing expected file")
- self.assertEqual(0, retcode)
+ assert 0 == retcode
+ assert (
+ clean_paths.log.exists() is True
+ ), f"Missing expected file {clean_paths.log}"
+ assert (
+ clean_paths.output_log.exists()
+ ), f"Missing expected file {clean_paths.output_log}"
- def test_remove_artifacts_removes_unlinks_symlinks(self):
+ def test_remove_artifacts_removes_unlinks_symlinks(
+ self, clean_paths, init_class
+ ):
"""remove_artifacts cleans artifacts dir unlinking any symlinks."""
- dir1 = os.path.join(self.artifact_dir, "dir1")
+ dir1 = clean_paths.cloud_dir.join("dir1")
ensure_dir(dir1)
- symlink = os.path.join(self.artifact_dir, "mylink")
- sym_link(dir1, symlink)
+ symlink = clean_paths.cloud_dir.join("mylink")
+ sym_link(dir1.strpath, symlink.strpath)
retcode = wrap_and_call(
"cloudinit.cmd.clean",
- {"Init": {"side_effect": self.init_class}},
+ {"Init": {"side_effect": init_class}},
clean.remove_artifacts,
remove_logs=False,
)
- self.assertEqual(0, retcode)
+ assert 0 == retcode
for path in (dir1, symlink):
- self.assertFalse(
- os.path.exists(path), "Unexpected {0} dir".format(path)
- )
+ assert path.exists() is False, f"Unexpected {path} found"
- def test_remove_artifacts_removes_artifacts_skipping_seed(self):
+ def test_remove_artifacts_removes_artifacts_skipping_seed(
+ self, clean_paths, init_class
+ ):
"""remove_artifacts cleans artifacts dir with exception of seed dir."""
dirs = [
- self.artifact_dir,
- os.path.join(self.artifact_dir, "seed"),
- os.path.join(self.artifact_dir, "dir1"),
- os.path.join(self.artifact_dir, "dir2"),
+ clean_paths.cloud_dir,
+ clean_paths.cloud_dir.join("seed"),
+ clean_paths.cloud_dir.join("dir1"),
+ clean_paths.cloud_dir.join("dir2"),
]
for _dir in dirs:
ensure_dir(_dir)
retcode = wrap_and_call(
"cloudinit.cmd.clean",
- {"Init": {"side_effect": self.init_class}},
+ {"Init": {"side_effect": init_class}},
clean.remove_artifacts,
remove_logs=False,
)
- self.assertEqual(0, retcode)
+ assert 0 == retcode
for expected_dir in dirs[:2]:
- self.assertTrue(
- os.path.exists(expected_dir),
- "Missing {0} dir".format(expected_dir),
- )
+ assert expected_dir.exists() is True, f"Missing {expected_dir}"
for deleted_dir in dirs[2:]:
- self.assertFalse(
- os.path.exists(deleted_dir),
- "Unexpected {0} dir".format(deleted_dir),
- )
+ assert deleted_dir.exists() is False, f"Unexpected {deleted_dir}"
- def test_remove_artifacts_removes_artifacts_removes_seed(self):
+ def test_remove_artifacts_removes_artifacts_removes_seed(
+ self, clean_paths, init_class
+ ):
"""remove_artifacts removes seed dir when remove_seed is True."""
dirs = [
- self.artifact_dir,
- os.path.join(self.artifact_dir, "seed"),
- os.path.join(self.artifact_dir, "dir1"),
- os.path.join(self.artifact_dir, "dir2"),
+ clean_paths.cloud_dir,
+ clean_paths.cloud_dir.join("seed"),
+ clean_paths.cloud_dir.join("dir1"),
+ clean_paths.cloud_dir.join("dir2"),
]
for _dir in dirs:
ensure_dir(_dir)
retcode = wrap_and_call(
"cloudinit.cmd.clean",
- {"Init": {"side_effect": self.init_class}},
+ {"Init": {"side_effect": init_class}},
clean.remove_artifacts,
remove_logs=False,
remove_seed=True,
)
- self.assertEqual(0, retcode)
- self.assertTrue(
- os.path.exists(self.artifact_dir), "Missing artifact dir"
- )
+ assert 0 == retcode
+ assert (
+ clean_paths.cloud_dir.exists() is True
+ ), f"Missing dir {clean_paths.cloud_dir}"
for deleted_dir in dirs[1:]:
- self.assertFalse(
- os.path.exists(deleted_dir),
- "Unexpected {0} dir".format(deleted_dir),
- )
+ assert (
+ deleted_dir.exists() is False
+ ), f"Unexpected {deleted_dir} dir"
- def test_remove_artifacts_returns_one_on_errors(self):
+ def test_remove_artifacts_returns_one_on_errors(
+ self, clean_paths, init_class, capsys
+ ):
"""remove_artifacts returns non-zero on failure and prints an error."""
- ensure_dir(self.artifact_dir)
- ensure_dir(os.path.join(self.artifact_dir, "dir1"))
+ ensure_dir(clean_paths.cloud_dir)
+ ensure_dir(clean_paths.cloud_dir.join("dir1"))
- with mock.patch("sys.stderr", new_callable=StringIO) as m_stderr:
- retcode = wrap_and_call(
- "cloudinit.cmd.clean",
- {
- "del_dir": {"side_effect": OSError("oops")},
- "Init": {"side_effect": self.init_class},
- },
- clean.remove_artifacts,
- remove_logs=False,
- )
- self.assertEqual(1, retcode)
- self.assertEqual(
- "Error:\nCould not remove %s/dir1: oops\n" % self.artifact_dir,
- m_stderr.getvalue(),
+ retcode = wrap_and_call(
+ "cloudinit.cmd.clean",
+ {
+ "del_dir": {"side_effect": OSError("oops")},
+ "Init": {"side_effect": init_class},
+ },
+ clean.remove_artifacts,
+ remove_logs=False,
+ )
+ assert 1 == retcode
+ _out, err = capsys.readouterr()
+ assert (
+ f"Error:\nCould not remove {clean_paths.cloud_dir}/dir1: oops\n"
+ == err
)
- def test_handle_clean_args_reboots(self):
+ def test_handle_clean_args_reboots(self, init_class):
"""handle_clean_args_reboots when reboot arg is provided."""
called_cmds = []
@@ -174,38 +219,76 @@ class TestClean(CiTestCase):
called_cmds.append((cmd, capture))
return "", ""
- myargs = namedtuple("MyArgs", "remove_logs remove_seed reboot")
- cmdargs = myargs(remove_logs=False, remove_seed=False, reboot=True)
+ myargs = namedtuple(
+ "MyArgs", "remove_logs remove_seed reboot machine_id"
+ )
+ cmdargs = myargs(
+ remove_logs=False, remove_seed=False, reboot=True, machine_id=False
+ )
retcode = wrap_and_call(
"cloudinit.cmd.clean",
{
"subp": {"side_effect": fake_subp},
- "Init": {"side_effect": self.init_class},
+ "Init": {"side_effect": init_class},
},
clean.handle_clean_args,
name="does not matter",
args=cmdargs,
)
- self.assertEqual(0, retcode)
- self.assertEqual([(["shutdown", "-r", "now"], False)], called_cmds)
+ assert 0 == retcode
+ assert [(["shutdown", "-r", "now"], False)] == called_cmds
+
+ @pytest.mark.parametrize("machine_id", (True, False))
+ def test_handle_clean_args_removed_machine_id(
+ self, machine_id, clean_paths, init_class
+ ):
+ """handle_clean_args removes /etc/machine-id when arg is True."""
+
+ myargs = namedtuple(
+ "MyArgs", "remove_logs remove_seed reboot machine_id"
+ )
+ cmdargs = myargs(
+ remove_logs=False,
+ remove_seed=False,
+ reboot=False,
+ machine_id=machine_id,
+ )
+ machine_id_path = clean_paths.tmpdir.join("machine-id")
+ machine_id_path.write("SOME-AMAZN-MACHINE-ID")
+ with mock.patch.object(
+ cloudinit.settings, "CLEAN_RUNPARTS_DIR", clean_paths.clean_dir
+ ):
+ with mock.patch.object(
+ cloudinit.cmd.clean, "ETC_MACHINE_ID", machine_id_path.strpath
+ ):
+ retcode = wrap_and_call(
+ "cloudinit.cmd.clean",
+ {
+ "Init": {"side_effect": init_class},
+ },
+ clean.handle_clean_args,
+ name="does not matter",
+ args=cmdargs,
+ )
+ assert 0 == retcode
+ assert machine_id_path.exists() is bool(not machine_id)
- def test_status_main(self):
+ def test_status_main(self, clean_paths, init_class):
"""clean.main can be run as a standalone script."""
- write_file(self.log1, "cloud-init-log")
- with self.assertRaises(SystemExit) as context_manager:
+ clean_paths.log.write("cloud-init-log")
+ with pytest.raises(SystemExit) as context_manager:
wrap_and_call(
"cloudinit.cmd.clean",
{
- "Init": {"side_effect": self.init_class},
+ "Init": {"side_effect": init_class},
"sys.argv": {"new": ["clean", "--logs"]},
},
clean.main,
)
-
- self.assertEqual(0, context_manager.exception.code)
- self.assertFalse(
- os.path.exists(self.log1), "Unexpected log {0}".format(self.log1)
- )
+ assert 0 == context_manager.value.code
+ assert (
+ clean_paths.log.exists() is False
+ ), f"Unexpected log {clean_paths.log}"
# vi: ts=4 expandtab syntax=python
diff --git a/tests/unittests/cmd/test_query.py b/tests/unittests/cmd/test_query.py
index 207078fa..dd517a4b 100644
--- a/tests/unittests/cmd/test_query.py
+++ b/tests/unittests/cmd/test_query.py
@@ -25,7 +25,7 @@ M_PATH = "cloudinit.cmd.query."
def _gzip_data(data):
with BytesIO() as iobuf:
- with gzip.GzipFile(mode="wb", fileobj=iobuf) as gzfp:
+ with gzip.GzipFile(mode="wb", fileobj=iobuf, mtime=0) as gzfp:
gzfp.write(data)
return iobuf.getvalue()
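Passing mtime=0 makes the gzip stream deterministic: GzipFile otherwise embeds the current timestamp in the header, so identical payloads compress to different bytes across runs. A minimal demonstration:

    import gzip
    from io import BytesIO

    def gz(data: bytes, mtime: int) -> bytes:
        buf = BytesIO()
        with gzip.GzipFile(mode="wb", fileobj=buf, mtime=mtime) as fp:
            fp.write(data)
        return buf.getvalue()

    assert gz(b"x", mtime=0) == gz(b"x", mtime=0)  # reproducible output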
diff --git a/tests/unittests/config/test_apt_source_v1.py b/tests/unittests/config/test_apt_source_v1.py
index 371963b1..7c59d279 100644
--- a/tests/unittests/config/test_apt_source_v1.py
+++ b/tests/unittests/config/test_apt_source_v1.py
@@ -16,6 +16,7 @@ from unittest.mock import call
from cloudinit import gpg, subp, util
from cloudinit.config import cc_apt_configure
from tests.unittests.helpers import TestCase
+from tests.unittests.util import get_cloud
EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
@@ -49,14 +50,6 @@ class FakeDatasource:
self.region = "region"
-class FakeCloud(object):
- """Fake Cloud helper object"""
-
- def __init__(self):
- self.distro = FakeDistro()
- self.datasource = FakeDatasource()
-
-
class TestAptSourceConfig(TestCase):
"""TestAptSourceConfig
Main Class to test apt_source configs
@@ -78,7 +71,7 @@ class TestAptSourceConfig(TestCase):
self.tmp, "etc/apt/sources.list.d/", "cloud_config_sources.list"
)
- self.fakecloud = FakeCloud()
+ self.cloud = get_cloud()
rpatcher = mock.patch("cloudinit.util.lsb_release")
get_rel = rpatcher.start()
@@ -125,7 +118,7 @@ class TestAptSourceConfig(TestCase):
"""
cfg = self.wrapv1conf(cfg)
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+ cc_apt_configure.handle("test", cfg, self.cloud, None, None)
self.assertTrue(os.path.isfile(filename))
@@ -280,7 +273,7 @@ class TestAptSourceConfig(TestCase):
"""
cfg = self.wrapv1conf(cfg)
params = self._get_default_params()
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+ cc_apt_configure.handle("test", cfg, self.cloud, None, None)
self.assertTrue(os.path.isfile(filename))
@@ -371,7 +364,7 @@ class TestAptSourceConfig(TestCase):
cfg = self.wrapv1conf(cfg)
with mock.patch.object(cc_apt_configure, "add_apt_key") as mockobj:
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+ cc_apt_configure.handle("test", cfg, self.cloud, None, None)
# check if it added the right number of keys
calls = []
@@ -497,7 +490,7 @@ class TestAptSourceConfig(TestCase):
cfg = self.wrapv1conf([cfg])
with mock.patch.object(cc_apt_configure, "add_apt_key") as mockobj:
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+ cc_apt_configure.handle("test", cfg, self.cloud, None, None)
# check if it added the right amount of keys
sources = cfg["apt"]["sources"]
@@ -558,7 +551,7 @@ class TestAptSourceConfig(TestCase):
cfg = {"key": "fakekey 4242", "filename": self.aptlistfile}
cfg = self.wrapv1conf([cfg])
with mock.patch.object(cc_apt_configure, "apt_key") as mockobj:
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+ cc_apt_configure.handle("test", cfg, self.cloud, None, None)
calls = (
call(
@@ -582,9 +575,7 @@ class TestAptSourceConfig(TestCase):
subp, "subp", return_value=("fakekey 1212", "")
):
with mock.patch.object(cc_apt_configure, "apt_key") as mockobj:
- cc_apt_configure.handle(
- "test", cfg, self.fakecloud, None, None
- )
+ cc_apt_configure.handle("test", cfg, self.cloud, None, None)
calls = (
call(
@@ -613,9 +604,7 @@ class TestAptSourceConfig(TestCase):
with mock.patch.object(
gpg, "getkeybyid", return_value=expectedkey
) as mockgetkey:
- cc_apt_configure.handle(
- "test", cfg, self.fakecloud, None, None
- )
+ cc_apt_configure.handle("test", cfg, self.cloud, None, None)
if is_hardened is not None:
mockkey.assert_called_with(
expectedkey, self.aptlistfile, hardened=is_hardened
@@ -661,7 +650,7 @@ class TestAptSourceConfig(TestCase):
cfg = self.wrapv1conf([cfg])
with mock.patch.object(subp, "subp") as mockobj:
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+ cc_apt_configure.handle("test", cfg, self.cloud, None, None)
mockobj.assert_called_once_with(
[
"add-apt-repository",
@@ -691,7 +680,7 @@ class TestAptSourceConfig(TestCase):
cfg = self.wrapv1conf([cfg1, cfg2, cfg3])
with mock.patch.object(subp, "subp") as mockobj:
- cc_apt_configure.handle("test", cfg, self.fakecloud, None, None)
+ cc_apt_configure.handle("test", cfg, self.cloud, None, None)
calls = [
call(
[
diff --git a/tests/unittests/config/test_apt_source_v3.py b/tests/unittests/config/test_apt_source_v3.py
index 8aceff06..abb94340 100644
--- a/tests/unittests/config/test_apt_source_v3.py
+++ b/tests/unittests/config/test_apt_source_v3.py
@@ -52,13 +52,6 @@ class FakeDatasource:
self.region = "region"
-class FakeCloud:
- """Fake Cloud helper object"""
-
- def __init__(self):
- self.datasource = FakeDatasource()
-
-
class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
"""TestAptSourceConfig
Main Class to test apt configs
@@ -690,7 +683,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
fromfn = "%s/%s_%s" % (pre, archive, post)
tofn = "%s/test.ubuntu.com_%s" % (pre, post)
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch)
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, get_cloud(), arch)
self.assertEqual(
mirrors["MIRROR"], "http://test.ubuntu.com/%s/" % component
@@ -785,7 +778,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
}
mirrors = cc_apt_configure.find_apt_mirror_info(
- cfg, FakeCloud(), "amd64"
+ cfg, get_cloud(), "amd64"
)
self.assertEqual(mirrors["MIRROR"], pmir)
@@ -821,7 +814,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
],
}
- mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch)
+ mirrors = cc_apt_configure.find_apt_mirror_info(cfg, get_cloud(), arch)
self.assertEqual(mirrors["PRIMARY"], pmir)
self.assertEqual(mirrors["MIRROR"], pmir)
@@ -843,7 +836,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
}
mirrors = cc_apt_configure.find_apt_mirror_info(
- cfg, FakeCloud(), "amd64"
+ cfg, get_cloud(), "amd64"
)
self.assertEqual(mirrors["MIRROR"], pmir)
@@ -911,7 +904,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
side_effect=[pmir, smir],
) as mocksearch:
mirrors = cc_apt_configure.find_apt_mirror_info(
- cfg, FakeCloud(), "amd64"
+ cfg, get_cloud(), "amd64"
)
calls = [call(["pfailme", pmir]), call(["sfailme", smir])]
@@ -961,7 +954,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
cc_apt_configure.util, "search_for_mirror"
) as mockse:
mirrors = cc_apt_configure.find_apt_mirror_info(
- cfg, FakeCloud(), arch
+ cfg, get_cloud(), arch
)
mockse.assert_not_called()
diff --git a/tests/unittests/config/test_cc_ca_certs.py b/tests/unittests/config/test_cc_ca_certs.py
index 39614635..a0b402ac 100644
--- a/tests/unittests/config/test_cc_ca_certs.py
+++ b/tests/unittests/config/test_cc_ca_certs.py
@@ -421,7 +421,14 @@ class TestCACertsSchema:
"config, error_msg",
(
# Valid, yet deprecated schemas
- ({"ca-certs": {"remove-defaults": True}}, None),
+ (
+ {"ca-certs": {"remove-defaults": True}},
+ "Cloud config schema deprecations: "
+ "ca-certs: DEPRECATED. Dropped after April 2027. "
+ "Use ``ca_certs``., "
+ "ca-certs.remove-defaults: DEPRECATED. "
+ "Dropped after April 2027. Use ``remove_defaults``.",
+ ),
# Invalid schemas
(
{"ca_certs": 1},
diff --git a/tests/unittests/config/test_cc_grub_dpkg.py b/tests/unittests/config/test_cc_grub_dpkg.py
index 9bdc9c74..0f9cc232 100644
--- a/tests/unittests/config/test_cc_grub_dpkg.py
+++ b/tests/unittests/config/test_cc_grub_dpkg.py
@@ -12,7 +12,7 @@ from cloudinit.config.schema import (
validate_cloudconfig_schema,
)
from cloudinit.subp import ProcessExecutionError
-from tests.unittests.helpers import skipUnlessJsonSchema
+from tests.unittests.helpers import does_not_raise, skipUnlessJsonSchema
class TestFetchIdevs:
@@ -194,26 +194,59 @@ class TestHandle:
class TestGrubDpkgSchema:
@pytest.mark.parametrize(
- "config, error_msg",
+ "config, expectation, has_errors",
(
- ({"grub_dpkg": {"grub-pc/install_devices_empty": False}}, None),
- ({"grub_dpkg": {"grub-pc/install_devices_empty": "off"}}, None),
+ (
+ {"grub_dpkg": {"grub-pc/install_devices_empty": False}},
+ does_not_raise(),
+ None,
+ ),
+ (
+ {"grub_dpkg": {"grub-pc/install_devices_empty": "off"}},
+ pytest.raises(
+ SchemaValidationError,
+ match=(
+ r"^Cloud config schema deprecations:"
+ r" grub_dpkg.grub-pc/install_devices_empty:"
+ r" DEPRECATED. Use a boolean value instead.$"
+ ),
+ ),
+ False,
+ ),
(
{"grub_dpkg": {"enabled": "yes"}},
- "'yes' is not of type 'boolean'",
+ pytest.raises(
+ SchemaValidationError,
+ match="'yes' is not of type 'boolean'",
+ ),
+ True,
),
(
{"grub_dpkg": {"grub-pc/install_devices": ["/dev/sda"]}},
- r"\['/dev/sda'\] is not of type 'string'",
+ pytest.raises(
+ SchemaValidationError,
+ match=r"\['/dev/sda'\] is not of type 'string'",
+ ),
+ True,
+ ),
+ (
+ {"grub-dpkg": {"grub-pc/install_devices_empty": False}},
+ pytest.raises(
+ SchemaValidationError,
+ match=(
+ r"^Cloud config schema deprecations: grub-dpkg:"
+ r" DEPRECATED. Use ``grub_dpkg`` instead$"
+ ),
+ ),
+ False,
),
),
)
@skipUnlessJsonSchema()
- def test_schema_validation(self, config, error_msg):
+ def test_schema_validation(self, config, expectation, has_errors):
"""Assert expected schema validation and error messages."""
schema = get_schema()
- if error_msg is None:
+ with expectation as exc_info:
validate_cloudconfig_schema(config, schema, strict=True)
- else:
- with pytest.raises(SchemaValidationError, match=error_msg):
- validate_cloudconfig_schema(config, schema, strict=True)
+ if has_errors is not None:
+ assert has_errors == exc_info.value.has_errors()
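does_not_raise lets valid and invalid configs share one parametrized code path: with it, exc_info is None, so the has_errors check is skipped for the non-error cases. It is commonly implemented as a no-op context manager; a sketch, assuming the helpers module follows the usual pytest idiom:

    from contextlib import contextmanager

    @contextmanager
    def does_not_raise():
        yield  # any exception raised in the body still propagates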
diff --git a/tests/unittests/config/test_cc_landscape.py b/tests/unittests/config/test_cc_landscape.py
index 79ea6b0a..b08e3d44 100644
--- a/tests/unittests/config/test_cc_landscape.py
+++ b/tests/unittests/config/test_cc_landscape.py
@@ -186,6 +186,15 @@ class TestLandscapeSchema:
# tags are comma-delimited
({"landscape": {"client": {"tags": "1,2,3"}}}, None),
({"landscape": {"client": {"tags": "1"}}}, None),
+ (
+ {
+ "landscape": {
+ "client": {},
+ "random-config-value": {"tags": "1"},
+ }
+ },
+ "Additional properties are not allowed",
+ ),
# Require client key
({"landscape": {}}, "'client' is a required property"),
# tags are not whitespace-delimited
diff --git a/tests/unittests/config/test_cc_lxd.py b/tests/unittests/config/test_cc_lxd.py
index 3b444127..8b75a1f7 100644
--- a/tests/unittests/config/test_cc_lxd.py
+++ b/tests/unittests/config/test_cc_lxd.py
@@ -1,5 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
import re
+from copy import deepcopy
from unittest import mock
import pytest
@@ -27,33 +28,82 @@ class TestLxd(t_help.CiTestCase):
}
}
}
+ backend_def = (
+ ("zfs", "zfs", "zfsutils-linux"),
+ ("btrfs", "mkfs.btrfs", "btrfs-progs"),
+ ("lvm", "lvcreate", "lvm2"),
+ ("dir", None, None),
+ )
- @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
- @mock.patch("cloudinit.config.cc_lxd.subp")
- def test_lxd_init(self, mock_subp, m_maybe_clean):
- cc = get_cloud()
- mock_subp.which.return_value = True
- m_maybe_clean.return_value = None
- cc_lxd.handle("cc_lxd", self.lxd_cfg, cc, self.logger, [])
- self.assertTrue(mock_subp.which.called)
- # no bridge config, so maybe_cleanup should not be called.
- self.assertFalse(m_maybe_clean.called)
- self.assertEqual(
- [
- mock.call(["lxd", "waitready", "--timeout=300"]),
- mock.call(
+ @mock.patch("cloudinit.config.cc_lxd.util.system_info")
+ @mock.patch("cloudinit.config.cc_lxd.os.path.exists", return_value=True)
+ @mock.patch("cloudinit.config.cc_lxd.subp.subp", return_value=True)
+ @mock.patch("cloudinit.config.cc_lxd.subp.which", return_value=False)
+ @mock.patch(
+ "cloudinit.config.cc_lxd.maybe_cleanup_default", return_value=None
+ )
+ def test_lxd_init(self, maybe_clean, which, subp, exists, system_info):
+ system_info.return_value = {"uname": [0, 1, "mykernel"]}
+ cc = get_cloud(mocked_distro=True)
+ install = cc.distro.install_packages
+
+ for backend, cmd, package in self.backend_def:
+ lxd_cfg = deepcopy(self.lxd_cfg)
+ lxd_cfg["lxd"]["init"]["storage_backend"] = backend
+ subp.call_args_list = []
+ install.call_args_list = []
+ exists.call_args_list = []
+ cc_lxd.handle("cc_lxd", lxd_cfg, cc, self.logger, [])
+ if cmd:
+ which.assert_called_with(cmd)
+ # no bridge config, so maybe_cleanup should not be called.
+ self.assertFalse(maybe_clean.called)
+ self.assertEqual(
+ [
+ mock.call(list(filter(None, ["lxd", package]))),
+ ],
+ install.call_args_list,
+ )
+ self.assertEqual(
+ [
+ mock.call(["lxd", "waitready", "--timeout=300"]),
+ mock.call(
+ [
+ "lxd",
+ "init",
+ "--auto",
+ "--network-address=0.0.0.0",
+ f"--storage-backend={backend}",
+ "--storage-pool=poolname",
+ ]
+ ),
+ ],
+ subp.call_args_list,
+ )
+
+ if backend == "lvm":
+ self.assertEqual(
[
- "lxd",
- "init",
- "--auto",
- "--network-address=0.0.0.0",
- "--storage-backend=zfs",
- "--storage-pool=poolname",
- ]
- ),
- ],
- mock_subp.subp.call_args_list,
- )
+ mock.call(
+ "/lib/modules/mykernel/"
+ "kernel/drivers/md/dm-thin-pool.ko"
+ )
+ ],
+ exists.call_args_list,
+ )
+ else:
+ self.assertEqual([], exists.call_args_list)
+
+ @mock.patch("cloudinit.config.cc_lxd.subp.which", return_value=False)
+ def test_lxd_package_install(self, m_which):
+ for backend, _, package in self.backend_def:
+ lxd_cfg = deepcopy(self.lxd_cfg)
+ lxd_cfg["lxd"]["init"]["storage_backend"] = backend
+
+ packages = cc_lxd.get_required_packages(lxd_cfg["lxd"]["init"])
+ assert "lxd" in packages
+ if package:
+ assert package in packages
@mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default")
@mock.patch("cloudinit.config.cc_lxd.subp")
@@ -174,6 +224,7 @@ class TestLxd(t_help.CiTestCase):
"ipv6_netmask": "64",
"ipv6_nat": "true",
"domain": "lxd",
+ "mtu": 9000,
}
self.assertEqual(
cc_lxd.bridge_to_cmd(data),
@@ -188,6 +239,7 @@ class TestLxd(t_help.CiTestCase):
"ipv6.address=fd98:9e0:3744::1/64",
"ipv6.nat=true",
"dns.domain=lxd",
+ "bridge.mtu=9000",
],
["network", "attach-profile", "testbr0", "default", "eth0"],
),
@@ -199,6 +251,7 @@ class TestLxd(t_help.CiTestCase):
"ipv6_address": "fd98:9e0:3744::1",
"ipv6_netmask": "64",
"ipv6_nat": "true",
+ "mtu": -1,
}
self.assertEqual(
cc_lxd.bridge_to_cmd(data),
@@ -286,17 +339,36 @@ class TestLXDSchema:
            # Only allow supported init.storage_backend values
(
{"lxd": {"init": {"storage_backend": "1zfs"}}},
- re.escape("not one of ['zfs', 'dir']"),
+ re.escape("not one of ['zfs', 'dir', 'lvm', 'btrfs']"),
),
+ ({"lxd": {"init": {"storage_backend": "lvm"}}}, None),
+ ({"lxd": {"init": {"storage_backend": "btrfs"}}}, None),
+ ({"lxd": {"init": {"storage_backend": "zfs"}}}, None),
# Require bridge.mode
({"lxd": {"bridge": {}}}, "bridge: 'mode' is a required property"),
# Require init or bridge keys
({"lxd": {}}, "does not have enough properties"),
+            # Allow bridge.mtu values
+ ({"lxd": {"bridge": {"mode": "new", "mtu": 9000}}}, None),
+ # LXD's default value
+ ({"lxd": {"bridge": {"mode": "new", "mtu": -1}}}, None),
+ # No additionalProperties
+ (
+ {"lxd": {"init": {"invalid": None}}},
+ "Additional properties are not allowed",
+ ),
+ (
+ {"lxd": {"bridge": {"mode": None, "garbage": None}}},
+ "Additional properties are not allowed",
+ ),
],
)
@t_help.skipUnlessJsonSchema()
def test_schema_validation(self, config, error_msg):
- with pytest.raises(SchemaValidationError, match=error_msg):
+ if error_msg:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
validate_cloudconfig_schema(config, get_schema(), strict=True)
diff --git a/tests/unittests/config/test_cc_mcollective.py b/tests/unittests/config/test_cc_mcollective.py
index aa726dd3..a581f9bb 100644
--- a/tests/unittests/config/test_cc_mcollective.py
+++ b/tests/unittests/config/test_cc_mcollective.py
@@ -172,6 +172,11 @@ class TestMcollectiveSchema:
),
            # Allow undocumented client keys below 'conf' without error
({"mcollective": {"conf": {"customkey": 1}}}, None),
+ # Don't allow undocumented keys that don't match expected type
+ (
+ {"mcollective": {"conf": {"": {"test": None}}}},
+ "does not match any of the regexes:",
+ ),
(
{"mcollective": {"conf": {"public-cert": 1}}},
"mcollective.conf.public-cert: 1 is not of type 'string'",
diff --git a/tests/unittests/config/test_cc_mounts.py b/tests/unittests/config/test_cc_mounts.py
index 8ae28099..0073829a 100644
--- a/tests/unittests/config/test_cc_mounts.py
+++ b/tests/unittests/config/test_cc_mounts.py
@@ -1,13 +1,21 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import math
import os.path
import re
+from collections import namedtuple
from unittest import mock
import pytest
+from pytest import approx
from cloudinit.config import cc_mounts
-from cloudinit.config.cc_mounts import create_swapfile
+from cloudinit.config.cc_mounts import (
+ GB,
+ MB,
+ create_swapfile,
+ suggested_swapsize,
+)
from cloudinit.config.schema import (
SchemaValidationError,
get_schema,
@@ -524,6 +532,35 @@ class TestCreateSwapfile:
msg = "fallocate swap creation failed, will attempt with dd"
assert msg in caplog.text
+ # See https://help.ubuntu.com/community/SwapFaq
+ @pytest.mark.parametrize(
+ "memsize,expected",
+ [
+ (256 * MB, 256 * MB),
+ (512 * MB, 512 * MB),
+ (1 * GB, 1 * GB),
+ (2 * GB, 2 * GB),
+ (4 * GB, 4 * GB),
+ (8 * GB, 4 * GB),
+ (16 * GB, 4 * GB),
+ (32 * GB, 6 * GB),
+ (64 * GB, 8 * GB),
+ (128 * GB, 11 * GB),
+ (256 * GB, 16 * GB),
+ (512 * GB, 23 * GB),
+ ],
+ )
+ def test_suggested_swapsize(self, memsize, expected, mocker):
+ mock_stat = namedtuple("mock_stat", "f_frsize f_bfree")
+ mocker.patch(
+ "os.statvfs",
+ # Don't care about available disk space for the purposes of this
+ # test
+ return_value=mock_stat(math.inf, math.inf),
+ )
+ size = suggested_swapsize(memsize, math.inf, "dontcare")
+ assert expected == approx(size)
+
class TestMountsSchema:
@pytest.mark.parametrize(
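The expected values in the swap-size table above follow the SwapFaq heuristic: swap equals RAM up to 4 GB, is capped at 4 GB through 16 GB of RAM, then grows roughly as the square root of RAM in GB (round(sqrt(512)) == 23). A hypothetical model reproducing the table, not the real suggested_swapsize (GB here is the cc_mounts constant imported above):

    import math

    def approx_swapsize(memsize: float) -> float:
        gb = memsize / GB
        if gb <= 4:
            return memsize  # swap matches RAM for small machines
        if gb <= 16:
            return 4 * GB  # flat 4 GB in the mid range
        return round(math.sqrt(gb)) * GB  # sqrt growth for large RAM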
diff --git a/tests/unittests/config/test_cc_package_update_upgrade_install.py b/tests/unittests/config/test_cc_package_update_upgrade_install.py
index 1bdddfcc..e8fce98f 100644
--- a/tests/unittests/config/test_cc_package_update_upgrade_install.py
+++ b/tests/unittests/config/test_cc_package_update_upgrade_install.py
@@ -18,6 +18,30 @@ class TestPackageUpdateUpgradeSchema:
({"packages": ["p1", ["p2", "p3", "p4"]]}, ""),
# empty packages list
({"packages": []}, "is too short"),
+ (
+ {"apt_update": False},
+ (
+ "deprecations: apt_update: DEPRECATED."
+ " Dropped after April 2027. Use ``package_update``."
+ " Default: ``false``"
+ ),
+ ),
+ (
+ {"apt_upgrade": False},
+ (
+ "deprecations: apt_upgrade: DEPRECATED."
+ " Dropped after April 2027. Use ``package_upgrade``."
+ " Default: ``false``"
+ ),
+ ),
+ (
+ {"apt_reboot_if_required": False},
+ (
+ "deprecations: apt_reboot_if_required: DEPRECATED."
+ " Dropped after April 2027."
+ " Use ``package_reboot_if_required``. Default: ``false``"
+ ),
+ ),
],
)
@skipUnlessJsonSchema()
diff --git a/tests/unittests/config/test_cc_power_state_change.py b/tests/unittests/config/test_cc_power_state_change.py
index cdd36fe0..5b970002 100644
--- a/tests/unittests/config/test_cc_power_state_change.py
+++ b/tests/unittests/config/test_cc_power_state_change.py
@@ -173,9 +173,21 @@ class TestPowerStateChangeSchema:
r"'test' is not one of \['poweroff', 'reboot', 'halt'\]",
),
# Delay can be a number, a +number, or "now"
- ({"power_state": {"mode": "halt", "delay": "5"}}, None),
+ (
+ {"power_state": {"mode": "halt", "delay": "5"}},
+ (
+ "power_state.delay: DEPRECATED."
+ " Dropped after April 2027. Use ``now`` or integer type."
+ ),
+ ),
({"power_state": {"mode": "halt", "delay": "now"}}, None),
- ({"power_state": {"mode": "halt", "delay": "+5"}}, None),
+ (
+ {"power_state": {"mode": "halt", "delay": "+5"}},
+ (
+ "power_state.delay: DEPRECATED."
+ " Dropped after April 2027. Use ``now`` or integer type."
+ ),
+ ),
({"power_state": {"mode": "halt", "delay": "+"}}, ""),
({"power_state": {"mode": "halt", "delay": "++5"}}, ""),
({"power_state": {"mode": "halt", "delay": "-5"}}, ""),
diff --git a/tests/unittests/config/test_cc_set_passwords.py b/tests/unittests/config/test_cc_set_passwords.py
index ac7abadb..1482162a 100644
--- a/tests/unittests/config/test_cc_set_passwords.py
+++ b/tests/unittests/config/test_cc_set_passwords.py
@@ -5,22 +5,24 @@ from unittest import mock
import pytest
-from cloudinit import subp, util
+from cloudinit import features, subp, util
from cloudinit.config import cc_set_passwords as setpass
from cloudinit.config.schema import (
SchemaValidationError,
get_schema,
validate_cloudconfig_schema,
)
-from tests.unittests.helpers import CiTestCase, skipUnlessJsonSchema
+from tests.unittests.helpers import skipUnlessJsonSchema
from tests.unittests.util import get_cloud
MODPATH = "cloudinit.config.cc_set_passwords."
+LOG = logging.getLogger(__name__)
-@pytest.fixture()
-def mock_uses_systemd(mocker):
+@pytest.fixture(autouse=True)
+def common_fixtures(mocker):
mocker.patch("cloudinit.distros.uses_systemd", return_value=True)
+ mocker.patch("cloudinit.util.write_to_console")
class TestHandleSSHPwauth:
@@ -72,7 +74,7 @@ class TestHandleSSHPwauth:
),
),
)
- @mock.patch(MODPATH + "update_ssh_config")
+ @mock.patch(f"{MODPATH}update_ssh_config")
@mock.patch("cloudinit.distros.subp.subp")
def test_restart_ssh_only_when_changes_made_and_ssh_installed(
self,
@@ -98,11 +100,9 @@ class TestHandleSSHPwauth:
r.msg for r in caplog.records if r.levelname == "DEBUG"
)
- @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+ @mock.patch(f"{MODPATH}update_ssh_config", return_value=True)
@mock.patch("cloudinit.distros.subp.subp")
- def test_unchanged_value_does_nothing(
- self, m_subp, update_ssh_config, mock_uses_systemd
- ):
+ def test_unchanged_value_does_nothing(self, m_subp, update_ssh_config):
"""If 'unchanged', then no updates to config and no restart."""
update_ssh_config.assert_not_called()
cloud = get_cloud("ubuntu")
@@ -113,10 +113,10 @@ class TestHandleSSHPwauth:
@pytest.mark.allow_subp_for("systemctl")
@mock.patch("cloudinit.distros.subp.subp")
- def test_valid_value_changes_updates_ssh(self, m_subp, mock_uses_systemd):
+ def test_valid_value_changes_updates_ssh(self, m_subp):
"""If value is a valid changed value, then update will be called."""
cloud = get_cloud("ubuntu")
- upname = MODPATH + "update_ssh_config"
+ upname = f"{MODPATH}update_ssh_config"
optname = "PasswordAuthentication"
for n, value in enumerate(util.FALSE_STRINGS + util.TRUE_STRINGS, 1):
optval = "yes" if value in util.TRUE_STRINGS else "no"
@@ -213,7 +213,7 @@ class TestHandleSSHPwauth:
),
),
)
- @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+ @mock.patch(f"{MODPATH}update_ssh_config", return_value=True)
@mock.patch("cloudinit.distros.subp.subp")
def test_no_restart_when_service_is_not_running(
self,
@@ -249,33 +249,41 @@ class TestHandleSSHPwauth:
assert cloud.distro.uses_systemd.call_count == 1
-@pytest.mark.usefixtures("mock_uses_systemd")
-class TestSetPasswordsHandle(CiTestCase):
- """Test cc_set_passwords.handle"""
+def get_chpasswd_calls(cfg, cloud, log):
+ with mock.patch(f"{MODPATH}subp.subp") as subp:
+ with mock.patch.object(setpass.Distro, "chpasswd") as chpasswd:
+ setpass.handle(
+ "IGNORED",
+ cfg=cfg,
+ cloud=cloud,
+ log=log,
+ args=[],
+ )
+ return chpasswd.call_args[0], subp.call_args
- with_logs = True
- @mock.patch(MODPATH + "subp.subp")
- def test_handle_on_empty_config(self, m_subp):
+class TestSetPasswordsHandle:
+ """Test cc_set_passwords.handle"""
+
+ @mock.patch(f"{MODPATH}subp.subp")
+ def test_handle_on_empty_config(self, m_subp, caplog):
"""handle logs that no password has changed when config is empty."""
- cloud = self.tmp_cloud(distro="ubuntu")
- setpass.handle(
- "IGNORED", cfg={}, cloud=cloud, log=self.logger, args=[]
- )
- self.assertEqual(
- "DEBUG: Leaving SSH config 'PasswordAuthentication' unchanged. "
- "ssh_pwauth=None\n",
- self.logs.getvalue(),
- )
- self.assertEqual(
- [mock.call(["systemctl", "status", "ssh"], capture=True)],
- m_subp.call_args_list,
- )
+ cloud = get_cloud()
+ setpass.handle("IGNORED", cfg={}, cloud=cloud, log=LOG, args=[])
+ assert (
+ "Leaving SSH config 'PasswordAuthentication' unchanged. "
+ "ssh_pwauth=None"
+ ) in caplog.text
+ assert [
+ mock.call(["systemctl", "status", "ssh"], capture=True)
+ ] == m_subp.call_args_list
- @mock.patch(MODPATH + "subp.subp")
- def test_handle_on_chpasswd_list_parses_common_hashes(self, m_subp):
+ @mock.patch(f"{MODPATH}subp.subp")
+ def test_handle_on_chpasswd_list_parses_common_hashes(
+ self, _m_subp, caplog
+ ):
"""handle parses command password hashes."""
- cloud = self.tmp_cloud(distro="ubuntu")
+ cloud = get_cloud()
valid_hashed_pwds = [
"root:$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y5WojbXWqnqYpUW.BrPx/"
"Dlew1Va",
@@ -283,88 +291,405 @@ class TestSetPasswordsHandle(CiTestCase):
"SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1",
]
cfg = {"chpasswd": {"list": valid_hashed_pwds}}
- with mock.patch.object(setpass, "chpasswd") as chpasswd:
- setpass.handle(
- "IGNORED", cfg=cfg, cloud=cloud, log=self.logger, args=[]
- )
- self.assertIn(
- "DEBUG: Handling input for chpasswd as list.", self.logs.getvalue()
- )
- self.assertIn(
- "DEBUG: Setting hashed password for ['root', 'ubuntu']",
- self.logs.getvalue(),
- )
- valid = "\n".join(valid_hashed_pwds) + "\n"
- called = chpasswd.call_args[0][1]
- self.assertEqual(valid, called)
+ with mock.patch.object(setpass.Distro, "chpasswd") as chpasswd:
+ setpass.handle("IGNORED", cfg=cfg, cloud=cloud, log=LOG, args=[])
+ assert "Handling input for chpasswd as list." in caplog.text
+ assert "Setting hashed password for ['root', 'ubuntu']" in caplog.text
+
+ first_arg = chpasswd.call_args[0]
+ for i, val in enumerate(*first_arg):
+ assert valid_hashed_pwds[i] == ":".join(val)
+
+ @mock.patch(f"{MODPATH}subp.subp")
+ def test_handle_on_chpasswd_users_parses_common_hashes(
+ self, _m_subp, caplog
+ ):
+ """handle parses command password hashes."""
+ cloud = get_cloud()
+ valid_hashed_pwds = [
+ {
+ "name": "root",
+ "password": "$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y5WojbXWqnqYpUW.BrPx/Dlew1Va", # noqa: E501
+ },
+ {
+ "name": "ubuntu",
+ "password": "$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52qSDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1", # noqa: E501
+ },
+ ]
+ cfg = {"chpasswd": {"users": valid_hashed_pwds}}
+ with mock.patch.object(setpass.Distro, "chpasswd") as chpasswd:
+ setpass.handle("IGNORED", cfg=cfg, cloud=cloud, log=LOG, args=[])
+ assert "Handling input for chpasswd as list." not in caplog.text
+ assert "Setting hashed password for ['root', 'ubuntu']" in caplog.text
+ first_arg = chpasswd.call_args[0]
+ for i, (name, password) in enumerate(*first_arg):
+ assert valid_hashed_pwds[i]["name"] == name
+ assert valid_hashed_pwds[i]["password"] == password
- @mock.patch(MODPATH + "util.is_BSD", return_value=True)
- @mock.patch(MODPATH + "subp.subp")
+ @pytest.mark.parametrize(
+ "user_cfg",
+ [
+ {
+ "list": [
+ "ubuntu:passw0rd",
+ "sadegh:$6$cTpht$Z2pSYxleRWK8IrsynFzHcrnPlpUhA7N9AM/",
+ ]
+ },
+ {
+ "users": [
+ {
+ "name": "ubuntu",
+ "password": "passw0rd",
+ "type": "text",
+ },
+ {
+ "name": "sadegh",
+ "password": "$6$cTpht$Z2pSYxleRWK8IrsynFzHcrnPlpUhA7N9AM/", # noqa: E501
+ },
+ ]
+ },
+ ],
+ )
def test_bsd_calls_custom_pw_cmds_to_set_and_expire_passwords(
- self, m_subp, m_is_bsd
+ self, user_cfg, mocker
):
"""BSD don't use chpasswd"""
+ mocker.patch(f"{MODPATH}util.is_BSD", return_value=True)
+ m_subp = mocker.patch(f"{MODPATH}subp.subp")
cloud = get_cloud(distro="freebsd")
- valid_pwds = ["ubuntu:passw0rd"]
- cfg = {"chpasswd": {"list": valid_pwds}}
+ cfg = {"chpasswd": user_cfg}
with mock.patch.object(
cloud.distro, "uses_systemd", return_value=False
):
- setpass.handle(
- "IGNORED", cfg=cfg, cloud=cloud, log=self.logger, args=[]
- )
- self.assertEqual(
- [
- mock.call(
- ["pw", "usermod", "ubuntu", "-h", "0"],
- data="passw0rd",
- logstring="chpasswd for ubuntu",
- ),
- mock.call(["pw", "usermod", "ubuntu", "-p", "01-Jan-1970"]),
- mock.call(["service", "sshd", "status"], capture=True),
- ],
- m_subp.call_args_list,
- )
+ setpass.handle("IGNORED", cfg=cfg, cloud=cloud, log=LOG, args=[])
+ assert [
+ mock.call(
+ ["pw", "usermod", "ubuntu", "-h", "0"],
+ data="passw0rd",
+ logstring="chpasswd for ubuntu",
+ ),
+ mock.call(
+ ["pw", "usermod", "sadegh", "-H", "0"],
+ data="$6$cTpht$Z2pSYxleRWK8IrsynFzHcrnPlpUhA7N9AM/",
+ logstring="chpasswd for sadegh",
+ ),
+ mock.call(["pw", "usermod", "ubuntu", "-p", "01-Jan-1970"]),
+ mock.call(["pw", "usermod", "sadegh", "-p", "01-Jan-1970"]),
+ mock.call(["service", "sshd", "status"], capture=True),
+ ] == m_subp.call_args_list
- @mock.patch(MODPATH + "util.multi_log")
- @mock.patch(MODPATH + "subp.subp")
- def test_handle_on_chpasswd_list_creates_random_passwords(
- self, m_subp, m_multi_log
- ):
+ @pytest.mark.parametrize(
+ "user_cfg",
+ [
+ {"expire": "false", "list": ["root:R", "ubuntu:RANDOM"]},
+ {
+ "expire": "false",
+ "users": [
+ {
+ "name": "root",
+ "type": "RANDOM",
+ },
+ {
+ "name": "ubuntu",
+ "type": "RANDOM",
+ },
+ ],
+ },
+ ],
+ )
+ def test_random_passwords(self, user_cfg, mocker, caplog):
"""handle parses command set random passwords."""
- cloud = self.tmp_cloud(distro="ubuntu")
- valid_random_pwds = ["root:R", "ubuntu:RANDOM"]
- cfg = {"chpasswd": {"expire": "false", "list": valid_random_pwds}}
- with mock.patch.object(setpass, "chpasswd") as chpasswd:
- setpass.handle(
- "IGNORED", cfg=cfg, cloud=cloud, log=self.logger, args=[]
- )
- self.assertIn(
- "DEBUG: Handling input for chpasswd as list.", self.logs.getvalue()
- )
- self.assertEqual(1, chpasswd.call_count)
- passwords, _ = chpasswd.call_args
- user_pass = {
- user: password
- for user, password in (
- line.split(":") for line in passwords[1].splitlines()
- )
- }
+ m_multi_log = mocker.patch(f"{MODPATH}util.multi_log")
+ mocker.patch(f"{MODPATH}subp.subp")
+
+ cloud = get_cloud()
+ cfg = {"chpasswd": user_cfg}
+
+ with mock.patch.object(setpass.Distro, "chpasswd") as chpasswd:
+ setpass.handle("IGNORED", cfg=cfg, cloud=cloud, log=LOG, args=[])
+ dbg_text = "Handling input for chpasswd as list."
+ if "list" in cfg["chpasswd"]:
+ assert dbg_text in caplog.text
+ else:
+ assert dbg_text not in caplog.text
+ assert 1 == chpasswd.call_count
+ user_pass = dict(*chpasswd.call_args[0])
- self.assertEqual(1, m_multi_log.call_count)
- self.assertEqual(
- mock.call(mock.ANY, stderr=False, fallback_to_stdout=False),
- m_multi_log.call_args,
+ assert 1 == m_multi_log.call_count
+ assert (
+ mock.call(mock.ANY, stderr=False, fallback_to_stdout=False)
+ == m_multi_log.call_args
)
- self.assertEqual(set(["root", "ubuntu"]), set(user_pass.keys()))
+ assert {"root", "ubuntu"} == set(user_pass.keys())
written_lines = m_multi_log.call_args[0][0].splitlines()
for password in user_pass.values():
for line in written_lines:
if password in line:
break
else:
- self.fail("Password not emitted to console")
+ pytest.fail("Password not emitted to console")
+
+ @pytest.mark.parametrize(
+ "list_def, users_def",
+ [
+ # demonstrate that the new users syntax matches current list behavior
+ (
+ {
+ "chpasswd": {
+ "list": [
+ "root:$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y5WojbXWqnqY"
+ "pUW.BrPx/Dlew1Va",
+ "ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoak"
+ "MMC7dR52qSDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXaz"
+ "GGx3oo1",
+ "dog:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC"
+ "7dR52qSDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx"
+ "3oo1",
+ "Till:RANDOM",
+ ]
+ }
+ },
+ {
+ "chpasswd": {
+ "users": [
+ {
+ "name": "root",
+ "password": "$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y"
+ "5WojbXWqnqYpUW.BrPx/Dlew1Va",
+ },
+ {
+ "name": "ubuntu",
+ "password": "$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9"
+ "acWCVEoakMMC7dR52qSDexZbrN9z8yHxhUM2b.sxpguSw"
+ "OlbOQSW/HpXazGGx3oo1",
+ },
+ {
+ "name": "dog",
+ "type": "hash",
+ "password": "$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9"
+ "acWCVEoakMMC7dR52qSDexZbrN9z8yHxhUM2b.sxpguSw"
+ "OlbOQSW/HpXazGGx3oo1",
+ },
+ {
+ "name": "Till",
+ "type": "RANDOM",
+ },
+ ]
+ }
+ },
+ ),
+ # Duplicate user: demonstrate no change in current duplicate
+ # behavior
+ (
+ {
+ "chpasswd": {
+ "list": [
+ "root:$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y5WojbXWqnqY"
+ "pUW.BrPx/Dlew1Va",
+ "ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoak"
+ "MMC7dR52qSDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXaz"
+ "GGx3oo1",
+ "ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoak"
+ "MMC7dR52qSDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXaz"
+ "GGx3oo1",
+ ]
+ }
+ },
+ {
+ "chpasswd": {
+ "users": [
+ {
+ "name": "root",
+ "password": "$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y"
+ "5WojbXWqnqYpUW.BrPx/Dlew1Va",
+ },
+ {
+ "name": "ubuntu",
+ "password": "$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9"
+ "acWCVEoakMMC7dR52qSDexZbrN9z8yHxhUM2b.sxpguSw"
+ "OlbOQSW/HpXazGGx3oo1",
+ },
+ {
+ "name": "ubuntu",
+ "password": "$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9"
+ "acWCVEoakMMC7dR52qSDexZbrN9z8yHxhUM2b.sxpguSw"
+ "OlbOQSW/HpXazGGx3oo1",
+ },
+ ]
+ }
+ },
+ ),
+ # Duplicate user: demonstrate that a duplicate across users/list
+ # doesn't change behavior
+ (
+ {
+ "chpasswd": {
+ "list": [
+ "root:$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y5WojbXWqnqY"
+ "pUW.BrPx/Dlew1Va",
+ "ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoak"
+ "MMC7dR52qSDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXaz"
+ "GGx3oo1",
+ "ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoak"
+ "MMC7dR52qSDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXaz"
+ "GGx3oo1",
+ ]
+ }
+ },
+ {
+ "chpasswd": {
+ "users": [
+ {
+ "name": "root",
+ "password": "$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y"
+ "5WojbXWqnqYpUW.BrPx/Dlew1Va",
+ },
+ {
+ "name": "ubuntu",
+ "password": "$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9"
+ "acWCVEoakMMC7dR5"
+ "2qSDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx"
+ "3oo1",
+ },
+ ],
+ "list": [
+ "ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoak"
+ "MMC7dR52qSDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXaz"
+ "GGx3oo1",
+ ],
+ }
+ },
+ ),
+ ],
+ )
+ def test_chpasswd_parity(self, list_def, users_def):
+ """Assert that two different configs cause identical calls"""
+
+ cloud = get_cloud()
+
+ def_1 = get_chpasswd_calls(list_def, cloud, LOG)
+ def_2 = get_chpasswd_calls(users_def, cloud, LOG)
+ assert def_1 == def_2
+ assert def_1[-1] == mock.call(
+ ["systemctl", "status", "ssh"], capture=True
+ )
+ for val in def_1:
+ assert val
+
+
+expire_cases = [
+ {
+ "chpasswd": {
+ "expire": True,
+ "list": [
+ "user1:password",
+ "user2:R",
+ "user3:$6$cTpht$Z2pSYxleRWK8IrsynFzHcrnPlpUhA7N9AM/",
+ ],
+ }
+ },
+ {
+ "chpasswd": {
+ "expire": True,
+ "users": [
+ {
+ "name": "user1",
+ "password": "password",
+ "type": "text",
+ },
+ {
+ "name": "user2",
+ "type": "RANDOM",
+ },
+ {
+ "name": "user3",
+ "password": "$6$cTpht$Z2pSYxleRWK8IrsynFzHcrnPlpUhA7N9AM/", # noqa: E501
+ },
+ ],
+ }
+ },
+ {
+ "chpasswd": {
+ "expire": False,
+ "list": [
+ "user1:password",
+ "user2:R",
+ "user3:$6$cTpht$Z2pSYxleRWK8IrsynFzHcrnPlpUhA7N9AM/",
+ ],
+ }
+ },
+ {
+ "chpasswd": {
+ "expire": False,
+ "users": [
+ {
+ "name": "user1",
+ "password": "password",
+ "type": "text",
+ },
+ {
+ "name": "user2",
+ "type": "RANDOM",
+ },
+ {
+ "name": "user3",
+ "password": "$6$cTpht$Z2pSYxleRWK8IrsynFzHcrnPlpUhA7N9AM/", # noqa: E501
+ },
+ ],
+ }
+ },
+]
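+# Each pair of cases above describes the same accounts twice: once via
+# the legacy ``list`` form and once via the newer ``users`` form;
+# TestExpire asserts both forms drive expire_passwd identically.
+# Equivalent user-data, for illustration only:
+#
+#   chpasswd:
+#     expire: true
+#     users:
+#       - {name: user1, password: password, type: text}
+#       - {name: user2, type: RANDOM}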
+
+
+class TestExpire:
+ @pytest.mark.parametrize("cfg", expire_cases)
+ def test_expire(self, cfg, mocker, caplog):
+ cloud = get_cloud()
+ mocker.patch(f"{MODPATH}subp.subp")
+ mocker.patch.object(cloud.distro, "chpasswd")
+ m_expire = mocker.patch.object(cloud.distro, "expire_passwd")
+
+ setpass.handle("IGNORED", cfg=cfg, cloud=cloud, log=LOG, args=[])
+
+ if bool(cfg["chpasswd"]["expire"]):
+ assert m_expire.call_args_list == [
+ mock.call("user1"),
+ mock.call("user2"),
+ mock.call("user3"),
+ ]
+ assert (
+ "Expired passwords for: ['user1', 'user2', 'user3'] users"
+ in caplog.text
+ )
+ else:
+ assert m_expire.call_args_list == []
+ assert "Expired passwords" not in caplog.text
+
+ @pytest.mark.parametrize("cfg", expire_cases)
+ def test_expire_old_behavior(self, cfg, mocker, caplog):
+ # Previously expire didn't apply to hashed passwords.
+ # Ensure we can preserve that case on older releases
+ features.EXPIRE_APPLIES_TO_HASHED_USERS = False
+ cloud = get_cloud()
+ mocker.patch(f"{MODPATH}subp.subp")
+ mocker.patch.object(cloud.distro, "chpasswd")
+ m_expire = mocker.patch.object(cloud.distro, "expire_passwd")
+
+ setpass.handle("IGNORED", cfg=cfg, cloud=cloud, log=LOG, args=[])
+
+ if bool(cfg["chpasswd"]["expire"]):
+ assert m_expire.call_args_list == [
+ mock.call("user1"),
+ mock.call("user2"),
+ ]
+ assert (
+ "Expired passwords for: ['user1', 'user2'] users"
+ in caplog.text
+ )
+ else:
+ assert m_expire.call_args_list == []
+ assert "Expired passwords" not in caplog.text
class TestSetPasswordsSchema:
@@ -375,9 +700,99 @@ class TestSetPasswordsSchema:
({"ssh_pwauth": True}, None),
({"ssh_pwauth": "yes"}, None),
({"ssh_pwauth": "unchanged"}, None),
- ({"chpasswd": {"list": "blah"}}, None),
+ ({"chpasswd": {"list": "blah"}}, "DEPRECATED"),
+ # Valid combinations
+ (
+ {
+ "chpasswd": {
+ "users": [
+ {
+ "name": "what-if-1",
+ "type": "text",
+ "password": "correct-horse-battery-staple",
+ },
+ {
+ "name": "what-if-2",
+ "type": "hash",
+ "password": "no-magic-parsing-done-here",
+ },
+ {
+ "name": "what-if-3",
+ "password": "type-is-optional-default-"
+ "value-is-hash",
+ },
+ {
+ "name": "what-if-4",
+ "type": "RANDOM",
+ },
+ ]
+ }
+ },
+ None,
+ ),
+ (
+ {
+ "chpasswd": {
+ "users": [
+ {
+ "name": "what-if-1",
+ "type": "plaintext",
+ "password": "type-has-two-legal-values: "
+ "{'hash', 'text'}",
+ }
+ ]
+ }
+ },
+ "is not valid under any of the given schemas",
+ ),
+ (
+ {
+ "chpasswd": {
+ "users": [
+ {
+ "name": "what-if-1",
+ "type": "RANDOM",
+ "password": "but you want random?",
+ }
+ ]
+ }
+ },
+ "is not valid under any of the given schemas",
+ ),
+ (
+ {"chpasswd": {"users": [{"password": "."}]}},
+ "is not valid under any of the given schemas",
+ ),
+ # when type != RANDOM, password is a required key
+ (
+ {
+ "chpasswd": {
+ "users": [{"name": "what-if-1", "type": "hash"}]
+ }
+ },
+ "is not valid under any of the given schemas",
+ ),
+ pytest.param(
+ {
+ "chpasswd": {
+ "users": [
+ {
+ "name": "sonata",
+ "password": "dit",
+ "dat": "dot",
+ }
+ ]
+ }
+ },
+ "is not valid under any of the given schemas",
+ id="dat_is_an_additional_property",
+ ),
+ (
+ {"chpasswd": {"users": [{"name": "."}]}},
+ "is not valid under any of the given schemas",
+ ),
# Test regex
- ({"chpasswd": {"list": ["user:pass"]}}, None),
+ ({"chpasswd": {"list": ["user:pass"]}}, "DEPRECATED"),
# Test valid
({"password": "pass"}, None),
# Test invalid values
diff --git a/tests/unittests/config/test_cc_ubuntu_advantage.py b/tests/unittests/config/test_cc_ubuntu_advantage.py
index 0c5544e1..657bfe51 100644
--- a/tests/unittests/config/test_cc_ubuntu_advantage.py
+++ b/tests/unittests/config/test_cc_ubuntu_advantage.py
@@ -1,4 +1,5 @@
# This file is part of cloud-init. See LICENSE file for license information.
+import logging
import re
import pytest
@@ -8,59 +9,137 @@ from cloudinit.config.cc_ubuntu_advantage import (
configure_ua,
handle,
maybe_install_ua_tools,
+ supplemental_schema_validation,
)
from cloudinit.config.schema import (
SchemaValidationError,
get_schema,
validate_cloudconfig_schema,
)
-from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
+from tests.unittests.helpers import does_not_raise, mock, skipUnlessJsonSchema
+from tests.unittests.util import get_cloud
# Module path used in mocks
MPATH = "cloudinit.config.cc_ubuntu_advantage"
-class FakeCloud(object):
- def __init__(self, distro):
- self.distro = distro
-
-
-class TestConfigureUA(CiTestCase):
-
- with_logs = True
- allowed_subp = [CiTestCase.SUBP_SHELL_TRUE]
-
- def setUp(self):
- super(TestConfigureUA, self).setUp()
- self.tmp = self.tmp_dir()
-
- @mock.patch("%s.subp.subp" % MPATH)
+@mock.patch(f"{MPATH}.subp.subp")
+class TestConfigureUA:
def test_configure_ua_attach_error(self, m_subp):
"""Errors from ua attach command are raised."""
m_subp.side_effect = subp.ProcessExecutionError(
"Invalid token SomeToken"
)
- with self.assertRaises(RuntimeError) as context_manager:
- configure_ua(token="SomeToken")
- self.assertEqual(
+ match = (
"Failure attaching Ubuntu Advantage:\nUnexpected error while"
" running command.\nCommand: -\nExit code: -\nReason: -\n"
- "Stdout: Invalid token SomeToken\nStderr: -",
- str(context_manager.exception),
+ "Stdout: Invalid token SomeToken\nStderr: -"
)
+ with pytest.raises(RuntimeError, match=match):
+ configure_ua(token="SomeToken")
- @mock.patch("%s.subp.subp" % MPATH)
- def test_configure_ua_attach_with_token(self, m_subp):
- """When token is provided, attach the machine to ua using the token."""
- configure_ua(token="SomeToken")
- m_subp.assert_called_once_with(["ua", "attach", "SomeToken"])
- self.assertEqual(
- "DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n",
- self.logs.getvalue(),
- )
+ @pytest.mark.parametrize(
+ "kwargs, call_args_list, log_record_tuples",
+ [
+ # When token is provided, attach the machine to ua using the token.
+ pytest.param(
+ {"token": "SomeToken"},
+ [mock.call(["ua", "attach", "SomeToken"])],
+ [
+ (
+ MPATH,
+ logging.DEBUG,
+ "Attaching to Ubuntu Advantage. ua attach SomeToken",
+ )
+ ],
+ id="with_token",
+ ),
+ # When services is an empty list, do not auto-enable attach.
+ pytest.param(
+ {"token": "SomeToken", "enable": []},
+ [mock.call(["ua", "attach", "SomeToken"])],
+ [
+ (
+ MPATH,
+ logging.DEBUG,
+ "Attaching to Ubuntu Advantage. ua attach SomeToken",
+ )
+ ],
+ id="with_empty_services",
+ ),
+ # When services is a list, only enable the specified services.
+ pytest.param(
+ {"token": "SomeToken", "enable": ["fips"]},
+ [
+ mock.call(["ua", "attach", "SomeToken"]),
+ mock.call(
+ ["ua", "enable", "--assume-yes", "fips"], capture=True
+ ),
+ ],
+ [
+ (
+ MPATH,
+ logging.DEBUG,
+ "Attaching to Ubuntu Advantage. ua attach SomeToken",
+ )
+ ],
+ id="with_specific_services",
+ ),
+ # When services is a string, treat it as a singleton list and warn
+ pytest.param(
+ {"token": "SomeToken", "enable": "fips"},
+ [
+ mock.call(["ua", "attach", "SomeToken"]),
+ mock.call(
+ ["ua", "enable", "--assume-yes", "fips"], capture=True
+ ),
+ ],
+ [
+ (
+ MPATH,
+ logging.DEBUG,
+ "Attaching to Ubuntu Advantage. ua attach SomeToken",
+ ),
+ (
+ MPATH,
+ logging.WARNING,
+ "ubuntu_advantage: enable should be a list, not a "
+ "string; treating as a single enable",
+ ),
+ ],
+ id="with_string_services",
+ ),
+ # When services is neither a string nor a list, warn but still attach
+ pytest.param(
+ {"token": "SomeToken", "enable": {"deffo": "wont work"}},
+ [mock.call(["ua", "attach", "SomeToken"])],
+ [
+ (
+ MPATH,
+ logging.DEBUG,
+ "Attaching to Ubuntu Advantage. ua attach SomeToken",
+ ),
+ (
+ MPATH,
+ logging.WARNING,
+ "ubuntu_advantage: enable should be a list, not a"
+ " dict; skipping enabling services",
+ ),
+ ],
+ id="with_weird_services",
+ ),
+ ],
+ )
+ @mock.patch(f"{MPATH}.maybe_install_ua_tools", mock.MagicMock())
+ def test_configure_ua_attach(
+ self, m_subp, kwargs, call_args_list, log_record_tuples, caplog
+ ):
+ configure_ua(**kwargs)
+ assert call_args_list == m_subp.call_args_list
+ for record_tuple in log_record_tuples:
+ assert record_tuple in caplog.record_tuples
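+ # caplog.record_tuples entries are (logger_name, level, message)
+ # triples, which is why each expected tuple pairs MPATH with a log
+ # level and the exact message text.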
- @mock.patch("%s.subp.subp" % MPATH)
- def test_configure_ua_attach_on_service_error(self, m_subp):
+ def test_configure_ua_attach_on_service_error(self, m_subp, caplog):
"""all services should be enabled and then any failures raised"""
def fake_subp(cmd, capture=None):
@@ -75,102 +154,86 @@ class TestConfigureUA(CiTestCase):
m_subp.side_effect = fake_subp
- with self.assertRaises(RuntimeError) as context_manager:
+ with pytest.raises(
+ RuntimeError,
+ match=re.escape(
+ 'Failure enabling Ubuntu Advantage service(s): "esm", "cc"'
+ ),
+ ):
configure_ua(token="SomeToken", enable=["esm", "cc", "fips"])
- self.assertEqual(
- m_subp.call_args_list,
- [
- mock.call(["ua", "attach", "SomeToken"]),
- mock.call(
- ["ua", "enable", "--assume-yes", "esm"], capture=True
- ),
- mock.call(
- ["ua", "enable", "--assume-yes", "cc"], capture=True
- ),
- mock.call(
- ["ua", "enable", "--assume-yes", "fips"], capture=True
- ),
- ],
- )
- self.assertIn(
- 'WARNING: Failure enabling "esm":\nUnexpected error'
+ assert m_subp.call_args_list == [
+ mock.call(["ua", "attach", "SomeToken"]),
+ mock.call(["ua", "enable", "--assume-yes", "esm"], capture=True),
+ mock.call(["ua", "enable", "--assume-yes", "cc"], capture=True),
+ mock.call(["ua", "enable", "--assume-yes", "fips"], capture=True),
+ ]
+ assert (
+ MPATH,
+ logging.WARNING,
+ 'Failure enabling "esm":\nUnexpected error'
" while running command.\nCommand: -\nExit code: -\nReason: -\n"
- "Stdout: Invalid ESM credentials\nStderr: -\n",
- self.logs.getvalue(),
- )
- self.assertIn(
- 'WARNING: Failure enabling "cc":\nUnexpected error'
+ "Stdout: Invalid ESM credentials\nStderr: -",
+ ) in caplog.record_tuples
+ assert (
+ MPATH,
+ logging.WARNING,
+ 'Failure enabling "cc":\nUnexpected error'
" while running command.\nCommand: -\nExit code: -\nReason: -\n"
- "Stdout: Invalid CC credentials\nStderr: -\n",
- self.logs.getvalue(),
- )
- self.assertEqual(
- 'Failure enabling Ubuntu Advantage service(s): "esm", "cc"',
- str(context_manager.exception),
+ "Stdout: Invalid CC credentials\nStderr: -",
+ ) in caplog.record_tuples
+ assert 'Failure enabling "fips"' not in caplog.text
+
+ def test_configure_ua_config_with_weird_params(self, m_subp, caplog):
+ """When configs not string or list, warn but still attach"""
+ configure_ua(
+ token="SomeToken", config=["http_proxy=http://some-proxy.net:3128"]
)
-
- @mock.patch("%s.subp.subp" % MPATH)
- def test_configure_ua_attach_with_empty_services(self, m_subp):
- """When services is an empty list, do not auto-enable attach."""
- configure_ua(token="SomeToken", enable=[])
- m_subp.assert_called_once_with(["ua", "attach", "SomeToken"])
- self.assertEqual(
- "DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n",
- self.logs.getvalue(),
- )
-
- @mock.patch("%s.subp.subp" % MPATH)
- def test_configure_ua_attach_with_specific_services(self, m_subp):
- """When services a list, only enable specific services."""
- configure_ua(token="SomeToken", enable=["fips"])
- self.assertEqual(
- m_subp.call_args_list,
- [
- mock.call(["ua", "attach", "SomeToken"]),
- mock.call(
- ["ua", "enable", "--assume-yes", "fips"], capture=True
- ),
- ],
- )
- self.assertEqual(
- "DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n",
- self.logs.getvalue(),
- )
-
- @mock.patch("%s.maybe_install_ua_tools" % MPATH, mock.MagicMock())
- @mock.patch("%s.subp.subp" % MPATH)
- def test_configure_ua_attach_with_string_services(self, m_subp):
- """When services a string, treat as singleton list and warn"""
- configure_ua(token="SomeToken", enable="fips")
- self.assertEqual(
- m_subp.call_args_list,
- [
- mock.call(["ua", "attach", "SomeToken"]),
- mock.call(
- ["ua", "enable", "--assume-yes", "fips"], capture=True
- ),
- ],
- )
- self.assertEqual(
- "WARNING: ubuntu_advantage: enable should be a list, not a"
- " string; treating as a single enable\n"
- "DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n",
- self.logs.getvalue(),
+ assert [
+ mock.call(["ua", "attach", "SomeToken"])
+ ] == m_subp.call_args_list
+ assert (
+ MPATH,
+ logging.WARNING,
+ "ubuntu_advantage: config should be a dict, not a"
+ " list; skipping enabling config parameters",
+ ) == caplog.record_tuples[-2]
+ assert (
+ MPATH,
+ logging.DEBUG,
+ "Attaching to Ubuntu Advantage. ua attach SomeToken",
+ ) == caplog.record_tuples[-1]
+
+ def test_configure_ua_config_error_invalid_url(self, m_subp, caplog):
+ """Errors from ua config command are raised."""
+ m_subp.side_effect = subp.ProcessExecutionError(
+ 'Failure enabling "http_proxy"'
)
+ with pytest.raises(
+ RuntimeError,
+ match=re.escape(
+ 'Failure enabling Ubuntu Advantage config(s): "http_proxy"'
+ ),
+ ):
+ configure_ua(
+ token="SomeToken", config={"http_proxy": "not-a-valid-url"}
+ )
- @mock.patch("%s.subp.subp" % MPATH)
- def test_configure_ua_attach_with_weird_services(self, m_subp):
- """When services not string or list, warn but still attach"""
- configure_ua(token="SomeToken", enable={"deffo": "wont work"})
- self.assertEqual(
- m_subp.call_args_list, [mock.call(["ua", "attach", "SomeToken"])]
- )
- self.assertEqual(
- "WARNING: ubuntu_advantage: enable should be a list, not a"
- " dict; skipping enabling services\n"
- "DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n",
- self.logs.getvalue(),
+ def test_configure_ua_config_error_non_string_values(self, m_subp):
+ """ValueError raised for any values expected as string type."""
+ cfg = {
+ "global_apt_http_proxy": "noscheme",
+ "http_proxy": ["no-proxy"],
+ "https_proxy": 1,
+ }
+ match = re.escape(
+ "Expected URL scheme http/https for"
+ " ua:config:global_apt_http_proxy. Found: noscheme\n"
+ "Expected a URL for ua:config:http_proxy. Found: ['no-proxy']\n"
+ "Expected a URL for ua:config:https_proxy. Found: 1"
)
+ with pytest.raises(ValueError, match=match):
+ supplemental_schema_validation(cfg)
+ assert 0 == m_subp.call_count
class TestUbuntuAdvantageSchema:
@@ -197,160 +260,223 @@ class TestUbuntuAdvantageSchema:
validate_cloudconfig_schema(config, get_schema(), strict=True)
-class TestHandle(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestHandle, self).setUp()
- self.tmp = self.tmp_dir()
-
- @mock.patch("%s.maybe_install_ua_tools" % MPATH)
- def test_handle_no_config(self, m_maybe_install_ua_tools):
- """When no ua-related configuration is provided, nothing happens."""
- cfg = {}
- handle("ua-test", cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertIn(
- "DEBUG: Skipping module named ua-test, no 'ubuntu_advantage'"
- " configuration found",
- self.logs.getvalue(),
- )
- self.assertEqual(m_maybe_install_ua_tools.call_count, 0)
+class TestHandle:
- @mock.patch("%s.configure_ua" % MPATH)
- @mock.patch("%s.maybe_install_ua_tools" % MPATH)
- def test_handle_tries_to_install_ubuntu_advantage_tools(
- self, m_install, m_cfg
- ):
- """If ubuntu_advantage is provided, try installing ua-tools package."""
- cfg = {"ubuntu_advantage": {"token": "valid"}}
- mycloud = FakeCloud(None)
- handle("nomatter", cfg=cfg, cloud=mycloud, log=self.logger, args=None)
- m_install.assert_called_once_with(mycloud)
+ cloud = get_cloud()
- @mock.patch("%s.configure_ua" % MPATH)
- @mock.patch("%s.maybe_install_ua_tools" % MPATH)
- def test_handle_passes_credentials_and_services_to_configure_ua(
- self, m_install, m_configure_ua
+ @pytest.mark.parametrize(
+ [
+ "cfg",
+ "cloud",
+ "log_record_tuples",
+ "maybe_install_call_args_list",
+ "configure_ua_call_args_list",
+ ],
+ [
+ # When no ua-related configuration is provided, nothing happens.
+ pytest.param(
+ {},
+ None,
+ [
+ (
+ MPATH,
+ logging.DEBUG,
+ "Skipping module named nomatter, no 'ubuntu_advantage'"
+ " configuration found",
+ )
+ ],
+ [],
+ [],
+ id="no_config",
+ ),
+ # If ubuntu_advantage is provided, try installing ua-tools package.
+ pytest.param(
+ {"ubuntu_advantage": {"token": "valid"}},
+ cloud,
+ [],
+ [mock.call(cloud)],
+ None,
+ id="tries_to_install_ubuntu_advantage_tools",
+ ),
+ # All ubuntu_advantage config keys are passed to configure_ua.
+ pytest.param(
+ {"ubuntu_advantage": {"token": "token", "enable": ["esm"]}},
+ cloud,
+ [],
+ [mock.call(cloud)],
+ [mock.call(token="token", enable=["esm"], config=None)],
+ id="passes_credentials_and_services_to_configure_ua",
+ ),
+ # Warning when ubuntu-advantage key is present with new config
+ pytest.param(
+ {"ubuntu-advantage": {"token": "token", "enable": ["esm"]}},
+ None,
+ [
+ (
+ MPATH,
+ logging.WARNING,
+ 'Deprecated configuration key "ubuntu-advantage"'
+ " provided. Expected underscore delimited "
+ '"ubuntu_advantage"; will attempt to continue.',
+ )
+ ],
+ None,
+ [mock.call(token="token", enable=["esm"], config=None)],
+ id="warns_on_deprecated_ubuntu_advantage_key_w_config",
+ ),
+ # ubuntu_advantage should be preferred over ubuntu-advantage
+ pytest.param(
+ {
+ "ubuntu-advantage": {"token": "nope", "enable": ["wrong"]},
+ "ubuntu_advantage": {"token": "token", "enable": ["esm"]},
+ },
+ None,
+ [
+ (
+ MPATH,
+ logging.WARNING,
+ 'Deprecated configuration key "ubuntu-advantage"'
+ " provided. Expected underscore delimited "
+ '"ubuntu_advantage"; will attempt to continue.',
+ )
+ ],
+ None,
+ [mock.call(token="token", enable=["esm"], config=None)],
+ id="prefers_new_style_config",
+ ),
+ ],
+ )
+ @mock.patch(f"{MPATH}.configure_ua")
+ @mock.patch(f"{MPATH}.maybe_install_ua_tools")
+ def test_handle(
+ self,
+ m_maybe_install_ua_tools,
+ m_configure_ua,
+ cfg,
+ cloud,
+ log_record_tuples,
+ maybe_install_call_args_list,
+ configure_ua_call_args_list,
+ caplog,
):
- """All ubuntu_advantage config keys are passed to configure_ua."""
- cfg = {"ubuntu_advantage": {"token": "token", "enable": ["esm"]}}
- handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None)
- m_configure_ua.assert_called_once_with(token="token", enable=["esm"])
+ handle("nomatter", cfg=cfg, cloud=cloud, log=None, args=None)
+ for record_tuple in log_record_tuples:
+ assert record_tuple in caplog.record_tuples
+ if maybe_install_call_args_list is not None:
+ assert (
+ maybe_install_call_args_list
+ == m_maybe_install_ua_tools.call_args_list
+ )
+ if configure_ua_call_args_list is not None:
+ assert configure_ua_call_args_list == m_configure_ua.call_args_list
- @mock.patch("%s.maybe_install_ua_tools" % MPATH, mock.MagicMock())
+ @pytest.mark.parametrize(
+ "cfg, handle_kwargs, match",
+ [
+ pytest.param(
+ {"ubuntu-advantage": {"commands": "nogo"}},
+ dict(cloud=None, args=None),
+ (
+ 'Deprecated configuration "ubuntu-advantage: commands" '
+ 'provided. Expected "token"'
+ ),
+ id="key_dashed",
+ ),
+ pytest.param(
+ {"ubuntu_advantage": {"commands": "nogo"}},
+ dict(cloud=None, args=None),
+ (
+ 'Deprecated configuration "ubuntu-advantage: commands" '
+ 'provided. Expected "token"'
+ ),
+ id="key_underscore",
+ ),
+ ],
+ )
@mock.patch("%s.configure_ua" % MPATH)
- def test_handle_warns_on_deprecated_ubuntu_advantage_key_w_config(
- self, m_configure_ua
+ def test_handle_error_on_deprecated_commands_key(
+ self, m_configure_ua, cfg, handle_kwargs, match
):
- """Warning when ubuntu-advantage key is present with new config"""
- cfg = {"ubuntu-advantage": {"token": "token", "enable": ["esm"]}}
- handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertEqual(
- 'WARNING: Deprecated configuration key "ubuntu-advantage"'
- ' provided. Expected underscore delimited "ubuntu_advantage";'
- " will attempt to continue.",
- self.logs.getvalue().splitlines()[0],
- )
- m_configure_ua.assert_called_once_with(token="token", enable=["esm"])
-
- def test_handle_error_on_deprecated_commands_key_dashed(self):
- """Error when commands is present in ubuntu-advantage key."""
- cfg = {"ubuntu-advantage": {"commands": "nogo"}}
- with self.assertRaises(RuntimeError) as context_manager:
- handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertEqual(
- 'Deprecated configuration "ubuntu-advantage: commands" provided.'
- ' Expected "token"',
- str(context_manager.exception),
- )
+ with pytest.raises(RuntimeError, match=match):
+ handle("nomatter", cfg=cfg, log=mock.Mock(), **handle_kwargs)
+ assert 0 == m_configure_ua.call_count
- def test_handle_error_on_deprecated_commands_key_underscored(self):
- """Error when commands is present in ubuntu_advantage key."""
- cfg = {"ubuntu_advantage": {"commands": "nogo"}}
- with self.assertRaises(RuntimeError) as context_manager:
- handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertEqual(
- 'Deprecated configuration "ubuntu-advantage: commands" provided.'
- ' Expected "token"',
- str(context_manager.exception),
- )
-
- @mock.patch("%s.maybe_install_ua_tools" % MPATH, mock.MagicMock())
- @mock.patch("%s.configure_ua" % MPATH)
- def test_handle_prefers_new_style_config(self, m_configure_ua):
- """ubuntu_advantage should be preferred over ubuntu-advantage"""
- cfg = {
- "ubuntu-advantage": {"token": "nope", "enable": ["wrong"]},
- "ubuntu_advantage": {"token": "token", "enable": ["esm"]},
- }
- handle("nomatter", cfg=cfg, cloud=None, log=self.logger, args=None)
- self.assertEqual(
- 'WARNING: Deprecated configuration key "ubuntu-advantage"'
- ' provided. Expected underscore delimited "ubuntu_advantage";'
- " will attempt to continue.",
- self.logs.getvalue().splitlines()[0],
- )
- m_configure_ua.assert_called_once_with(token="token", enable=["esm"])
+@mock.patch(f"{MPATH}.subp.which")
+class TestMaybeInstallUATools:
+ @pytest.mark.parametrize(
+ [
+ "which_return",
+ "update_side_effect",
+ "install_side_effect",
+ "expectation",
+ "log_msg",
+ ],
+ [
+ # Do nothing if ubuntu-advantage-tools already exists.
+ pytest.param(
+ "/usr/bin/ua", # already installed
+ RuntimeError("Some apt error"),
+ None,
+ does_not_raise(), # No RuntimeError
+ None,
+ id="noop_when_ua_tools_present",
+ ),
+ # logs and raises apt update errors
+ pytest.param(
+ None,
+ RuntimeError("Some apt error"),
+ None,
+ pytest.raises(RuntimeError, match="Some apt error"),
+ "Package update failed\nTraceback",
+ id="raises_update_errors",
+ ),
+ # logs and raises package install errors
+ pytest.param(
+ None,
+ None,
+ RuntimeError("Some install error"),
+ pytest.raises(RuntimeError, match="Some install error"),
+ "Failed to install ubuntu-advantage-tools\n",
+ id="raises_install_errors",
+ ),
+ ],
+ )
+ def test_maybe_install_ua_tools(
+ self,
+ m_which,
+ which_return,
+ update_side_effect,
+ install_side_effect,
+ expectation,
+ log_msg,
+ caplog,
+ ):
+ m_which.return_value = which_return
+ cloud = mock.MagicMock()
+ if install_side_effect is None:
+ cloud.distro.update_package_sources.side_effect = (
+ update_side_effect
+ )
+ else:
+ cloud.distro.update_package_sources.return_value = None
+ cloud.distro.install_packages.side_effect = install_side_effect
+ with expectation:
+ maybe_install_ua_tools(cloud=cloud)
+ if log_msg is not None:
+ assert log_msg in caplog.text
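+ # Mirrors maybe_install_ua_tools(): nothing runs when ``ua`` is
+ # already on PATH; otherwise update_package_sources() and then
+ # install_packages(["ubuntu-advantage-tools"]) are attempted, with
+ # failures logged and re-raised.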
-class TestMaybeInstallUATools(CiTestCase):
-
- with_logs = True
-
- def setUp(self):
- super(TestMaybeInstallUATools, self).setUp()
- self.tmp = self.tmp_dir()
-
- @mock.patch("%s.subp.which" % MPATH)
- def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which):
- """Do nothing if ubuntu-advantage-tools already exists."""
- m_which.return_value = "/usr/bin/ua" # already installed
- distro = mock.MagicMock()
- distro.update_package_sources.side_effect = RuntimeError(
- "Some apt error"
- )
- maybe_install_ua_tools(cloud=FakeCloud(distro)) # No RuntimeError
-
- @mock.patch("%s.subp.which" % MPATH)
- def test_maybe_install_ua_tools_raises_update_errors(self, m_which):
- """maybe_install_ua_tools logs and raises apt update errors."""
- m_which.return_value = None
- distro = mock.MagicMock()
- distro.update_package_sources.side_effect = RuntimeError(
- "Some apt error"
- )
- with self.assertRaises(RuntimeError) as context_manager:
- maybe_install_ua_tools(cloud=FakeCloud(distro))
- self.assertEqual("Some apt error", str(context_manager.exception))
- self.assertIn("Package update failed\nTraceback", self.logs.getvalue())
-
- @mock.patch("%s.subp.which" % MPATH)
- def test_maybe_install_ua_raises_install_errors(self, m_which):
- """maybe_install_ua_tools logs and raises package install errors."""
- m_which.return_value = None
- distro = mock.MagicMock()
- distro.update_package_sources.return_value = None
- distro.install_packages.side_effect = RuntimeError(
- "Some install error"
- )
- with self.assertRaises(RuntimeError) as context_manager:
- maybe_install_ua_tools(cloud=FakeCloud(distro))
- self.assertEqual("Some install error", str(context_manager.exception))
- self.assertIn(
- "Failed to install ubuntu-advantage-tools\n", self.logs.getvalue()
- )
-
- @mock.patch("%s.subp.which" % MPATH)
def test_maybe_install_ua_tools_happy_path(self, m_which):
"""maybe_install_ua_tools installs ubuntu-advantage-tools."""
m_which.return_value = None
- distro = mock.MagicMock() # No errors raised
- maybe_install_ua_tools(cloud=FakeCloud(distro))
- distro.update_package_sources.assert_called_once_with()
- distro.install_packages.assert_called_once_with(
- ["ubuntu-advantage-tools"]
- )
+ cloud = mock.MagicMock() # No errors raised
+ maybe_install_ua_tools(cloud=cloud)
+ assert [
+ mock.call()
+ ] == cloud.distro.update_package_sources.call_args_list
+ assert [
+ mock.call(["ubuntu-advantage-tools"])
+ ] == cloud.distro.install_packages.call_args_list
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_ubuntu_autoinstall.py b/tests/unittests/config/test_cc_ubuntu_autoinstall.py
new file mode 100644
index 00000000..87f44f82
--- /dev/null
+++ b/tests/unittests/config/test_cc_ubuntu_autoinstall.py
@@ -0,0 +1,141 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import logging
+from unittest import mock
+
+import pytest
+
+from cloudinit.config import cc_ubuntu_autoinstall
+from cloudinit.config.schema import (
+ SchemaValidationError,
+ get_schema,
+ validate_cloudconfig_schema,
+)
+from tests.unittests.helpers import skipUnlessJsonSchema
+from tests.unittests.util import get_cloud
+
+LOG = logging.getLogger(__name__)
+
+MODPATH = "cloudinit.config.cc_ubuntu_autoinstall."
+
+SAMPLE_SNAP_LIST_OUTPUT = """
+Name Version Rev Tracking ...
+core20 20220527 1518 latest/stable ...
+lxd git-69dc707 23315 latest/edge ...
+"""
+SAMPLE_SNAP_LIST_SUBIQUITY = (
+ SAMPLE_SNAP_LIST_OUTPUT
+ + """
+subiquity 22.06.01 23315 latest/stable ...
+"""
+)
+SAMPLE_SNAP_LIST_DESKTOP_INSTALLER = (
+ SAMPLE_SNAP_LIST_OUTPUT
+ + """
+ubuntu-desktop-installer 22.06.01 23315 latest/stable ...
+"""
+)
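+
+# The handler decides whether subiquity or ubuntu-desktop-installer
+# will consume the config by scanning ``snap list`` output. A minimal
+# sketch of such a check (the exact parsing in cc_ubuntu_autoinstall
+# may differ):
+#
+#   def has_installer_snap(snap_list: str) -> bool:
+#       names = {ln.split()[0] for ln in snap_list.splitlines() if ln}
+#       return bool(names & {"subiquity", "ubuntu-desktop-installer"})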
+
+
+class TestValidateConfigSchema:
+ @pytest.mark.parametrize(
+ "src_cfg,error_msg",
+ [
+ pytest.param(
+ {"autoinstall": 1},
+ "autoinstall: Expected dict type but found: int",
+ id="err_non_dict",
+ ),
+ pytest.param(
+ {"autoinstall": {}},
+ "autoinstall: Missing required 'version' key",
+ id="err_require_version_key",
+ ),
+ pytest.param(
+ {"autoinstall": {"version": "v1"}},
+ "autoinstall.version: Expected int type but found: str",
+ id="err_version_non_int",
+ ),
+ ],
+ )
+ def test_runtime_validation_errors(self, src_cfg, error_msg):
+ """cloud-init raises errors at runtime on invalid autoinstall config"""
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ cc_ubuntu_autoinstall.validate_config_schema(src_cfg)
+
+
+@mock.patch(MODPATH + "subp")
+class TestHandleAutoinstall:
+ """Test cc_ubuntu_autoinstall handling of config."""
+
+ @pytest.mark.parametrize(
+ "cfg,snap_list,subp_calls,logs",
+ [
+ pytest.param(
+ {},
+ SAMPLE_SNAP_LIST_OUTPUT,
+ [],
+ ["Skipping module named name, no 'autoinstall' key"],
+ id="skip_no_cfg",
+ ),
+ pytest.param(
+ {"autoinstall": {"version": 1}},
+ SAMPLE_SNAP_LIST_OUTPUT,
+ [mock.call(["snap", "list"])],
+ [
+ "Skipping autoinstall module. Expected one of the Ubuntu"
+ " installer snap packages to be present: subiquity,"
+ " ubuntu-desktop-installer"
+ ],
+ id="valid_autoinstall_schema_checks_snaps",
+ ),
+ pytest.param(
+ {"autoinstall": {"version": 1}},
+ SAMPLE_SNAP_LIST_SUBIQUITY,
+ [mock.call(["snap", "list"])],
+ [
+ "Valid autoinstall schema. Config will be processed by"
+ " subiquity"
+ ],
+ id="valid_autoinstall_schema_sees_subiquity",
+ ),
+ pytest.param(
+ {"autoinstall": {"version": 1}},
+ SAMPLE_SNAP_LIST_DESKTOP_INSTALLER,
+ [mock.call(["snap", "list"])],
+ [
+ "Valid autoinstall schema. Config will be processed by"
+ " ubuntu-desktop-installer"
+ ],
+ id="valid_autoinstall_schema_sees_desktop_installer",
+ ),
+ ],
+ )
+ def test_handle_autoinstall_cfg(
+ self, subp, cfg, snap_list, subp_calls, logs, caplog
+ ):
+ subp.return_value = snap_list, ""
+ cloud = get_cloud(distro="ubuntu")
+ cc_ubuntu_autoinstall.handle("name", cfg, cloud, LOG, None)
+ assert subp_calls == subp.call_args_list
+ for log in logs:
+ assert log in caplog.text
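+ # subp() returns an (out, err) tuple, hence the ``snap_list, ""``
+ # return value configured above.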
+
+
+class TestAutoInstallSchema:
+ @pytest.mark.parametrize(
+ "config, error_msg",
+ (
+ (
+ {"autoinstall": {}},
+ "autoinstall: 'version' is a required property",
+ ),
+ ),
+ )
+ @skipUnlessJsonSchema()
+ def test_schema_validation(self, config, error_msg):
+ if error_msg is None:
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
+ else:
+ with pytest.raises(SchemaValidationError, match=error_msg):
+ validate_cloudconfig_schema(config, get_schema(), strict=True)
diff --git a/tests/unittests/config/test_cc_users_groups.py b/tests/unittests/config/test_cc_users_groups.py
index a33fd55f..bbb8bec4 100644
--- a/tests/unittests/config/test_cc_users_groups.py
+++ b/tests/unittests/config/test_cc_users_groups.py
@@ -298,45 +298,116 @@ class TestHandleUsersGroups(CiTestCase):
class TestUsersGroupsSchema:
@pytest.mark.parametrize(
- "config, error_msg",
+ "config, problem_msg, has_errors",
[
# Validate default settings not covered by examples
- ({"groups": ["anygrp"]}, None),
- ({"groups": "anygrp,anyothergroup"}, None), # DEPRECATED
+ ({"groups": ["anygrp"]}, None, False),
+ ({"groups": "anygrp,anyothergroup"}, None, False), # DEPRECATED
# Create anygrp with user1 as member
- ({"groups": [{"anygrp": "user1"}]}, None),
+ ({"groups": [{"anygrp": "user1"}]}, None, False),
# Create anygrp with user1 as member using object/string syntax
- ({"groups": {"anygrp": "user1"}}, None),
+ ({"groups": {"anygrp": "user1"}}, None, False),
# Create anygrp with user1 as member using object/list syntax
- ({"groups": {"anygrp": ["user1"]}}, None),
- ({"groups": [{"anygrp": ["user1", "user2"]}]}, None),
+ ({"groups": {"anygrp": ["user1"]}}, None, False),
+ ({"groups": [{"anygrp": ["user1", "user2"]}]}, None, False),
# Make default username "olddefault": DEPRECATED
- ({"user": "olddefault"}, None),
+ ({"user": "olddefault"}, None, False),
# Create multiple users, and include default user. DEPRECATED
- ({"users": "oldstyle,default"}, None),
- ({"users": ["default"]}, None),
- ({"users": ["default", ["aaa", "bbb"]]}, None),
- ({"users": ["foobar"]}, None), # no default user creation
- ({"users": [{"name": "bbsw"}]}, None),
+ ({"users": [{"name": "bbsw"}]}, None, False),
+ (
+ {"users": [{"name": "bbsw", "garbage-key": None}]},
+ "is not valid under any of the given schemas",
+ True,
+ ),
+ (
+ {"groups": {"": "bbsw"}},
+ "does not match any of the regexes",
+ True,
+ ),
(
{"users": [{"name": "bbsw", "groups": ["anygrp"]}]},
None,
+ False,
), # user with a list of groups
- ({"groups": [{"yep": ["user1"]}]}, None),
+ ({"groups": [{"yep": ["user1"]}]}, None, False),
+ ({"users": "oldstyle,default"}, None, False),
+ ({"users": ["default"]}, None, False),
+ ({"users": ["default", ["aaa", "bbb"]]}, None, False),
+ ({"users": ["foobar"]}, None, False), # no default user creation
+ (
+ {"users": [{"name": "bbsw", "lock-passwd": True}]},
+ "users.0.lock-passwd: DEPRECATED."
+ " Dropped after April 2027. Use ``lock_passwd``."
+ " Default: ``true``",
+ False,
+ ),
+ # users.groups supports comma-delimited str, list and object type
+ (
+ {"users": [{"name": "bbsw", "groups": "adm, sudo"}]},
+ None,
+ False,
+ ),
+ (
+ {
+ "users": [
+ {"name": "bbsw", "groups": {"adm": None, "sudo": None}}
+ ]
+ },
+ "Cloud config schema deprecations: users.0.groups.adm:"
+ " DEPRECATED. When providing an object for"
+ " users.groups the ``<group_name>`` keys are the groups to"
+ " add this user to,",
+ False,
+ ),
+ ({"groups": [{"yep": ["user1"]}]}, None, False),
(
{"user": ["no_list_allowed"]},
re.escape("user: ['no_list_allowed'] is not valid "),
+ True,
),
(
{"groups": {"anygrp": 1}},
"groups.anygrp: 1 is not of type 'string', 'array'",
+ True,
+ ),
+ (
+ {
+ "users": [{"inactive": True, "name": "cloudy"}],
+ },
+ "errors: users.0: {'inactive': True",
+ True,
+ ),
+ (
+ {
+ "users": [
+ {
+ "expiredate": "2038-01-19",
+ "groups": "users",
+ "name": "foobar",
+ }
+ ]
+ },
+ None,
+ False,
+ ),
+ (
+ {"user": {"name": "aciba", "groups": {"sbuild": None}}},
+ (
+ "deprecations: user.groups.sbuild: DEPRECATED. "
+ "When providing an object for users.groups the "
+ "``<group_name>`` keys are the groups to add this user to"
+ ),
+ False,
),
],
)
@skipUnlessJsonSchema()
- def test_schema_validation(self, config, error_msg):
- if error_msg is None:
+ def test_schema_validation(self, config, problem_msg, has_errors):
+ if problem_msg is None:
validate_cloudconfig_schema(config, get_schema(), strict=True)
else:
- with pytest.raises(SchemaValidationError, match=error_msg):
+ with pytest.raises(
+ SchemaValidationError, match=problem_msg
+ ) as exc_info:
validate_cloudconfig_schema(config, get_schema(), strict=True)
+ assert has_errors == exc_info.value.has_errors()
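+ # has_errors() distinguishes hard schema errors from
+ # deprecation-only problems: both surface as SchemaValidationError,
+ # but only the former mark the config invalid.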
diff --git a/tests/unittests/config/test_cc_write_files.py b/tests/unittests/config/test_cc_write_files.py
index 01c920e8..a9a40265 100644
--- a/tests/unittests/config/test_cc_write_files.py
+++ b/tests/unittests/config/test_cc_write_files.py
@@ -65,6 +65,7 @@ VALID_SCHEMA = {
class TestWriteFiles(FilesystemMockingTestCase):
with_logs = True
+ owner = "root:root"
def setUp(self):
super(TestWriteFiles, self).setUp()
@@ -75,7 +76,11 @@ class TestWriteFiles(FilesystemMockingTestCase):
self.patchUtils(self.tmp)
expected = "hello world\n"
filename = "/tmp/my.file"
- write_files("test_simple", [{"content": expected, "path": filename}])
+ write_files(
+ "test_simple",
+ [{"content": expected, "path": filename}],
+ self.owner,
+ )
self.assertEqual(util.load_file(filename), expected)
def test_append(self):
@@ -88,13 +93,14 @@ class TestWriteFiles(FilesystemMockingTestCase):
write_files(
"test_append",
[{"content": added, "path": filename, "append": "true"}],
+ self.owner,
)
self.assertEqual(util.load_file(filename), expected)
def test_yaml_binary(self):
self.patchUtils(self.tmp)
data = util.load_yaml(YAML_TEXT)
- write_files("testname", data["write_files"])
+ write_files("testname", data["write_files"], self.owner)
for path, content in YAML_CONTENT_EXPECTED.items():
self.assertEqual(util.load_file(path), content)
@@ -128,7 +134,7 @@ class TestWriteFiles(FilesystemMockingTestCase):
files.append(cur)
expected.append((cur["path"], data))
- write_files("test_decoding", files)
+ write_files("test_decoding", files, self.owner)
for path, content in expected:
self.assertEqual(util.load_file(path, decode=False), content)
diff --git a/tests/unittests/config/test_cc_yum_add_repo.py b/tests/unittests/config/test_cc_yum_add_repo.py
index 010bea18..d821d40a 100644
--- a/tests/unittests/config/test_cc_yum_add_repo.py
+++ b/tests/unittests/config/test_cc_yum_add_repo.py
@@ -150,6 +150,24 @@ class TestAddYumRepoSchema:
{"yum_repos": {"My Repo": {"enabled": "nope"}}},
"yum_repos.My Repo.enabled: 'nope' is not of type 'boolean'",
),
+ (
+ {
+ "yum_repos": {
+ "hotwheels repo": {"": "config option requires a name"}
+ }
+ },
+ "does not match any of the regexes",
+ ),
+ (
+ {
+ "yum_repos": {
+ "matchbox repo": {
+ "$$$$$": "config option requires a valid name"
+ }
+ }
+ },
+ "does not match any of the regexes",
+ ),
],
)
@helpers.skipUnlessJsonSchema()
diff --git a/tests/unittests/config/test_modules.py b/tests/unittests/config/test_modules.py
new file mode 100644
index 00000000..bc105064
--- /dev/null
+++ b/tests/unittests/config/test_modules.py
@@ -0,0 +1,174 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+
+import importlib
+import logging
+from pathlib import Path
+from typing import List
+
+import pytest
+
+from cloudinit import util
+from cloudinit.config.modules import ModuleDetails, Modules, _is_active
+from cloudinit.config.schema import MetaSchema
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import FREQUENCIES
+from tests.unittests.helpers import cloud_init_project_dir, mock
+
+M_PATH = "cloudinit.config.modules."
+
+
+def get_module_names() -> List[str]:
+ """Return list of module names in cloudinit/config"""
+ files = list(
+ Path(cloud_init_project_dir("cloudinit/config/")).glob("cc_*.py")
+ )
+
+ return [mod.stem for mod in files]
+
+
+def get_modules():
+ examples = []
+ for mod_name in get_module_names():
+ module = importlib.import_module(f"cloudinit.config.{mod_name}")
+ for i, example in enumerate(module.meta.get("examples", [])):
+ examples.append(
+ pytest.param(
+ mod_name, module, example, id=f"{mod_name}_example_{i}"
+ )
+ )
+ return examples
+
+
+class TestModules:
+ @pytest.mark.parametrize("frequency", FREQUENCIES)
+ @pytest.mark.parametrize(
+ "activate_by_schema_keys, cfg, active",
+ [
+ (None, {}, True),
+ (None, {"module_name": {"x": "y"}}, True),
+ ([], {"module_name": {"x": "y"}}, True),
+ (["module_name"], {"module_name": {"x": "y"}}, True),
+ (
+ ["module_name", "other_module"],
+ {"module_name": {"x": "y"}},
+ True,
+ ),
+ (["module_name"], {"other_module": {"x": "y"}}, False),
+ (
+ ["x"],
+ {"module_name": {"x": "y"}, "other_module": {"x": "y"}},
+ False,
+ ),
+ ],
+ )
+ def test__is_inapplicable(
+ self, activate_by_schema_keys, cfg, active, frequency
+ ):
+ module = mock.Mock()
+ module.meta = MetaSchema(
+ name="module_name",
+ id="cc_module_name",
+ title="title",
+ description="description",
+ distros=[ALL_DISTROS],
+ examples=["example_0", "example_1"],
+ frequency=frequency,
+ )
+ if activate_by_schema_keys is not None:
+ module.meta["activate_by_schema_keys"] = activate_by_schema_keys
+ module_details = ModuleDetails(
+ module=module,
+ name="name",
+ frequency=frequency,
+ run_args=[],
+ )
+ assert active == _is_active(module_details, cfg)
+
+ @pytest.mark.parametrize("mod_name, module, example", get_modules())
+ def test__is_inapplicable_examples(self, mod_name, module, example):
+ module_details = ModuleDetails(
+ module=module,
+ name=mod_name,
+ frequency=["always"],
+ run_args=[],
+ )
+ assert True is _is_active(module_details, util.load_yaml(example))
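+ # _is_active() returns False only when a module declares
+ # activate_by_schema_keys and none of those keys appear in the
+ # config; an absent or empty list means the module always runs, as
+ # the table in test__is_inapplicable shows.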
+
+ @pytest.mark.parametrize("frequency", FREQUENCIES)
+ @pytest.mark.parametrize("active", [True, False])
+ def test_run_section(self, frequency, active, caplog, mocker):
+ mocker.patch(M_PATH + "_is_active", return_value=active)
+
+ mods = Modules(
+ init=mock.Mock(), cfg_files=mock.Mock(), reporter=mock.Mock()
+ )
+ mods._cached_cfg = {}
+ raw_name = "my_module"
+ module = mock.Mock()
+ module.meta = MetaSchema(
+ name=raw_name,
+ id=f"cc_{raw_name}",
+ title="title",
+ description="description",
+ distros=[ALL_DISTROS],
+ examples=["example_0", "example_1"],
+ frequency=frequency,
+ )
+ module_details = ModuleDetails(
+ module=module,
+ name=raw_name,
+ frequency=frequency,
+ run_args=["<arg>"],
+ )
+ mocker.patch.object(
+ mods,
+ "_fixup_modules",
+ return_value=[module_details],
+ )
+ m_run_modules = mocker.patch.object(mods, "_run_modules")
+
+ assert mods.run_section("not_matter")
+ if active:
+ assert [
+ mock.call([list(module_details)])
+ ] == m_run_modules.call_args_list
+ assert not caplog.text
+ else:
+ assert [mock.call([])] == m_run_modules.call_args_list
+ assert (
+ logging.INFO,
+ (
+ f"Skipping modules '{raw_name}' because no applicable"
+ " config is provided."
+ ),
+ ) == caplog.record_tuples[-1][1:]
+
+ @pytest.mark.parametrize("mod_name, module, example", get_modules())
+ def test_run_section_examples(
+ self, mod_name, module, example, caplog, mocker
+ ):
+ mods = Modules(
+ init=mock.Mock(), cfg_files=mock.Mock(), reporter=mock.Mock()
+ )
+ cfg = util.load_yaml(example)
+ cfg["unverified_modules"] = [mod_name] # Force to run unverified mod
+ mods._cached_cfg = cfg
+ module_details = ModuleDetails(
+ module=module,
+ name=mod_name,
+ frequency=["always"],
+ run_args=[],
+ )
+ mocker.patch.object(
+ mods,
+ "_fixup_modules",
+ return_value=[module_details],
+ )
+ mocker.patch.object(module, "handle")
+ m_run_modules = mocker.patch.object(mods, "_run_modules")
+ assert mods.run_section("not_matter")
+ assert [
+ mock.call([list(module_details)])
+ ] == m_run_modules.call_args_list
+ assert "Skipping" not in caplog.text
diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py
index 4a41c4c1..76640436 100644
--- a/tests/unittests/config/test_schema.py
+++ b/tests/unittests/config/test_schema.py
@@ -9,11 +9,12 @@ import logging
import os
import re
import sys
-from copy import copy
+from collections import namedtuple
+from copy import deepcopy
from pathlib import Path
from textwrap import dedent
from types import ModuleType
-from typing import List
+from typing import List, Optional, Sequence, Set
import pytest
@@ -21,12 +22,14 @@ from cloudinit.config.schema import (
CLOUD_CONFIG_HEADER,
VERSIONED_USERDATA_SCHEMA_FILE,
MetaSchema,
+ SchemaProblem,
SchemaValidationError,
annotated_cloudconfig_file,
get_jsonschema_validator,
get_meta_doc,
get_schema,
get_schema_dir,
+ handle_schema_args,
load_doc,
main,
validate_cloudconfig_file,
@@ -37,13 +40,19 @@ from cloudinit.distros import OSFAMILIES
from cloudinit.safeyaml import load, load_with_marks
from cloudinit.settings import FREQUENCIES
from cloudinit.util import load_file, write_file
+from tests.hypothesis import given
+from tests.hypothesis_jsonschema import from_schema
from tests.unittests.helpers import (
CiTestCase,
cloud_init_project_dir,
+ does_not_raise,
mock,
+ skipUnlessHypothesisJsonSchema,
skipUnlessJsonSchema,
)
+M_PATH = "cloudinit.config.schema."
+
def get_schemas() -> dict:
"""Return all legacy module schemas
@@ -156,6 +165,7 @@ class TestGetSchema:
{"$ref": "#/$defs/cc_apk_configure"},
{"$ref": "#/$defs/cc_apt_configure"},
{"$ref": "#/$defs/cc_apt_pipelining"},
+ {"$ref": "#/$defs/cc_ubuntu_autoinstall"},
{"$ref": "#/$defs/cc_bootcmd"},
{"$ref": "#/$defs/cc_byobu"},
{"$ref": "#/$defs/cc_ca_certs"},
@@ -240,10 +250,12 @@ class SchemaValidationErrorTest(CiTestCase):
def test_schema_validation_error_expects_schema_errors(self):
"""SchemaValidationError is initialized from schema_errors."""
- errors = (
- ("key.path", 'unexpected key "junk"'),
- ("key2.path", '"-123" is not a valid "hostname" format'),
- )
+ errors = [
+ SchemaProblem("key.path", 'unexpected key "junk"'),
+ SchemaProblem(
+ "key2.path", '"-123" is not a valid "hostname" format'
+ ),
+ ]
exception = SchemaValidationError(schema_errors=errors)
self.assertIsInstance(exception, Exception)
self.assertEqual(exception.schema_errors, errors)
@@ -286,7 +298,7 @@ class TestValidateCloudConfigSchema:
assert "cloudinit.config.schema" == module
assert logging.WARNING == log_level
assert (
- "Invalid cloud-config provided: \np1: -1 is not of type 'string'"
+ "Invalid cloud-config provided:\np1: -1 is not of type 'string'"
== log_msg
)
@@ -374,6 +386,201 @@ class TestValidateCloudConfigSchema:
in caplog.text
)
+ @skipUnlessJsonSchema()
+ @pytest.mark.parametrize("log_deprecations", [True, False])
+ @pytest.mark.parametrize(
+ "schema,config,expected_msg",
+ [
+ (
+ {
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "properties": {
+ "a-b": {
+ "type": "string",
+ "deprecated": True,
+ "description": "<desc>",
+ },
+ "a_b": {"type": "string", "description": "noop"},
+ },
+ },
+ {"a-b": "asdf"},
+ "Deprecated cloud-config provided:\na-b: DEPRECATED. <desc>",
+ ),
+ (
+ {
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "properties": {
+ "x": {
+ "oneOf": [
+ {"type": "integer", "description": "noop"},
+ {
+ "type": "string",
+ "deprecated": True,
+ "description": "<desc>",
+ },
+ ]
+ },
+ },
+ },
+ {"x": "+5"},
+ "Deprecated cloud-config provided:\nx: DEPRECATED. <desc>",
+ ),
+ (
+ {
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "properties": {
+ "x": {
+ "allOf": [
+ {"type": "string", "description": "noop"},
+ {
+ "deprecated": True,
+ "description": "<desc>",
+ },
+ ]
+ },
+ },
+ },
+ {"x": "5"},
+ "Deprecated cloud-config provided:\nx: DEPRECATED. <desc>",
+ ),
+ (
+ {
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "properties": {
+ "x": {
+ "anyOf": [
+ {"type": "integer", "description": "noop"},
+ {
+ "type": "string",
+ "deprecated": True,
+ "description": "<desc>",
+ },
+ ]
+ },
+ },
+ },
+ {"x": "5"},
+ "Deprecated cloud-config provided:\nx: DEPRECATED. <desc>",
+ ),
+ (
+ {
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "properties": {
+ "x": {
+ "type": "string",
+ "deprecated": True,
+ "description": "<desc>",
+ },
+ },
+ },
+ {"x": "+5"},
+ "Deprecated cloud-config provided:\nx: DEPRECATED. <desc>",
+ ),
+ (
+ {
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "properties": {
+ "x": {
+ "type": "string",
+ "deprecated": False,
+ "description": "<desc>",
+ },
+ },
+ },
+ {"x": "+5"},
+ None,
+ ),
+ (
+ {
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "$defs": {
+ "my_ref": {
+ "deprecated": True,
+ "description": "<desc>",
+ }
+ },
+ "properties": {
+ "x": {
+ "allOf": [
+ {"type": "string", "description": "noop"},
+ {"$ref": "#/$defs/my_ref"},
+ ]
+ },
+ },
+ },
+ {"x": "+5"},
+ "Deprecated cloud-config provided:\nx: DEPRECATED. <desc>",
+ ),
+ (
+ {
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "$defs": {
+ "my_ref": {
+ "deprecated": True,
+ }
+ },
+ "properties": {
+ "x": {
+ "allOf": [
+ {
+ "type": "string",
+ "description": "noop",
+ },
+ {"$ref": "#/$defs/my_ref"},
+ ]
+ },
+ },
+ },
+ {"x": "+5"},
+ "Deprecated cloud-config provided:\nx: DEPRECATED.",
+ ),
+ (
+ {
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "patternProperties": {
+ "^.+$": {
+ "minItems": 1,
+ "deprecated": True,
+ "description": "<desc>",
+ }
+ },
+ },
+ {"a-b": "asdf"},
+ "Deprecated cloud-config provided:\na-b: DEPRECATED. <desc>",
+ ),
+ pytest.param(
+ {
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "patternProperties": {
+ "^.+$": {
+ "minItems": 1,
+ "deprecated": True,
+ }
+ },
+ },
+ {"a-b": "asdf"},
+ "Deprecated cloud-config provided:\na-b: DEPRECATED.",
+ id="deprecated_pattern_property_without_description",
+ ),
+ ],
+ )
+ def test_validateconfig_logs_deprecations(
+ self, schema, config, expected_msg, log_deprecations, caplog
+ ):
+ validate_cloudconfig_schema(
+ config,
+ schema,
+ strict_metaschema=True,
+ log_deprecations=log_deprecations,
+ )
+ if expected_msg is None:
+ return
+ log_record = (M_PATH[:-1], logging.WARNING, expected_msg)
+ if log_deprecations:
+ assert log_record == caplog.record_tuples[-1]
+ else:
+ assert log_record not in caplog.record_tuples
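+ # Together these cases cover each composition jsonschema allows for
+ # the ``deprecated`` marker: plain properties, oneOf/anyOf/allOf
+ # branches, $ref indirection, and patternProperties.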
+
class TestCloudConfigExamples:
metas = get_metas()
@@ -481,7 +688,7 @@ class TestValidateCloudConfigFile:
@skipUnlessJsonSchema()
@pytest.mark.parametrize("annotate", (True, False))
- def test_validateconfig_file_sctrictly_validates_schema(
+ def test_validateconfig_file_strictly_validates_schema(
self, annotate, tmpdir
):
"""validate_cloudconfig_file raises errors on invalid schema."""
@@ -514,14 +721,22 @@ class TestSchemaDocMarkdown:
"frequency": "frequency",
"distros": ["debian", "rhel"],
"examples": [
- 'ex1:\n [don\'t, expand, "this"]',
- "ex2: true",
+ 'prop1:\n [don\'t, expand, "this"]',
+ "prop2: true",
],
}
- def test_get_meta_doc_returns_restructured_text(self):
+ @pytest.mark.parametrize(
+ "meta_update",
+ [
+ None,
+ {"activate_by_schema_keys": None},
+ {"activate_by_schema_keys": []},
+ ],
+ )
+ def test_get_meta_doc_returns_restructured_text(self, meta_update):
"""get_meta_doc returns restructured text for a cloudinit schema."""
- full_schema = copy(self.required_schema)
+ full_schema = deepcopy(self.required_schema)
full_schema.update(
{
"properties": {
@@ -533,8 +748,11 @@ class TestSchemaDocMarkdown:
}
}
)
+ meta = deepcopy(self.meta)
+ if meta_update:
+ meta.update(meta_update)
- doc = get_meta_doc(self.meta, full_schema)
+ doc = get_meta_doc(meta, full_schema)
assert (
dedent(
"""
@@ -555,10 +773,65 @@ class TestSchemaDocMarkdown:
**Examples**::
- ex1:
+ prop1:
+ [don't, expand, "this"]
+ # --- Example2 ---
+ prop2: true
+ """
+ )
+ == doc
+ )
+
+ def test_get_meta_doc_full_with_activate_by_schema_keys(self):
+ full_schema = deepcopy(self.required_schema)
+ full_schema.update(
+ {
+ "properties": {
+ "prop1": {
+ "type": "array",
+ "description": "prop-description",
+ "items": {"type": "string"},
+ },
+ "prop2": {
+ "type": "boolean",
+ "description": "prop2-description",
+ },
+ },
+ }
+ )
+
+ meta = deepcopy(self.meta)
+ meta["activate_by_schema_keys"] = ["prop1", "prop2"]
+
+ doc = get_meta_doc(meta, full_schema)
+ assert (
+ dedent(
+ """
+ name
+ ----
+ **Summary:** title
+
+ description
+
+ **Internal name:** ``id``
+
+ **Module frequency:** frequency
+
+ **Supported distros:** debian, rhel
+
+ **Activate only on keys:** ``prop1``, ``prop2``
+
+ **Config schema**:
+ **prop1:** (array of string) prop-description
+
+ **prop2:** (boolean) prop2-description
+
+ **Examples**::
+
+ prop1:
[don't, expand, "this"]
# --- Example2 ---
- ex2: true
+ prop2: true
"""
)
== doc
@@ -719,7 +992,7 @@ class TestSchemaDocMarkdown:
def test_get_meta_doc_handles_string_examples(self):
"""get_meta_doc properly indented examples as a list of strings."""
- full_schema = copy(self.required_schema)
+ full_schema = deepcopy(self.required_schema)
full_schema.update(
{
"examples": [
@@ -743,10 +1016,10 @@ class TestSchemaDocMarkdown:
**Examples**::
- ex1:
+ prop1:
[don't, expand, "this"]
# --- Example2 ---
- ex2: true
+ prop2: true
"""
)
in get_meta_doc(self.meta, full_schema)
@@ -792,7 +1065,8 @@ class TestSchemaDocMarkdown:
in get_meta_doc(self.meta, schema)
)
- def test_get_meta_doc_raises_key_errors(self):
+ @pytest.mark.parametrize("key", meta.keys())
+ def test_get_meta_doc_raises_key_errors(self, key):
"""get_meta_doc raises KeyErrors on missing keys."""
schema = {
"properties": {
@@ -804,18 +1078,51 @@ class TestSchemaDocMarkdown:
}
}
}
- for key in self.meta:
- invalid_meta = copy(self.meta)
- invalid_meta.pop(key)
- with pytest.raises(KeyError) as context_mgr:
- get_meta_doc(invalid_meta, schema)
- assert key in str(context_mgr.value)
+ invalid_meta = deepcopy(self.meta)
+ invalid_meta.pop(key)
+ with pytest.raises(
+ KeyError,
+ match=f"Missing required keys in module meta: {{'{key}'}}",
+ ):
+ get_meta_doc(invalid_meta, schema)
+
+ @pytest.mark.parametrize(
+ "key,expectation",
+ [
+ ("activate_by_schema_keys", does_not_raise()),
+ (
+ "additional_key",
+ pytest.raises(
+ KeyError,
+ match=(
+ "Additional unexpected keys found in module meta:"
+ " {'additional_key'}"
+ ),
+ ),
+ ),
+ ],
+ )
+ def test_get_meta_doc_additional_keys(self, key, expectation):
+ schema = {
+ "properties": {
+ "prop1": {
+ "type": "array",
+ "items": {
+ "oneOf": [{"type": "string"}, {"type": "integer"}]
+ },
+ }
+ }
+ }
+ invalid_meta = deepcopy(self.meta)
+ invalid_meta[key] = []
+ with expectation:
+ get_meta_doc(invalid_meta, schema)
def test_label_overrides_property_name(self):
"""get_meta_doc overrides property name with label."""
schema = {
"properties": {
- "prop1": {
+ "old_prop1": {
"type": "string",
"label": "label1",
},
@@ -846,9 +1153,119 @@ class TestSchemaDocMarkdown:
assert "**prop_no_label:** (string)" in meta_doc
assert "Each object in **array_label** list" in meta_doc
- assert "prop1" not in meta_doc
+ assert "old_prop1" not in meta_doc
assert ".*" not in meta_doc
+ @pytest.mark.parametrize(
+ "schema,expected_doc",
+ [
+ (
+ {
+ "properties": {
+ "prop1": {
+ "type": ["string", "integer"],
+ "deprecated": True,
+ "description": "<description>",
+ }
+ }
+ },
+ "**prop1:** (string/integer) DEPRECATED. <description>",
+ ),
+ (
+ {
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "properties": {
+ "prop1": {
+ "type": ["string", "integer"],
+ "description": "<description>",
+ "deprecated": True,
+ },
+ },
+ },
+ "**prop1:** (string/integer) DEPRECATED. <description>",
+ ),
+ (
+ {
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "$defs": {"my_ref": {"deprecated": True}},
+ "properties": {
+ "prop1": {
+ "allOf": [
+ {
+ "type": ["string", "integer"],
+ "description": "<description>",
+ },
+ {"$ref": "#/$defs/my_ref"},
+ ]
+ }
+ },
+ },
+ "**prop1:** (string/integer) DEPRECATED. <description>",
+ ),
+ (
+ {
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "$defs": {
+ "my_ref": {
+ "deprecated": True,
+ "description": "<description>",
+ }
+ },
+ "properties": {
+ "prop1": {
+ "allOf": [
+ {"type": ["string", "integer"]},
+ {"$ref": "#/$defs/my_ref"},
+ ]
+ }
+ },
+ },
+ "**prop1:** (string/integer) DEPRECATED. <description>",
+ ),
+ (
+ {
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "properties": {
+ "prop1": {
+ "description": "<description>",
+ "anyOf": [
+ {
+ "type": ["string", "integer"],
+ "description": "<deprecated_description>",
+ "deprecated": True,
+ },
+ ],
+ },
+ },
+ },
+ "**prop1:** (UNDEFINED) <description>\n",
+ ),
+ (
+ {
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "properties": {
+ "prop1": {
+ "anyOf": [
+ {
+ "type": ["string", "integer"],
+ "description": "<deprecated_description>",
+ "deprecated": True,
+ },
+ {
+ "type": "number",
+ "description": "<description>",
+ },
+ ]
+ },
+ },
+ },
+ "**prop1:** (UNDEFINED)\n",
+ ),
+ ],
+ )
+ def test_get_meta_doc_render_deprecated_info(self, schema, expected_doc):
+ assert expected_doc in get_meta_doc(self.meta, schema)
+
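The parametrized cases above fix the rendering rules: a deprecated flag reached directly, through allOf, or through a local $ref is folded into the property line, while anyOf branches are left unmerged (hence UNDEFINED). A toy flattener showing the allOf/$ref resolution these cases depend on (illustrative only, not the module's implementation):

def flatten_node(node: dict, defs: dict) -> dict:
    # Merge allOf members, resolving local "#/$defs/..." references.
    merged = {}
    for part in node.get("allOf", [node]):
        if "$ref" in part:
            part = defs[part["$ref"].split("/")[-1]]
        merged.update(part)
    return merged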
class TestAnnotatedCloudconfigFile:
def test_annotated_cloudconfig_file_no_schema_errors(self):
@@ -856,7 +1273,10 @@ class TestAnnotatedCloudconfigFile:
content = b"ntp:\n pools: [ntp1.pools.com]\n"
parse_cfg, schemamarks = load_with_marks(content)
assert content == annotated_cloudconfig_file(
- parse_cfg, content, schema_errors=[], schemamarks=schemamarks
+ parse_cfg,
+ content,
+ schemamarks=schemamarks,
+ schema_errors=[],
)
def test_annotated_cloudconfig_file_with_non_dict_cloud_config(self):
@@ -878,8 +1298,8 @@ class TestAnnotatedCloudconfigFile:
assert expected == annotated_cloudconfig_file(
None,
content,
- schema_errors=[("", "None is not of type 'object'")],
schemamarks={},
+ schema_errors=[SchemaProblem("", "None is not of type 'object'")],
)
def test_annotated_cloudconfig_file_schema_annotates_and_adds_footer(self):
@@ -908,12 +1328,15 @@ class TestAnnotatedCloudconfigFile:
)
parsed_config, schemamarks = load_with_marks(content[13:])
schema_errors = [
- ("ntp", "Some type error"),
- ("ntp.pools.0", "-99 is not a string"),
- ("ntp.pools.1", "75 is not a string"),
+ SchemaProblem("ntp", "Some type error"),
+ SchemaProblem("ntp.pools.0", "-99 is not a string"),
+ SchemaProblem("ntp.pools.1", "75 is not a string"),
]
assert expected == annotated_cloudconfig_file(
- parsed_config, content, schema_errors, schemamarks=schemamarks
+ parsed_config,
+ content,
+ schemamarks=schemamarks,
+ schema_errors=schema_errors,
)
def test_annotated_cloudconfig_file_annotates_separate_line_items(self):
@@ -938,11 +1361,14 @@ class TestAnnotatedCloudconfigFile:
)
parsed_config, schemamarks = load_with_marks(content[13:])
schema_errors = [
- ("ntp.pools.0", "-99 is not a string"),
- ("ntp.pools.1", "75 is not a string"),
+ SchemaProblem("ntp.pools.0", "-99 is not a string"),
+ SchemaProblem("ntp.pools.1", "75 is not a string"),
]
assert expected in annotated_cloudconfig_file(
- parsed_config, content, schema_errors, schemamarks=schemamarks
+ parsed_config,
+ content,
+ schemamarks=schemamarks,
+ schema_errors=schema_errors,
)
@@ -1125,3 +1551,126 @@ class TestMeta:
assert "distros" in module.meta
assert {module.meta["frequency"]}.issubset(FREQUENCIES)
assert set(module.meta["distros"]).issubset(all_distros)
+
+
+def remove_modules(schema, modules: Set[str]) -> dict:
+ indices_to_delete = set()
+ for module in set(modules):
+ for index, ref_dict in enumerate(schema["allOf"]):
+ if ref_dict["$ref"] == f"#/$defs/{module}":
+ indices_to_delete.add(index)
+                break  # module found; stop scanning for this module
+    # Pop higher indices first so earlier ones remain valid.
+    for index in sorted(indices_to_delete, reverse=True):
+        schema["allOf"].pop(index)
+ return schema
+
+
+def remove_defs(schema, defs: Set[str]) -> dict:
+ defs_to_delete = set(schema["$defs"].keys()).intersection(set(defs))
+ for key in defs_to_delete:
+ del schema["$defs"][key]
+ return schema
+
+
+def clean_schema(
+ schema=None,
+ modules: Optional[Sequence[str]] = None,
+ defs: Optional[Sequence[str]] = None,
+):
+ schema = deepcopy(schema or get_schema())
+ if modules:
+ remove_modules(schema, set(modules))
+ if defs:
+ remove_defs(schema, set(defs))
+ return schema
+
+
+@pytest.mark.hypothesis_slow
+class TestSchemaFuzz:
+
+ # Avoid https://github.com/Zac-HD/hypothesis-jsonschema/issues/97
+ SCHEMA = clean_schema(
+ modules=["cc_users_groups"],
+ defs=["users_groups.groups_by_groupname", "users_groups.user"],
+ )
+
+ @skipUnlessHypothesisJsonSchema()
+ @given(from_schema(SCHEMA))
+ def test_validate_full_schema(self, config):
+ try:
+ validate_cloudconfig_schema(config, strict=True)
+ except SchemaValidationError as ex:
+ if ex.has_errors():
+ raise
+
+
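TestSchemaFuzz leans on hypothesis-jsonschema: from_schema(SCHEMA) builds a strategy that generates arbitrary configs satisfying the published schema, and hypothesis shrinks any failing input to a minimal counterexample. The pattern in isolation (a sketch with a trivial stand-in schema):

from hypothesis import given
from hypothesis_jsonschema import from_schema

@given(from_schema({"type": "object", "properties": {"x": {"type": "integer"}}}))
def test_fuzz_example(config):
    # Every generated `config` is guaranteed to match the schema,
    # so strict validation should never raise.
    assert isinstance(config, dict)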
+class TestHandleSchemaArgs:
+
+ Args = namedtuple("Args", "config_file docs system annotate")
+
+ @pytest.mark.parametrize(
+ "annotate, expected_output",
+ [
+ (
+ True,
+ dedent(
+ """\
+ #cloud-config
+ packages:
+ - htop
+ apt_update: true # D1
+ apt_upgrade: true # D2
+ apt_reboot_if_required: true # D3
+
+ # Deprecations: -------------
+ # D1: DEPRECATED. Dropped after April 2027. Use ``package_update``. Default: ``false``
+ # D2: DEPRECATED. Dropped after April 2027. Use ``package_upgrade``. Default: ``false``
+ # D3: DEPRECATED. Dropped after April 2027. Use ``package_reboot_if_required``. Default: ``false``
+
+
+ Valid cloud-config: {}
+ """ # noqa: E501
+ ),
+ ),
+ (
+ False,
+ dedent(
+ """\
+ Cloud config schema deprecations: \
+apt_reboot_if_required: DEPRECATED. Dropped after April 2027. Use ``package_reboot_if_required``. Default: ``false``, \
+apt_update: DEPRECATED. Dropped after April 2027. Use ``package_update``. Default: ``false``, \
+apt_upgrade: DEPRECATED. Dropped after April 2027. Use ``package_upgrade``. Default: ``false``
+ Valid cloud-config: {}
+ """ # noqa: E501
+ ),
+ ),
+ ],
+ )
+ def test_handle_schema_args_annotate_deprecated_config(
+ self, annotate, expected_output, caplog, capsys, tmpdir
+ ):
+ user_data_fn = tmpdir.join("user-data")
+ with open(user_data_fn, "w") as f:
+ f.write(
+ dedent(
+ """\
+ #cloud-config
+ packages:
+ - htop
+ apt_update: true
+ apt_upgrade: true
+ apt_reboot_if_required: true
+ """
+ )
+ )
+ args = self.Args(
+ config_file=str(user_data_fn),
+ annotate=annotate,
+ docs=None,
+ system=None,
+ )
+ handle_schema_args("unused", args)
+ out, err = capsys.readouterr()
+ assert expected_output.format(user_data_fn) == out
+ assert not err
+ assert "deprec" not in caplog.text
diff --git a/tests/unittests/distros/test_create_users.py b/tests/unittests/distros/test_create_users.py
index ddb039bd..f57bfd75 100644
--- a/tests/unittests/distros/test_create_users.py
+++ b/tests/unittests/distros/test_create_users.py
@@ -135,6 +135,42 @@ class TestCreateUser(CiTestCase):
]
self.assertEqual(m_subp.call_args_list, expected)
+ @mock.patch("cloudinit.distros.util.is_group", return_value=False)
+ def test_create_groups_with_dict_deprecated(
+ self, m_is_group, m_subp, m_is_snappy
+ ):
+ """users.groups supports a dict value, but emit deprecation log."""
+ user = "foouser"
+ self.dist.create_user(user, groups={"group1": None, "group2": None})
+ expected = [
+ mock.call(["groupadd", "group1"]),
+ mock.call(["groupadd", "group2"]),
+ self._useradd2call([user, "--groups", "group1,group2", "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ]
+ self.assertEqual(m_subp.call_args_list, expected)
+ self.assertIn(
+ "WARNING: DEPRECATED: The user foouser has a 'groups' config"
+ " value of type dict which is deprecated and will be removed in a"
+ " future version of cloud-init. Use a comma-delimited string or"
+ " array instead: group1,group2.",
+ self.logs.getvalue(),
+ )
+
+ @mock.patch("cloudinit.distros.util.is_group", return_value=False)
+ def test_create_groups_with_list(self, m_is_group, m_subp, m_is_snappy):
+ """users.groups supports a list value."""
+ user = "foouser"
+ self.dist.create_user(user, groups=["group1", "group2"])
+ expected = [
+ mock.call(["groupadd", "group1"]),
+ mock.call(["groupadd", "group2"]),
+ self._useradd2call([user, "--groups", "group1,group2", "-m"]),
+ mock.call(["passwd", "-l", user]),
+ ]
+ self.assertEqual(m_subp.call_args_list, expected)
+ self.assertNotIn("WARNING: DEPRECATION: ", self.logs.getvalue())
+
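The two tests above fix both sides of the groups contract: a dict still works but logs a deprecation, while a list stays silent. A sketch of the normalization the deprecated path implies (hypothetical helper, not the distro code):

import logging

LOG = logging.getLogger(__name__)

def normalize_groups(user: str, groups):
    # Legacy form: {"group1": None, "group2": None} -> ["group1", "group2"]
    if isinstance(groups, dict):
        LOG.warning(
            "DEPRECATED: The user %s has a 'groups' config value of type"
            " dict which is deprecated and will be removed in a future"
            " version of cloud-init. Use a comma-delimited string or"
            " array instead: %s.",
            user,
            ",".join(groups),
        )
        groups = list(groups)
    elif isinstance(groups, str):
        groups = groups.split(",")
    return groups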
def test_explicit_sudo_false(self, m_subp, m_is_snappy):
user = "foouser"
self.dist.create_user(user, sudo=False)
diff --git a/tests/unittests/distros/test_netconfig.py b/tests/unittests/distros/test_netconfig.py
index a25be481..38e92f0e 100644
--- a/tests/unittests/distros/test_netconfig.py
+++ b/tests/unittests/distros/test_netconfig.py
@@ -9,6 +9,7 @@ from unittest import mock
from cloudinit import distros, helpers, safeyaml, settings, subp, util
from cloudinit.distros.parsers.sys_conf import SysConf
+from cloudinit.net.activators import IfUpDownActivator
from tests.unittests.helpers import FilesystemMockingTestCase, dir2dict
BASE_NET_CFG = """
@@ -252,12 +253,17 @@ class TestNetCfgDistroBase(FilesystemMockingTestCase):
super(TestNetCfgDistroBase, self).setUp()
self.add_patch("cloudinit.util.system_is_snappy", "m_snappy")
- def _get_distro(self, dname, renderers=None):
+ def _get_distro(self, dname, renderers=None, activators=None):
cls = distros.fetch(dname)
cfg = settings.CFG_BUILTIN
cfg["system_info"]["distro"] = dname
+ system_info_network_cfg = {}
if renderers:
- cfg["system_info"]["network"] = {"renderers": renderers}
+ system_info_network_cfg["renderers"] = renderers
+ if activators:
+ system_info_network_cfg["activators"] = activators
+ if system_info_network_cfg:
+ cfg["system_info"]["network"] = system_info_network_cfg
paths = helpers.Paths({})
return cls(dname, cfg.get("system_info"), paths)
@@ -371,7 +377,9 @@ ifconfig_eth1=DHCP
class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase):
def setUp(self):
super(TestNetCfgDistroUbuntuEni, self).setUp()
- self.distro = self._get_distro("ubuntu", renderers=["eni"])
+ self.distro = self._get_distro(
+ "ubuntu", renderers=["eni"], activators=["eni"]
+ )
def eni_path(self):
return "/etc/network/interfaces.d/50-cloud-init.cfg"
@@ -398,6 +406,51 @@ class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase):
self.assertEqual(expected, results[cfgpath])
self.assertEqual(0o644, get_mode(cfgpath, tmpd))
+ def test_apply_network_config_and_bringup_filters_priority_eni_ub(self):
+ """Network activator search priority can be overridden from config."""
+ expected_cfgs = {
+ self.eni_path(): V1_NET_CFG_OUTPUT,
+ }
+ with mock.patch(
+ "cloudinit.net.activators.select_activator"
+ ) as select_activator:
+ select_activator.return_value = IfUpDownActivator
+ self._apply_and_verify_eni(
+ self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ bringup=True,
+ )
+ # 2nd call to select_activator via distro.network_activator prop
+ assert IfUpDownActivator == self.distro.network_activator
+ self.assertEqual(
+ [mock.call(priority=["eni"])] * 2, select_activator.call_args_list
+ )
+
+ def test_apply_network_config_and_bringup_activator_defaults_ub(self):
+ """Network activator search priority defaults when unspecified."""
+ expected_cfgs = {
+ self.eni_path(): V1_NET_CFG_OUTPUT,
+ }
+ # Don't set activators to see DEFAULT_PRIORITY
+ self.distro = self._get_distro("ubuntu", renderers=["eni"])
+ with mock.patch(
+ "cloudinit.net.activators.select_activator"
+ ) as select_activator:
+ select_activator.return_value = IfUpDownActivator
+ self._apply_and_verify_eni(
+ self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs=expected_cfgs.copy(),
+ bringup=True,
+ )
+ # 2nd call to select_activator via distro.network_activator prop
+ assert IfUpDownActivator == self.distro.network_activator
+ self.assertEqual(
+ [mock.call(priority=None)] * 2, select_activator.call_args_list
+ )
+
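Together these tests show system_info.network.activators feeding the activator search as a priority list, with priority=None selecting DEFAULT_PRIORITY. The distro-side plumbing is roughly as follows (a sketch; only the property name comes from the test):

from cloudinit.net import activators

class DistroSketch:
    def __init__(self, network_cfg: dict):
        self._network_cfg = network_cfg  # system_info["network"]

    @property
    def network_activator(self):
        # None falls back to activators.DEFAULT_PRIORITY internally.
        priority = self._network_cfg.get("activators") or None
        return activators.select_activator(priority=priority)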
def test_apply_network_config_eni_ub(self):
expected_cfgs = {
self.eni_path(): V1_NET_CFG_OUTPUT,
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index 9d5a7ed2..31e0188c 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -30,6 +30,7 @@ from cloudinit.config.schema import (
)
from cloudinit.sources import DataSourceNone
from cloudinit.templater import JINJA_AVAILABLE
+from tests.hypothesis_jsonschema import HAS_HYPOTHESIS_JSONSCHEMA
_real_subp = subp.subp
@@ -522,6 +523,13 @@ def skipIfJinja():
return skipIf(JINJA_AVAILABLE, "Jinja dependency present.")
+def skipUnlessHypothesisJsonSchema():
+ return skipIf(
+ not HAS_HYPOTHESIS_JSONSCHEMA,
+ "No python-hypothesis-jsonschema dependency present.",
+ )
+
+
# older versions of mock do not have the useful 'assert_not_called'
if not hasattr(mock.Mock, "assert_not_called"):
diff --git a/tests/unittests/net/test_dns.py b/tests/unittests/net/test_dns.py
new file mode 100644
index 00000000..606efecb
--- /dev/null
+++ b/tests/unittests/net/test_dns.py
@@ -0,0 +1,32 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from unittest import mock
+
+from cloudinit import safeyaml
+from cloudinit.net import network_state
+
+
+class TestNetDns:
+ @mock.patch("cloudinit.net.network_state.get_interfaces_by_mac")
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ def test_system_mac_address_does_not_break_dns_parsing(
+ self, by_mac_state, by_mac_init
+ ):
+ by_mac_state.return_value = {"00:11:22:33:44:55": "foobar"}
+ by_mac_init.return_value = {"00:11:22:33:44:55": "foobar"}
+ state = network_state.parse_net_config_data(
+ safeyaml.load(
+ """\
+version: 2
+ethernets:
+ eth:
+ match:
+ macaddress: '00:11:22:33:44:55'
+ addresses: [10.0.0.2/24]
+ gateway4: 10.0.0.1
+ nameservers:
+ addresses: [10.0.0.3]
+"""
+ )
+ )
+ assert "10.0.0.3" in state.dns_nameservers
diff --git a/tests/unittests/net/test_network_state.py b/tests/unittests/net/test_network_state.py
index ec21d007..b76b5dd7 100644
--- a/tests/unittests/net/test_network_state.py
+++ b/tests/unittests/net/test_network_state.py
@@ -101,17 +101,19 @@ class TestNetworkStateParseConfig(CiTestCase):
class TestNetworkStateParseConfigV2(CiTestCase):
def test_version_2_ignores_renderer_key(self):
ncfg = {"version": 2, "renderer": "networkd", "ethernets": {}}
- nsi = network_state.NetworkStateInterpreter(
- version=ncfg["version"], config=ncfg
- )
- nsi.parse_config(skip_broken=False)
- self.assertEqual(ncfg, nsi.as_dict()["config"])
+ with mock.patch("cloudinit.net.network_state.get_interfaces_by_mac"):
+ nsi = network_state.NetworkStateInterpreter(
+ version=ncfg["version"], config=ncfg
+ )
+ nsi.parse_config(skip_broken=False)
+ self.assertEqual(ncfg, nsi.as_dict()["config"])
class TestNetworkStateParseNameservers:
def _parse_network_state_from_config(self, config):
- yaml = safeyaml.load(config)
- return network_state.parse_net_config_data(yaml["network"])
+ with mock.patch("cloudinit.net.network_state.get_interfaces_by_mac"):
+ yaml = safeyaml.load(config)
+ return network_state.parse_net_config_data(yaml["network"])
def test_v1_nameservers_valid(self):
config = self._parse_network_state_from_config(
@@ -136,7 +138,9 @@ class TestNetworkStateParseNameservers:
V1_CONFIG_NAMESERVERS_INVALID
)
- def test_v2_nameservers(self):
+ def test_v2_nameservers(self, mocker):
+ mocker.patch("cloudinit.net.network_state.get_interfaces_by_mac")
+ mocker.patch("cloudinit.net.get_interfaces_by_mac")
config = self._parse_network_state_from_config(V2_CONFIG_NAMESERVERS)
# Ensure DNS defined on interface exists on interface
diff --git a/tests/unittests/net/test_networkd.py b/tests/unittests/net/test_networkd.py
index ec1d04e9..ee50e640 100644
--- a/tests/unittests/net/test_networkd.py
+++ b/tests/unittests/net/test_networkd.py
@@ -1,5 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from unittest import mock
+
from cloudinit import safeyaml
from cloudinit.net import network_state, networkd
@@ -47,13 +49,15 @@ Domains=foo.local bar.local
class TestNetworkdRenderState:
def _parse_network_state_from_config(self, config):
- yaml = safeyaml.load(config)
- return network_state.parse_net_config_data(yaml["network"])
+ with mock.patch("cloudinit.net.network_state.get_interfaces_by_mac"):
+ yaml = safeyaml.load(config)
+ return network_state.parse_net_config_data(yaml["network"])
def test_networkd_render_with_set_name(self):
- ns = self._parse_network_state_from_config(V2_CONFIG_SET_NAME)
- renderer = networkd.Renderer()
- rendered_content = renderer._render_content(ns)
+ with mock.patch("cloudinit.net.get_interfaces_by_mac"):
+ ns = self._parse_network_state_from_config(V2_CONFIG_SET_NAME)
+ renderer = networkd.Renderer()
+ rendered_content = renderer._render_content(ns)
assert "eth0" in rendered_content
assert rendered_content["eth0"] == V2_CONFIG_SET_NAME_RENDERED_ETH0
diff --git a/tests/unittests/runs/test_simple_run.py b/tests/unittests/runs/test_simple_run.py
index 2b51117c..7b364a3e 100644
--- a/tests/unittests/runs/test_simple_run.py
+++ b/tests/unittests/runs/test_simple_run.py
@@ -23,7 +23,10 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
"datasource_list": ["None"],
"runcmd": ["ls /etc"], # test ALL_DISTROS
"spacewalk": {}, # test non-ubuntu distros module definition
- "system_info": {"paths": {"run_dir": self.new_root}},
+ "system_info": {
+ "paths": {"run_dir": self.new_root},
+ "distro": "ubuntu",
+ },
"write_files": [
{
"path": "/etc/blah.ini",
diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py
index b1edf1f3..9ddb1f56 100644
--- a/tests/unittests/sources/test_azure.py
+++ b/tests/unittests/sources/test_azure.py
@@ -35,17 +35,15 @@ from tests.unittests.helpers import (
mock,
populate_dir,
resourceLocation,
- wrap_and_call,
)
MOCKPATH = "cloudinit.sources.DataSourceAzure."
@pytest.fixture
-def azure_ds(patched_data_dir_path, paths):
+def azure_ds(patched_data_dir_path, mock_dmi_read_dmi_data, paths):
"""Provide DataSourceAzure instance with mocks for minimal test case."""
- with mock.patch(MOCKPATH + "_is_platform_viable", return_value=True):
- yield dsaz.DataSourceAzure(sys_cfg={}, distro=mock.Mock(), paths=paths)
+ yield dsaz.DataSourceAzure(sys_cfg={}, distro=mock.Mock(), paths=paths)
@pytest.fixture
@@ -85,6 +83,16 @@ def mock_azure_report_failure_to_fabric():
@pytest.fixture
+def mock_chassis_asset_tag():
+ with mock.patch.object(
+ dsaz.ChassisAssetTag,
+ "query_system",
+ return_value=dsaz.ChassisAssetTag.AZURE_CLOUD.value,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
def mock_device_driver():
with mock.patch(
MOCKPATH + "device_driver",
@@ -117,6 +125,8 @@ def mock_dmi_read_dmi_data():
def fake_read(key: str) -> str:
if key == "system-uuid":
return "fake-system-uuid"
+ elif key == "chassis-asset-tag":
+ return "7783-7084-3265-9085-8269-3286-77"
raise RuntimeError()
with mock.patch(
@@ -476,158 +486,222 @@ IMDS_NETWORK_METADATA = {
EXAMPLE_UUID = "d0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8"
-class TestNetworkConfig:
-
- maxDiff = None
- fallback_config = {
- "version": 1,
- "config": [
- {
- "type": "physical",
- "name": "eth0",
- "mac_address": "00:11:22:33:44:55",
- "params": {"driver": "hv_netsvc"},
- "subnets": [{"type": "dhcp"}],
- }
- ],
- }
-
- def test_single_ipv4_nic_configuration(self, azure_ds, mock_device_driver):
- """Network config emits dhcp on single nic with ipv4"""
- expected = {
- "ethernets": {
- "eth0": {
- "dhcp4": True,
- "dhcp4-overrides": {"route-metric": 100},
- "dhcp6": False,
- "match": {"macaddress": "00:0d:3a:04:75:98"},
- "set-name": "eth0",
- }
- },
- "version": 2,
- }
- azure_ds._metadata_imds = NETWORK_METADATA
-
- assert azure_ds.network_config == expected
-
- def test_increases_route_metric_for_non_primary_nics(
- self, azure_ds, mock_device_driver
- ):
- """Network config increases route-metric for each nic"""
- expected = {
- "ethernets": {
- "eth0": {
- "dhcp4": True,
- "dhcp4-overrides": {"route-metric": 100},
- "dhcp6": False,
- "match": {"macaddress": "00:0d:3a:04:75:98"},
- "set-name": "eth0",
+class TestGenerateNetworkConfig:
+ @pytest.mark.parametrize(
+ "label,metadata,expected",
+ [
+ (
+ "simple interface",
+ NETWORK_METADATA["network"],
+ {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
},
- "eth1": {
- "set-name": "eth1",
- "match": {"macaddress": "22:0d:3a:04:75:98"},
- "dhcp6": False,
- "dhcp4": True,
- "dhcp4-overrides": {"route-metric": 200},
+ ),
+ (
+ "multiple interfaces with increasing route metric",
+ {
+ "interface": [
+ {
+ "macAddress": "000D3A047598",
+ "ipv6": {"ipAddress": []},
+ "ipv4": {
+ "subnet": [
+ {"prefix": "24", "address": "10.0.0.0"}
+ ],
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.4",
+ "publicIpAddress": "104.46.124.81",
+ }
+ ],
+ },
+ }
+ ]
+ * 3
},
- "eth2": {
- "set-name": "eth2",
- "match": {"macaddress": "33:0d:3a:04:75:98"},
- "dhcp6": False,
- "dhcp4": True,
- "dhcp4-overrides": {"route-metric": 300},
+ {
+ "ethernets": {
+ "eth0": {
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": False,
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ },
+ "eth1": {
+ "set-name": "eth1",
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 200},
+ },
+ "eth2": {
+ "set-name": "eth2",
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 300},
+ },
+ },
+ "version": 2,
},
- },
- "version": 2,
- }
- imds_data = copy.deepcopy(NETWORK_METADATA)
- imds_data["network"]["interface"].append(SECONDARY_INTERFACE)
- third_intf = copy.deepcopy(SECONDARY_INTERFACE)
- third_intf["macAddress"] = third_intf["macAddress"].replace("22", "33")
- third_intf["ipv4"]["subnet"][0]["address"] = "10.0.2.0"
- third_intf["ipv4"]["ipAddress"][0]["privateIpAddress"] = "10.0.2.6"
- imds_data["network"]["interface"].append(third_intf)
- azure_ds._metadata_imds = imds_data
-
- assert azure_ds.network_config == expected
-
- def test_ipv4_secondary_ips_will_be_static_addrs(
- self, azure_ds, mock_device_driver
+ ),
+ (
+ "secondary IPv4s are static",
+ {
+ "interface": [
+ {
+ "macAddress": "000D3A047598",
+ "ipv6": {
+ "subnet": [
+ {
+ "prefix": "10",
+ "address": "2001:dead:beef::16",
+ }
+ ],
+ "ipAddress": [
+ {"privateIpAddress": "2001:dead:beef::1"}
+ ],
+ },
+ "ipv4": {
+ "subnet": [
+ {"prefix": "24", "address": "10.0.0.0"},
+ ],
+ "ipAddress": [
+ {
+ "privateIpAddress": "10.0.0.4",
+ "publicIpAddress": "104.46.124.81",
+ },
+ {
+ "privateIpAddress": "11.0.0.5",
+ "publicIpAddress": "104.46.124.82",
+ },
+ {
+ "privateIpAddress": "12.0.0.6",
+ "publicIpAddress": "104.46.124.83",
+ },
+ ],
+ },
+ }
+ ]
+ },
+ {
+ "ethernets": {
+ "eth0": {
+ "addresses": ["11.0.0.5/24", "12.0.0.6/24"],
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 100},
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ },
+ ),
+ (
+ "ipv6 secondaries",
+ {
+ "interface": [
+ {
+ "macAddress": "000D3A047598",
+ "ipv6": {
+ "subnet": [
+ {
+ "prefix": "10",
+ "address": "2001:dead:beef::16",
+ }
+ ],
+ "ipAddress": [
+ {"privateIpAddress": "2001:dead:beef::1"},
+ {"privateIpAddress": "2001:dead:beef::2"},
+ ],
+ },
+ }
+ ]
+ },
+ {
+ "ethernets": {
+ "eth0": {
+ "addresses": ["2001:dead:beef::2/10"],
+ "dhcp4": True,
+ "dhcp4-overrides": {"route-metric": 100},
+ "dhcp6": True,
+ "dhcp6-overrides": {"route-metric": 100},
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "set-name": "eth0",
+ }
+ },
+ "version": 2,
+ },
+ ),
+ ],
+ )
+ def test_parsing_scenarios(
+ self, label, mock_device_driver, metadata, expected
):
- """Network config emits primary ipv4 as dhcp others are static"""
- expected = {
- "ethernets": {
- "eth0": {
- "addresses": ["10.0.0.5/24"],
- "dhcp4": True,
- "dhcp4-overrides": {"route-metric": 100},
- "dhcp6": True,
- "dhcp6-overrides": {"route-metric": 100},
- "match": {"macaddress": "00:0d:3a:04:75:98"},
- "set-name": "eth0",
- }
- },
- "version": 2,
- }
- imds_data = copy.deepcopy(NETWORK_METADATA)
- nic1 = imds_data["network"]["interface"][0]
- nic1["ipv4"]["ipAddress"].append({"privateIpAddress": "10.0.0.5"})
-
- nic1["ipv6"] = {
- "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}],
- "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}],
- }
- azure_ds._metadata_imds = imds_data
+ assert (
+ dsaz.generate_network_config_from_instance_network_metadata(
+ metadata
+ )
+ == expected
+ )
- assert azure_ds.network_config == expected
+ def test_match_hv_netvsc(self, mock_device_driver):
+ mock_device_driver.return_value = "hv_netvsc"
- def test_ipv6_secondary_ips_will_be_static_cidrs(
- self, azure_ds, mock_device_driver
- ):
- """Network config emits primary ipv6 as dhcp others are static"""
- expected = {
+ assert dsaz.generate_network_config_from_instance_network_metadata(
+ NETWORK_METADATA["network"]
+ ) == {
"ethernets": {
"eth0": {
- "addresses": ["10.0.0.5/24", "2001:dead:beef::2/10"],
"dhcp4": True,
"dhcp4-overrides": {"route-metric": 100},
- "dhcp6": True,
- "dhcp6-overrides": {"route-metric": 100},
- "match": {"macaddress": "00:0d:3a:04:75:98"},
+ "dhcp6": False,
+ "match": {
+ "macaddress": "00:0d:3a:04:75:98",
+ "driver": "hv_netvsc",
+ },
"set-name": "eth0",
}
},
"version": 2,
}
- imds_data = copy.deepcopy(NETWORK_METADATA)
- nic1 = imds_data["network"]["interface"][0]
- nic1["ipv4"]["ipAddress"].append({"privateIpAddress": "10.0.0.5"})
-
- # Secondary ipv6 addresses currently ignored/unconfigured
- nic1["ipv6"] = {
- "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}],
- "ipAddress": [
- {"privateIpAddress": "2001:dead:beef::1"},
- {"privateIpAddress": "2001:dead:beef::2"},
- ],
- }
- azure_ds._metadata_imds = imds_data
- assert azure_ds.network_config == expected
- def test_match_driver_for_netvsc(self, azure_ds, mock_device_driver):
- """Network config emits driver when using netvsc."""
- mock_device_driver.return_value = "hv_netvsc"
+class TestNetworkConfig:
+ fallback_config = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "mac_address": "00:11:22:33:44:55",
+ "params": {"driver": "hv_netsvc"},
+ "subnets": [{"type": "dhcp"}],
+ }
+ ],
+ }
+
+ def test_single_ipv4_nic_configuration(self, azure_ds, mock_device_driver):
+ """Network config emits dhcp on single nic with ipv4"""
expected = {
"ethernets": {
"eth0": {
"dhcp4": True,
"dhcp4-overrides": {"route-metric": 100},
"dhcp6": False,
- "match": {
- "macaddress": "00:0d:3a:04:75:98",
- "driver": "hv_netvsc",
- },
+ "match": {"macaddress": "00:0d:3a:04:75:98"},
"set-name": "eth0",
}
},
@@ -986,7 +1060,6 @@ scbus-1 on xpt0 bus 0
dsaz.BUILTIN_DS_CONFIG["data_dir"] = self.waagent_d
- self.m_is_platform_viable = mock.MagicMock(autospec=True)
self.m_get_metadata_from_fabric = mock.MagicMock(return_value=[])
self.m_report_failure_to_fabric = mock.MagicMock(autospec=True)
self.m_get_interfaces = mock.MagicMock(
@@ -1010,6 +1083,10 @@ scbus-1 on xpt0 bus 0
return self.instance_id
elif key == "chassis-asset-tag":
return "7783-7084-3265-9085-8269-3286-77"
+ raise RuntimeError()
+
+ self.m_read_dmi_data = mock.MagicMock(autospec=True)
+ self.m_read_dmi_data.side_effect = _dmi_mocks
self.apply_patches(
[
@@ -1018,7 +1095,6 @@ scbus-1 on xpt0 bus 0
"list_possible_azure_ds",
self.m_list_possible_azure_ds,
),
- (dsaz, "_is_platform_viable", self.m_is_platform_viable),
(
dsaz,
"get_metadata_from_fabric",
@@ -1045,7 +1121,7 @@ scbus-1 on xpt0 bus 0
(
dsaz.dmi,
"read_dmi_data",
- mock.MagicMock(side_effect=_dmi_mocks),
+ self.m_read_dmi_data,
),
(
dsaz.util,
@@ -1115,14 +1191,16 @@ scbus-1 on xpt0 bus 0
# Return a non-matching asset tag value
data = {}
dsrc = self._get_ds(data)
- self.m_is_platform_viable.return_value = False
+ self.m_read_dmi_data.side_effect = lambda x: "notazure"
with mock.patch.object(
dsrc, "crawl_metadata"
) as m_crawl_metadata, mock.patch.object(
dsrc, "_report_failure"
) as m_report_failure:
ret = dsrc.get_data()
- self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
+ assert self.m_read_dmi_data.mock_calls == [
+ mock.call("chassis-asset-tag")
+ ]
self.assertFalse(ret)
# Assert that for non viable platforms,
# there is no communication with the Azure datasource.
@@ -1139,27 +1217,22 @@ scbus-1 on xpt0 bus 0
data = {}
dsrc = self._get_ds(data)
with mock.patch.object(dsrc, "_report_failure") as m_report_failure:
- self.m_is_platform_viable.return_value = True
ret = dsrc.get_data()
- self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
self.assertFalse(ret)
self.assertEqual(1, m_report_failure.call_count)
def test_crawl_metadata_exception_returns_no_datasource(self):
data = {}
dsrc = self._get_ds(data)
- self.m_is_platform_viable.return_value = True
with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
m_crawl_metadata.side_effect = Exception
ret = dsrc.get_data()
- self.m_is_platform_viable.assert_called_with(dsrc.seed_dir)
self.assertEqual(1, m_crawl_metadata.call_count)
self.assertFalse(ret)
def test_crawl_metadata_exception_should_report_failure_with_msg(self):
data = {}
dsrc = self._get_ds(data)
- self.m_is_platform_viable.return_value = True
with mock.patch.object(
dsrc, "crawl_metadata"
) as m_crawl_metadata, mock.patch.object(
@@ -1175,7 +1248,6 @@ scbus-1 on xpt0 bus 0
def test_crawl_metadata_exc_should_log_could_not_crawl_msg(self):
data = {}
dsrc = self._get_ds(data)
- self.m_is_platform_viable.return_value = True
with mock.patch.object(dsrc, "crawl_metadata") as m_crawl_metadata:
m_crawl_metadata.side_effect = Exception
dsrc.get_data()
@@ -1637,15 +1709,12 @@ scbus-1 on xpt0 bus 0
            # passwd is a crypt-formatted string: $id$salt$encrypted.
            # Hashing the plaintext with everything up to the final '$'
            # (the "$id$salt$" prefix) should reproduce the stored value.
- pos = defuser["passwd"].rfind("$") + 1
+ pos = defuser["hashed_passwd"].rfind("$") + 1
self.assertEqual(
- defuser["passwd"],
- crypt.crypt("mypass", defuser["passwd"][0:pos]),
+ defuser["hashed_passwd"],
+ crypt.crypt("mypass", defuser["hashed_passwd"][0:pos]),
)
- # the same hashed value should also be present in cfg['password']
- self.assertEqual(defuser["passwd"], dsrc.cfg["password"])
-
assert dsrc.cfg["ssh_pwauth"] is True
def test_password_with_disable_ssh_pw_auth_true(self):
@@ -3491,54 +3560,40 @@ class TestRemoveUbuntuNetworkConfigScripts(CiTestCase):
self.assertIn(mock.call(path), calls)
-class TestWBIsPlatformViable(CiTestCase):
- """White box tests for _is_platform_viable."""
+class TestIsPlatformViable:
+ @pytest.mark.parametrize(
+ "tag",
+ [
+ dsaz.ChassisAssetTag.AZURE_CLOUD.value,
+ ],
+ )
+ def test_true_on_azure_chassis(
+ self, azure_ds, mock_chassis_asset_tag, tag
+ ):
+ mock_chassis_asset_tag.return_value = tag
- with_logs = True
+ assert dsaz.is_platform_viable(None) is True
- @mock.patch(MOCKPATH + "dmi.read_dmi_data")
- def test_true_on_non_azure_chassis(self, m_read_dmi_data):
- """Return True if DMI chassis-asset-tag is AZURE_CHASSIS_ASSET_TAG."""
- m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG
- self.assertTrue(dsaz._is_platform_viable("doesnotmatter"))
+ def test_true_on_azure_ovf_env_in_seed_dir(
+ self, azure_ds, mock_chassis_asset_tag, tmpdir
+ ):
+ mock_chassis_asset_tag.return_value = "notazure"
- @mock.patch(MOCKPATH + "os.path.exists")
- @mock.patch(MOCKPATH + "dmi.read_dmi_data")
- def test_true_on_azure_ovf_env_in_seed_dir(self, m_read_dmi_data, m_exist):
- """Return True if ovf-env.xml exists in known seed dirs."""
- # Non-matching Azure chassis-asset-tag
- m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + "X"
-
- m_exist.return_value = True
- self.assertTrue(dsaz._is_platform_viable("/some/seed/dir"))
- m_exist.called_once_with("/other/seed/dir")
-
- def test_false_on_no_matching_azure_criteria(self):
- """Report non-azure on unmatched asset tag, ovf-env absent and no dev.
-
- Return False when the asset tag doesn't match Azure's static
- AZURE_CHASSIS_ASSET_TAG, no ovf-env.xml files exist in known seed dirs
- and no devices have a label starting with prefix 'rd_rdfe_'.
- """
- self.assertFalse(
- wrap_and_call(
- MOCKPATH,
- {
- "os.path.exists": False,
- # Non-matching Azure chassis-asset-tag
- "dmi.read_dmi_data": dsaz.AZURE_CHASSIS_ASSET_TAG + "X",
- "subp.which": None,
- },
- dsaz._is_platform_viable,
- "doesnotmatter",
- )
- )
- self.assertIn(
- "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format(
- dsaz.AZURE_CHASSIS_ASSET_TAG + "X"
- ),
- self.logs.getvalue(),
- )
+ seed_path = Path(azure_ds.seed_dir, "ovf-env.xml")
+ seed_path.parent.mkdir(exist_ok=True, parents=True)
+ seed_path.write_text("")
+
+ assert dsaz.is_platform_viable(seed_path.parent) is True
+
+ def test_false_on_no_matching_azure_criteria(
+ self, azure_ds, mock_chassis_asset_tag
+ ):
+ mock_chassis_asset_tag.return_value = None
+
+ seed_path = Path(azure_ds.seed_dir, "ovf-env.xml")
+ seed_path.parent.mkdir(exist_ok=True, parents=True)
+
+ assert dsaz.is_platform_viable(seed_path) is False
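Read together, the three tests encode the detection rule: the platform is viable when the DMI chassis asset tag is Azure's, or failing that when an ovf-env.xml exists in the seed directory. In outline (a sketch of the rule under test, not the datasource code):

import os

from cloudinit.sources.helpers.azure import ChassisAssetTag

def is_platform_viable_sketch(seed_dir) -> bool:
    if ChassisAssetTag.query_system() is not None:
        return True  # recognized Azure chassis asset tag
    return bool(seed_dir) and os.path.exists(
        os.path.join(seed_dir, "ovf-env.xml")
    )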
class TestRandomSeed(CiTestCase):
@@ -4049,7 +4104,8 @@ class TestProvisioning:
# Verify DMI usage.
assert self.mock_dmi_read_dmi_data.mock_calls == [
- mock.call("system-uuid")
+ mock.call("chassis-asset-tag"),
+ mock.call("system-uuid"),
]
assert self.azure_ds.metadata["instance-id"] == "fake-system-uuid"
@@ -4126,7 +4182,8 @@ class TestProvisioning:
# Verify DMI usage.
assert self.mock_dmi_read_dmi_data.mock_calls == [
- mock.call("system-uuid")
+ mock.call("chassis-asset-tag"),
+ mock.call("system-uuid"),
]
assert self.azure_ds.metadata["instance-id"] == "fake-system-uuid"
@@ -4229,7 +4286,8 @@ class TestProvisioning:
# Verify DMI usage.
assert self.mock_dmi_read_dmi_data.mock_calls == [
- mock.call("system-uuid")
+ mock.call("chassis-asset-tag"),
+ mock.call("system-uuid"),
]
assert self.azure_ds.metadata["instance-id"] == "fake-system-uuid"
diff --git a/tests/unittests/sources/test_azure_helper.py b/tests/unittests/sources/test_azure_helper.py
index 4279dc4f..8107b114 100644
--- a/tests/unittests/sources/test_azure_helper.py
+++ b/tests/unittests/sources/test_azure_helper.py
@@ -9,11 +9,14 @@ from xml.sax.saxutils import escape, unescape
import pytest
+from cloudinit import url_helper
from cloudinit.sources.helpers import azure as azure_helper
from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim
from cloudinit.util import load_file
from tests.unittests.helpers import CiTestCase, ExitStack, mock
+from .test_azure import construct_ovf_env
+
GOAL_STATE_TEMPLATE = """\
<?xml version="1.0" encoding="utf-8"?>
<GoalState xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
@@ -82,6 +85,34 @@ HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = dedent(
)
HEALTH_REPORT_DESCRIPTION_TRIM_LEN = 512
+MOCKPATH = "cloudinit.sources.helpers.azure."
+
+
+@pytest.fixture
+def mock_dmi_read_dmi_data():
+ with mock.patch(
+ MOCKPATH + "dmi.read_dmi_data",
+ autospec=True,
+ ) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_readurl():
+ with mock.patch(MOCKPATH + "url_helper.readurl", autospec=True) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_sleep():
+ with mock.patch(MOCKPATH + "sleep", autospec=True) as m:
+ yield m
+
+
+@pytest.fixture
+def mock_time():
+ with mock.patch(MOCKPATH + "time", autospec=True) as m:
+ yield m
class SentinelException(Exception):
@@ -331,145 +362,99 @@ class TestAzureEndpointHttpClient(CiTestCase):
self.assertEqual(1, self.m_http_with_retries.call_count)
-class TestAzureHelperHttpWithRetries(CiTestCase):
-
- with_logs = True
+class TestHttpWithRetries:
+ @pytest.fixture(autouse=True)
+ def setup(self, mock_readurl, mock_sleep, mock_time):
+ self.m_readurl = mock_readurl
+ self.m_sleep = mock_sleep
+ self.m_time = mock_time
+ self.m_time.return_value = 0
- max_readurl_attempts = 240
- default_readurl_timeout = 5
- sleep_duration_between_retries = 5
- periodic_logging_attempts = 12
-
- def setUp(self):
- super(TestAzureHelperHttpWithRetries, self).setUp()
- patches = ExitStack()
- self.addCleanup(patches.close)
-
- self.m_readurl = patches.enter_context(
- mock.patch.object(
- azure_helper.url_helper, "readurl", mock.MagicMock()
+ @pytest.mark.parametrize(
+ "times,try_count,retry_sleep,timeout_minutes",
+ [
+ ([0, 0], 1, 5, 0),
+ ([0, 55], 1, 5, 1),
+ ([0, 54, 55], 2, 5, 1),
+ ([0, 15, 30, 45, 60], 4, 5, 1),
+ ([0, 594, 595], 2, 5, 10),
+ ],
+ )
+ def test_timeouts(
+ self, caplog, times, try_count, retry_sleep, timeout_minutes
+ ):
+ error = url_helper.UrlError("retry", code=404)
+ self.m_readurl.side_effect = error
+ self.m_time.side_effect = times
+
+ with pytest.raises(url_helper.UrlError) as exc_info:
+ azure_helper.http_with_retries(
+ "testurl",
+ headers={},
+ retry_sleep=retry_sleep,
+ timeout_minutes=timeout_minutes,
)
- )
- self.m_sleep = patches.enter_context(
- mock.patch.object(azure_helper.time, "sleep", autospec=True)
- )
- def test_http_with_retries(self):
- self.m_readurl.return_value = "TestResp"
- self.assertEqual(
- azure_helper.http_with_retries("testurl", headers={}),
- self.m_readurl.return_value,
- )
- self.assertEqual(self.m_readurl.call_count, 1)
+ assert exc_info.value == error
+ assert self.m_readurl.call_count == try_count
+ for i in range(1, try_count + 1):
+ assert (
+ "cloudinit.sources.helpers.azure",
+ 10,
+ "Failed HTTP request with Azure endpoint testurl during "
+ "attempt %d with exception: retry (code=404 headers={})" % i,
+ ) in caplog.record_tuples
+ assert self.m_time.mock_calls == (try_count + 1) * [mock.call()]
+ assert self.m_sleep.mock_calls == (try_count - 1) * [
+ mock.call(retry_sleep)
+ ]
- def test_http_with_retries_propagates_readurl_exc_and_logs_exc(self):
- self.m_readurl.side_effect = SentinelException
+ @pytest.mark.parametrize(
+ "times,try_count,retry_sleep,timeout_minutes",
+ [
+ ([0, 0], 1, 5, 0),
+ ([0, 55], 1, 5, 1),
+ ([0, 54, 55], 2, 5, 1),
+ ([0, 15, 30, 45, 60], 4, 5, 1),
+ ([0, 594, 595], 2, 5, 10),
+ ],
+ )
+ def test_success(
+ self, caplog, times, try_count, retry_sleep, timeout_minutes
+ ):
+ self.m_readurl.side_effect = (try_count - 1) * [
+ url_helper.UrlError("retry", code=404)
+ ] + ["data"]
+ self.m_time.side_effect = times
- self.assertRaises(
- SentinelException,
- azure_helper.http_with_retries,
+ resp = azure_helper.http_with_retries(
"testurl",
headers={},
- )
- self.assertEqual(self.m_readurl.call_count, self.max_readurl_attempts)
-
- self.assertIsNotNone(
- re.search(
- r"Failed HTTP request with Azure endpoint \S* during "
- r"attempt \d+ with exception: \S*",
- self.logs.getvalue(),
- )
- )
- self.assertIsNone(
- re.search(
- r"Successful HTTP request with Azure endpoint \S* after "
- r"\d+ attempts",
- self.logs.getvalue(),
- )
+ retry_sleep=retry_sleep,
+ timeout_minutes=timeout_minutes,
)
- def test_http_with_retries_delayed_success_due_to_temporary_readurl_exc(
- self,
- ):
- self.m_readurl.side_effect = [
- SentinelException
- ] * self.periodic_logging_attempts + ["TestResp"]
- self.m_readurl.return_value = "TestResp"
-
- response = azure_helper.http_with_retries("testurl", headers={})
- self.assertEqual(response, self.m_readurl.return_value)
- self.assertEqual(
- self.m_readurl.call_count, self.periodic_logging_attempts + 1
- )
-
- # Ensure that cloud-init did sleep between each failed request
- self.assertEqual(
- self.m_sleep.call_count, self.periodic_logging_attempts
- )
- self.m_sleep.assert_called_with(self.sleep_duration_between_retries)
-
- def test_http_with_retries_long_delay_logs_periodic_failure_msg(self):
- self.m_readurl.side_effect = [
- SentinelException
- ] * self.periodic_logging_attempts + ["TestResp"]
- self.m_readurl.return_value = "TestResp"
-
- azure_helper.http_with_retries("testurl", headers={})
-
- self.assertEqual(
- self.m_readurl.call_count, self.periodic_logging_attempts + 1
- )
- self.assertIsNotNone(
- re.search(
- r"Failed HTTP request with Azure endpoint \S* during "
- r"attempt \d+ with exception: \S*",
- self.logs.getvalue(),
- )
- )
- self.assertIsNotNone(
- re.search(
- r"Successful HTTP request with Azure endpoint \S* after "
- r"\d+ attempts",
- self.logs.getvalue(),
- )
- )
-
- def test_http_with_retries_short_delay_does_not_log_periodic_failure_msg(
- self,
- ):
- self.m_readurl.side_effect = [SentinelException] * (
- self.periodic_logging_attempts - 1
- ) + ["TestResp"]
- self.m_readurl.return_value = "TestResp"
+ assert resp == "data"
+ assert self.m_readurl.call_count == try_count
+ assert self.m_time.mock_calls == (try_count) * [mock.call()]
+ assert self.m_sleep.mock_calls == (try_count - 1) * [
+ mock.call(retry_sleep)
+ ]
- azure_helper.http_with_retries("testurl", headers={})
- self.assertEqual(
- self.m_readurl.call_count, self.periodic_logging_attempts
- )
+ for i in range(1, try_count):
+ assert (
+ "cloudinit.sources.helpers.azure",
+ 10,
+ "Failed HTTP request with Azure endpoint testurl during "
+ "attempt %d with exception: retry (code=404 headers={})" % i,
+ ) in caplog.record_tuples
- self.assertIsNone(
- re.search(
- r"Failed HTTP request with Azure endpoint \S* during "
- r"attempt \d+ with exception: \S*",
- self.logs.getvalue(),
- )
- )
- self.assertIsNotNone(
- re.search(
- r"Successful HTTP request with Azure endpoint \S* after "
- r"\d+ attempts",
- self.logs.getvalue(),
- )
- )
-
- def test_http_with_retries_calls_url_helper_readurl_with_args_kwargs(self):
- testurl = mock.MagicMock()
- kwargs = {
- "headers": mock.MagicMock(),
- "data": mock.MagicMock(),
- }
- azure_helper.http_with_retries(testurl, **kwargs)
- self.m_readurl.assert_called_once_with(testurl, **kwargs, timeout=5)
+ assert (
+ "cloudinit.sources.helpers.azure",
+ 10,
+ "Successful HTTP request with Azure endpoint testurl after "
+ "%d attempts" % try_count,
+ ) in caplog.record_tuples
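Both parametrizations pin the new deadline semantics: time() is read once up front and once per failed attempt, each retry sleeps retry_sleep seconds, and the loop raises once the next attempt would start past timeout_minutes. A compact sketch consistent with the call counts asserted above (an assumed shape, not the helper's source):

from time import sleep, time

from cloudinit import url_helper

def http_with_retries_sketch(url, *, retry_sleep=5, timeout_minutes=20, **kwargs):
    deadline = time() + timeout_minutes * 60
    while True:
        try:
            return url_helper.readurl(url, **kwargs)
        except url_helper.UrlError:
            # Give up if the next attempt would begin after the deadline.
            if time() + retry_sleep >= deadline:
                raise
            sleep(retry_sleep)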
class TestOpenSSLManager(CiTestCase):
@@ -584,7 +569,7 @@ class TestGoalStateHealthReporter(CiTestCase):
self.addCleanup(patches.close)
patches.enter_context(
- mock.patch.object(azure_helper.time, "sleep", mock.MagicMock())
+ mock.patch.object(azure_helper, "sleep", mock.MagicMock())
)
self.read_file_or_url = patches.enter_context(
mock.patch.object(azure_helper.url_helper, "read_file_or_url")
@@ -993,7 +978,7 @@ class TestWALinuxAgentShim(CiTestCase):
mock.patch.object(azure_helper, "OpenSSLManager", autospec=True)
)
patches.enter_context(
- mock.patch.object(azure_helper.time, "sleep", mock.MagicMock())
+ mock.patch.object(azure_helper, "sleep", mock.MagicMock())
)
self.test_incarnation = "TestIncarnation"
@@ -1226,58 +1211,58 @@ class TestWALinuxAgentShim(CiTestCase):
def test_fetch_goalstate_during_report_ready_raises_exc_on_get_exc(self):
self.AzureEndpointHttpClient.return_value.get.side_effect = (
- SentinelException
+ url_helper.UrlError("retry", code=404)
)
shim = wa_shim(endpoint="test_endpoint")
self.assertRaises(
- SentinelException, shim.register_with_azure_and_fetch_data
+ url_helper.UrlError, shim.register_with_azure_and_fetch_data
)
def test_fetch_goalstate_during_report_failure_raises_exc_on_get_exc(self):
self.AzureEndpointHttpClient.return_value.get.side_effect = (
- SentinelException
+ url_helper.UrlError("retry", code=404)
)
shim = wa_shim(endpoint="test_endpoint")
self.assertRaises(
- SentinelException,
+ url_helper.UrlError,
shim.register_with_azure_and_report_failure,
description="TestDesc",
)
def test_fetch_goalstate_during_report_ready_raises_exc_on_parse_exc(self):
- self.GoalState.side_effect = SentinelException
+ self.GoalState.side_effect = url_helper.UrlError("retry", code=404)
shim = wa_shim(endpoint="test_endpoint")
self.assertRaises(
- SentinelException, shim.register_with_azure_and_fetch_data
+ url_helper.UrlError, shim.register_with_azure_and_fetch_data
)
def test_fetch_goalstate_during_report_failure_raises_exc_on_parse_exc(
self,
):
- self.GoalState.side_effect = SentinelException
+ self.GoalState.side_effect = url_helper.UrlError("retry", code=404)
shim = wa_shim(endpoint="test_endpoint")
self.assertRaises(
- SentinelException,
+ url_helper.UrlError,
shim.register_with_azure_and_report_failure,
description="TestDesc",
)
def test_failure_to_send_report_ready_health_doc_bubbles_up(self):
self.AzureEndpointHttpClient.return_value.post.side_effect = (
- SentinelException
+ url_helper.UrlError("retry", code=404)
)
shim = wa_shim(endpoint="test_endpoint")
self.assertRaises(
- SentinelException, shim.register_with_azure_and_fetch_data
+ url_helper.UrlError, shim.register_with_azure_and_fetch_data
)
def test_failure_to_send_report_failure_health_doc_bubbles_up(self):
self.AzureEndpointHttpClient.return_value.post.side_effect = (
- SentinelException
+ url_helper.UrlError("retry", code=404)
)
shim = wa_shim(endpoint="test_endpoint")
self.assertRaises(
- SentinelException,
+ url_helper.UrlError,
shim.register_with_azure_and_report_failure,
description="TestDesc",
)
@@ -1305,11 +1290,11 @@ class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase):
self.assertEqual(1, self.m_shim.return_value.clean_up.call_count)
def test_failure_in_registration_propagates_exc_and_calls_clean_up(self):
- self.m_shim.return_value.register_with_azure_and_fetch_data.side_effect = ( # noqa: E501
- SentinelException
+ self.m_shim.return_value.register_with_azure_and_fetch_data.side_effect = url_helper.UrlError( # noqa: E501
+ "retry", code=404
)
self.assertRaises(
- SentinelException,
+ url_helper.UrlError,
azure_helper.get_metadata_from_fabric,
"test_endpoint",
)
@@ -1414,4 +1399,295 @@ class TestGetMetadataGoalStateXMLAndReportFailureToFabric(CiTestCase):
)
+class TestChassisAssetTag:
+ def test_true_azure_cloud(self, caplog, mock_dmi_read_dmi_data):
+ mock_dmi_read_dmi_data.return_value = (
+ azure_helper.ChassisAssetTag.AZURE_CLOUD.value
+ )
+
+ asset_tag = azure_helper.ChassisAssetTag.query_system()
+
+ assert asset_tag == azure_helper.ChassisAssetTag.AZURE_CLOUD
+ assert caplog.record_tuples == [
+ (
+ "cloudinit.sources.helpers.azure",
+ 10,
+ "Azure chassis asset tag: "
+ "'7783-7084-3265-9085-8269-3286-77' (AZURE_CLOUD)",
+ )
+ ]
+
+ @pytest.mark.parametrize("tag", [None, "", "notazure"])
+ def test_false_on_nonazure_chassis(
+ self, caplog, mock_dmi_read_dmi_data, tag
+ ):
+ mock_dmi_read_dmi_data.return_value = tag
+
+ asset_tag = azure_helper.ChassisAssetTag.query_system()
+
+ assert asset_tag is None
+ assert caplog.record_tuples == [
+ (
+ "cloudinit.sources.helpers.azure",
+ 10,
+ "Non-Azure chassis asset tag: %r" % tag,
+ )
+ ]
+
+
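ChassisAssetTag behaves as an enum whose query_system classmethod maps the DMI chassis-asset-tag to a known value, logging and returning None for anything else. A sketch consistent with the assertions above:

import enum
import logging

from cloudinit import dmi

LOG = logging.getLogger(__name__)

class ChassisAssetTagSketch(enum.Enum):
    AZURE_CLOUD = "7783-7084-3265-9085-8269-3286-77"

    @classmethod
    def query_system(cls):
        asset_tag = dmi.read_dmi_data("chassis-asset-tag")
        try:
            tag = cls(asset_tag)
        except ValueError:
            LOG.debug("Non-Azure chassis asset tag: %r", asset_tag)
            return None
        LOG.debug("Azure chassis asset tag: %r (%s)", asset_tag, tag.name)
        return tag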
+class TestOvfEnvXml:
+ @pytest.mark.parametrize(
+ "ovf,expected",
+ [
+ # Defaults for construct_ovf_env() with explicit OvfEnvXml values.
+ (
+ construct_ovf_env(),
+ azure_helper.OvfEnvXml(
+ username="test-user",
+ hostname="test-host",
+ custom_data=None,
+ disable_ssh_password_auth=None,
+ public_keys=[],
+ preprovisioned_vm=False,
+ preprovisioned_vm_type=None,
+ ),
+ ),
+ # Defaults for construct_ovf_env() with default OvfEnvXml values.
+ (
+ construct_ovf_env(),
+ azure_helper.OvfEnvXml(
+ username="test-user", hostname="test-host"
+ ),
+ ),
+ # Username.
+ (
+ construct_ovf_env(username="other-user"),
+ azure_helper.OvfEnvXml(
+ username="other-user", hostname="test-host"
+ ),
+ ),
+ # Password.
+ (
+ construct_ovf_env(password="test-password"),
+ azure_helper.OvfEnvXml(
+ username="test-user",
+ hostname="test-host",
+ password="test-password",
+ ),
+ ),
+ # Hostname.
+ (
+ construct_ovf_env(hostname="other-host"),
+ azure_helper.OvfEnvXml(
+ username="test-user", hostname="other-host"
+ ),
+ ),
+ # Empty public keys.
+ (
+ construct_ovf_env(public_keys=[]),
+ azure_helper.OvfEnvXml(
+ username="test-user", hostname="test-host", public_keys=[]
+ ),
+ ),
+ # One public key.
+ (
+ construct_ovf_env(
+ public_keys=[
+ {"fingerprint": "fp1", "path": "path1", "value": ""}
+ ]
+ ),
+ azure_helper.OvfEnvXml(
+ username="test-user",
+ hostname="test-host",
+ public_keys=[
+ {"fingerprint": "fp1", "path": "path1", "value": ""}
+ ],
+ ),
+ ),
+ # Two public keys.
+ (
+ construct_ovf_env(
+ public_keys=[
+ {"fingerprint": "fp1", "path": "path1", "value": ""},
+ {
+ "fingerprint": "fp2",
+ "path": "path2",
+ "value": "somevalue",
+ },
+ ]
+ ),
+ azure_helper.OvfEnvXml(
+ username="test-user",
+ hostname="test-host",
+ public_keys=[
+ {"fingerprint": "fp1", "path": "path1", "value": ""},
+ {
+ "fingerprint": "fp2",
+ "path": "path2",
+ "value": "somevalue",
+ },
+ ],
+ ),
+ ),
+ # Custom data.
+ (
+ construct_ovf_env(custom_data="foo"),
+ azure_helper.OvfEnvXml(
+ username="test-user",
+ hostname="test-host",
+ custom_data=b"foo",
+ ),
+ ),
+ # Disable ssh password auth.
+ (
+ construct_ovf_env(disable_ssh_password_auth=True),
+ azure_helper.OvfEnvXml(
+ username="test-user",
+ hostname="test-host",
+ disable_ssh_password_auth=True,
+ ),
+ ),
+ # Preprovisioned vm.
+ (
+ construct_ovf_env(preprovisioned_vm=False),
+ azure_helper.OvfEnvXml(
+ username="test-user",
+ hostname="test-host",
+ preprovisioned_vm=False,
+ ),
+ ),
+ (
+ construct_ovf_env(preprovisioned_vm=True),
+ azure_helper.OvfEnvXml(
+ username="test-user",
+ hostname="test-host",
+ preprovisioned_vm=True,
+ ),
+ ),
+ # Preprovisioned vm type.
+ (
+ construct_ovf_env(preprovisioned_vm_type="testpps"),
+ azure_helper.OvfEnvXml(
+ username="test-user",
+ hostname="test-host",
+ preprovisioned_vm_type="testpps",
+ ),
+ ),
+ ],
+ )
+ def test_valid_ovf_scenarios(self, ovf, expected):
+ assert azure_helper.OvfEnvXml.parse_text(ovf) == expected
+
+ @pytest.mark.parametrize(
+ "ovf,error",
+ [
+ (
+ construct_ovf_env(username=None),
+ "No ovf-env.xml configuration for 'UserName'",
+ ),
+ (
+ construct_ovf_env(hostname=None),
+ "No ovf-env.xml configuration for 'HostName'",
+ ),
+ ],
+ )
+ def test_missing_required_fields(self, ovf, error):
+ with pytest.raises(azure_helper.BrokenAzureDataSource) as exc_info:
+ azure_helper.OvfEnvXml.parse_text(ovf)
+
+ assert str(exc_info.value) == error
+
+ def test_multiple_sections_fails(self):
+ ovf = """\
+ <ns0:Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:ns0="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:ns1="http://schemas.microsoft.com/windowsazure"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <ns1:ProvisioningSection>
+ <ns1:Version>1.0</ns1:Version>
+ <ns1:LinuxProvisioningConfigurationSet>
+ <ns1:ConfigurationSetType>
+ LinuxProvisioningConfiguration
+ </ns1:ConfigurationSetType>
+ </ns1:LinuxProvisioningConfigurationSet>
+ </ns1:ProvisioningSection>
+ <ns1:ProvisioningSection>
+ </ns1:ProvisioningSection>
+ </ns0:Environment>"""
+
+ with pytest.raises(azure_helper.BrokenAzureDataSource) as exc_info:
+ azure_helper.OvfEnvXml.parse_text(ovf)
+
+ assert (
+ str(exc_info.value)
+ == "Multiple configuration matches in ovf-exml.xml "
+ "for 'ProvisioningSection' (2)"
+ )
+
+ def test_multiple_properties_fails(self):
+ ovf = """\
+ <ns0:Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:ns0="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:ns1="http://schemas.microsoft.com/windowsazure"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <ns1:ProvisioningSection>
+ <ns1:LinuxProvisioningConfigurationSet>
+ <ns1:ConfigurationSetType>
+ LinuxProvisioningConfiguration
+ </ns1:ConfigurationSetType>
+ <ns1:HostName>test-host</ns1:HostName>
+ <ns1:HostName>test-host2</ns1:HostName>
+ <ns1:UserName>test-user</ns1:UserName>
+ </ns1:LinuxProvisioningConfigurationSet>
+ </ns1:ProvisioningSection>
+ <ns1:PlatformSettingsSection>
+ <ns1:Version>1.0</ns1:Version>
+ <ns1:PlatformSettings>
+ </ns1:PlatformSettings>
+ </ns1:PlatformSettingsSection>
+ </ns0:Environment>"""
+
+ with pytest.raises(azure_helper.BrokenAzureDataSource) as exc_info:
+ azure_helper.OvfEnvXml.parse_text(ovf)
+
+ assert (
+ str(exc_info.value)
+ == "Multiple configuration matches in ovf-exml.xml "
+ "for 'HostName' (2)"
+ )
+
+ def test_non_azure_ovf(self):
+ ovf = """\
+ <ns0:Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:ns0="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ </ns0:Environment>"""
+
+ with pytest.raises(azure_helper.NonAzureDataSource) as exc_info:
+ azure_helper.OvfEnvXml.parse_text(ovf)
+
+ assert (
+ str(exc_info.value)
+ == "Ignoring non-Azure ovf-env.xml: ProvisioningSection not found"
+ )
+
+ @pytest.mark.parametrize(
+ "ovf,error",
+ [
+ ("", "Invalid ovf-env.xml: no element found: line 1, column 0"),
+ (
+ "<!!!!>",
+ "Invalid ovf-env.xml: not well-formed (invalid token): "
+ "line 1, column 2",
+ ),
+ ("badxml", "Invalid ovf-env.xml: syntax error: line 1, column 0"),
+ ],
+ )
+ def test_invalid_xml(self, ovf, error):
+ with pytest.raises(azure_helper.BrokenAzureDataSource) as exc_info:
+ azure_helper.OvfEnvXml.parse_text(ovf)
+
+ assert str(exc_info.value) == error
+
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_ec2.py b/tests/unittests/sources/test_ec2.py
index b7476391..c12613ec 100644
--- a/tests/unittests/sources/test_ec2.py
+++ b/tests/unittests/sources/test_ec2.py
@@ -548,7 +548,7 @@ class TestEc2(test_helpers.HttprettyTestCase):
):
del responses.mock._urls[index]
elif hasattr(responses.mock, "_matches"):
- # Can be removed when Focal and Impish are EOL
+ # Can be removed when Focal is EOL
for index, response in enumerate(responses.mock._matches):
if response.url.startswith(
"http://169.254.169.254/2009-04-04/meta-data/"
diff --git a/tests/unittests/test_features.py b/tests/unittests/test_features.py
index 794a9654..94c7ae13 100644
--- a/tests/unittests/test_features.py
+++ b/tests/unittests/test_features.py
@@ -44,11 +44,15 @@ def create_override(request):
class TestFeatures:
+ """default pytest-xdist behavior may fail due to these tests"""
+
+ @pytest.mark.serial
def test_feature_without_override(self):
from cloudinit.features import ERROR_ON_USER_DATA_FAILURE
assert ERROR_ON_USER_DATA_FAILURE is True
+ @pytest.mark.serial
@pytest.mark.parametrize(
"create_override",
[{"ERROR_ON_USER_DATA_FAILURE": False}],
@@ -59,6 +63,7 @@ class TestFeatures:
assert ERROR_ON_USER_DATA_FAILURE is False
+ @pytest.mark.serial
@pytest.mark.parametrize(
"create_override", [{"SPAM": True}], indirect=True
)
diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py
index 7494b438..afd9056a 100644
--- a/tests/unittests/test_net_activators.py
+++ b/tests/unittests/test_net_activators.py
@@ -6,6 +6,7 @@ import pytest
from cloudinit.net.activators import (
DEFAULT_PRIORITY,
+ NAME_TO_ACTIVATOR,
IfUpDownActivator,
NetplanActivator,
NetworkdActivator,
@@ -79,23 +80,23 @@ def unavailable_mocks():
class TestSearchAndSelect:
- def test_defaults(self, available_mocks):
- resp = search_activator()
- assert resp == DEFAULT_PRIORITY
+ def test_empty_list(self, available_mocks):
+ resp = search_activator(priority=DEFAULT_PRIORITY, target=None)
+ assert resp == [NAME_TO_ACTIVATOR[name] for name in DEFAULT_PRIORITY]
activator = select_activator()
- assert activator == DEFAULT_PRIORITY[0]
+ assert activator == NAME_TO_ACTIVATOR[DEFAULT_PRIORITY[0]]
def test_priority(self, available_mocks):
- new_order = [NetplanActivator, NetworkManagerActivator]
- resp = search_activator(priority=new_order)
- assert resp == new_order
+ new_order = ["netplan", "network-manager"]
+ resp = search_activator(priority=new_order, target=None)
+ assert resp == [NAME_TO_ACTIVATOR[name] for name in new_order]
activator = select_activator(priority=new_order)
- assert activator == new_order[0]
+ assert activator == NAME_TO_ACTIVATOR[new_order[0]]
def test_target(self, available_mocks):
- search_activator(target="/tmp")
+ search_activator(priority=DEFAULT_PRIORITY, target="/tmp")
assert "/tmp" == available_mocks.m_which.call_args[1]["target"]
select_activator(target="/tmp")
@@ -106,20 +107,22 @@ class TestSearchAndSelect:
return_value=False,
)
def test_first_not_available(self, m_available, available_mocks):
- resp = search_activator()
- assert resp == DEFAULT_PRIORITY[1:]
+ resp = search_activator(priority=DEFAULT_PRIORITY, target=None)
+ assert resp == [
+ NAME_TO_ACTIVATOR[activator] for activator in DEFAULT_PRIORITY[1:]
+ ]
resp = select_activator()
- assert resp == DEFAULT_PRIORITY[1]
+ assert resp == NAME_TO_ACTIVATOR[DEFAULT_PRIORITY[1]]
def test_priority_not_exist(self, available_mocks):
with pytest.raises(ValueError):
- search_activator(priority=["spam", "eggs"])
+ search_activator(priority=["spam", "eggs"], target=None)
with pytest.raises(ValueError):
select_activator(priority=["spam", "eggs"])
def test_none_available(self, unavailable_mocks):
- resp = search_activator()
+ resp = search_activator(priority=DEFAULT_PRIORITY, target=None)
assert resp == []
with pytest.raises(NoActivatorException):
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index b8e16e31..9722ddd5 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -342,24 +342,6 @@ OS_RELEASE_OPENMANDRIVA = dedent(
)
-class FakeCloud(object):
- def __init__(self, hostname, fqdn):
- self.hostname = hostname
- self.fqdn = fqdn
- self.calls = []
-
- def get_hostname(self, fqdn=None, metadata_only=None):
- myargs = {}
- if fqdn is not None:
- myargs["fqdn"] = fqdn
- if metadata_only is not None:
- myargs["metadata_only"] = metadata_only
- self.calls.append(myargs)
- if fqdn:
- return DataSourceHostname(self.fqdn, False)
- return DataSourceHostname(self.hostname, False)
-
-
class TestUtil:
def test_parse_mount_info_no_opts_no_arg(self):
result = util.parse_mount_info("/home", MOUNT_INFO, LOG)
@@ -613,37 +595,48 @@ class TestGetHostnameFqdn(CiTestCase):
def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self):
"""When cfg has a hostname without a '.' query cloud.get_hostname."""
- mycloud = FakeCloud("cloudhost", "cloudhost.mycloud.com")
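+ # A MagicMock stands in for the removed FakeCloud helper; return_value
+ # makes every get_hostname() call return the FQDN DataSourceHostname.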
+ cloud = mock.MagicMock()
+ cloud.get_hostname.return_value = DataSourceHostname(
+ "cloudhost.mycloud.com", False
+ )
hostname, fqdn, _ = util.get_hostname_fqdn(
- cfg={"hostname": "myhost"}, cloud=mycloud
+ cfg={"hostname": "myhost"}, cloud=cloud
)
self.assertEqual("myhost", hostname)
self.assertEqual("cloudhost.mycloud.com", fqdn)
- self.assertEqual(
- [{"fqdn": True, "metadata_only": False}], mycloud.calls
- )
+ assert [
+ mock.call(fqdn=True, metadata_only=False)
+ ] == cloud.get_hostname.call_args_list
def test_get_hostname_fqdn_from_without_fqdn_or_hostname(self):
"""When cfg has neither hostname nor fqdn cloud.get_hostname."""
- mycloud = FakeCloud("cloudhost", "cloudhost.mycloud.com")
- hostname, fqdn, _ = util.get_hostname_fqdn(cfg={}, cloud=mycloud)
+ cloud = mock.MagicMock()
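+ # side_effect returns the FQDN result on the first call and the short
+ # hostname on the second, matching the two calls asserted below.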
+ cloud.get_hostname.side_effect = (
+ DataSourceHostname("cloudhost.mycloud.com", False),
+ DataSourceHostname("cloudhost", False),
+ )
+ hostname, fqdn, _ = util.get_hostname_fqdn(cfg={}, cloud=cloud)
self.assertEqual("cloudhost", hostname)
self.assertEqual("cloudhost.mycloud.com", fqdn)
- self.assertEqual(
- [{"fqdn": True, "metadata_only": False}, {"metadata_only": False}],
- mycloud.calls,
- )
+ assert [
+ mock.call(fqdn=True, metadata_only=False),
+ mock.call(metadata_only=False),
+ ] == cloud.get_hostname.call_args_list
def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self):
"""Calls to cloud.get_hostname pass the metadata_only parameter."""
- mycloud = FakeCloud("cloudhost", "cloudhost.mycloud.com")
- _hn, _fqdn, _def_hostname = util.get_hostname_fqdn(
- cfg={}, cloud=mycloud, metadata_only=True
+ cloud = mock.MagicMock()
+ cloud.get_hostname.side_effect = (
+ DataSourceHostname("cloudhost.mycloud.com", False),
+ DataSourceHostname("cloudhost", False),
)
- self.assertEqual(
- [{"fqdn": True, "metadata_only": True}, {"metadata_only": True}],
- mycloud.calls,
+ _hn, _fqdn, _def_hostname = util.get_hostname_fqdn(
+ cfg={}, cloud=cloud, metadata_only=True
)
+ assert [
+ mock.call(fqdn=True, metadata_only=True),
+ mock.call(metadata_only=True),
+ ] == cloud.get_hostname.call_args_list
class TestBlkid(CiTestCase):
@@ -2088,7 +2081,8 @@ class TestMultiLog(helpers.FilesystemMockingTestCase):
self._createConsole(self.root)
logged_string = "something very important"
util.multi_log(logged_string)
- self.assertEqual(logged_string, open("/dev/console").read())
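+ # A context manager closes the file handle, avoiding a ResourceWarning.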
+ with open("/dev/console") as f:
+ self.assertEqual(logged_string, f.read())
def test_logs_dont_go_to_stdout_if_console_exists(self):
self._createConsole(self.root)
diff --git a/tests/unittests/util.py b/tests/unittests/util.py
index 3f0fe400..c7dc73b9 100644
--- a/tests/unittests/util.py
+++ b/tests/unittests/util.py
@@ -1,10 +1,14 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from unittest import mock
+
from cloudinit import cloud, distros, helpers
from cloudinit.sources import DataSourceHostname
from cloudinit.sources.DataSourceNone import DataSourceNone
-def get_cloud(distro=None, paths=None, sys_cfg=None, metadata=None):
+def get_cloud(
+ distro=None, paths=None, sys_cfg=None, metadata=None, mocked_distro=False
+):
"""Obtain a "cloud" that can be used for testing.
Modules take a 'cloud' parameter to call into things that are
@@ -18,6 +22,8 @@ def get_cloud(distro=None, paths=None, sys_cfg=None, metadata=None):
sys_cfg = sys_cfg or {}
cls = distros.fetch(distro) if distro else MockDistro
mydist = cls(distro, sys_cfg, paths)
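+ # mock.Mock(wraps=...) records calls for later assertions while still
+ # delegating each method to the real distro object.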
+ if mocked_distro:
+ mydist = mock.Mock(wraps=mydist)
myds = DataSourceTesting(sys_cfg, mydist, paths)
if metadata:
myds.metadata.update(metadata)
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index 16a89f9b..b53dc757 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -5,9 +5,9 @@ akutz
AlexBaranowski
Aman306
andgein
+andrew-lee-metaswitch
andrewbogott
andrewlukoshko
-andrew-lee-metaswitch
antonyc
aswinrajamannar
beantaxi
@@ -19,6 +19,7 @@ bmhughes
candlerb
cawamata
cclauss
+chifac08
chrislalos
ciprianbadescu
citrus-it
@@ -61,6 +62,7 @@ lkundrak
lucasmoura
lucendio
lungj
+magnetikonline
mal
mamercad
manuelisimo
@@ -71,6 +73,7 @@ megian
michaelrommel
mitechie
nazunalika
+netcho
nicolasbock
nishigori
olivierlemasle
@@ -83,17 +86,20 @@ renanrodrigo
rhansen
riedel
rongz609
+SadeghHayeri
sarahwzadara
+scorpion44
shi2wei3
slingamn
slyon
smoser
sshedi
+sstallion
stappersg
steverweber
t-8ch
-TheRealFalcon
taoyama
+TheRealFalcon
thetoolsmith
timothegenzmer
tnt-dev
@@ -108,4 +114,5 @@ wschoot
xiachen-rh
xnox
yangzz-97
+yawkat
zhuzaifangxuele
diff --git a/tox.ini b/tox.ini
index 26588585..1fcd26bc 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,11 @@
[tox]
-envlist = py3, lowest-supported-dev, black, flake8, isort, mypy, pylint
+envlist =
+ py3,
+ black,
+ flake8,
+ isort,
+ mypy,
+ pylint
recreate = True
[doc8]
@@ -15,6 +21,8 @@ passenv=
[format_deps]
black==22.3.0
flake8==4.0.1
+hypothesis==6.31.6
+hypothesis_jsonschema==0.20.1
isort==5.10.1
mypy==0.950
pylint==2.13.9
@@ -24,6 +32,7 @@ types-oauthlib==3.1.6
types-PyYAML==6.0.4
types-requests==2.27.8
types-setuptools==57.4.9
+typing-extensions==4.1.1
[testenv:flake8]
deps =
@@ -49,19 +58,24 @@ commands = {envpython} -m isort . --check-only
[testenv:mypy]
deps =
+ hypothesis=={[format_deps]hypothesis}
+ hypothesis_jsonschema=={[format_deps]hypothesis_jsonschema}
mypy=={[format_deps]mypy}
+ pytest=={[format_deps]pytest}
types-jsonschema=={[format_deps]types-jsonschema}
types-oauthlib=={[format_deps]types-oauthlib}
types-pyyaml=={[format_deps]types-PyYAML}
types-requests=={[format_deps]types-requests}
types-setuptools=={[format_deps]types-setuptools}
- pytest=={[format_deps]pytest}
+ typing-extensions=={[format_deps]typing-extensions}
commands = {envpython} -m mypy cloudinit/ tests/ tools/
[testenv:check_format]
deps =
black=={[format_deps]black}
flake8=={[format_deps]flake8}
+ hypothesis=={[format_deps]hypothesis}
+ hypothesis_jsonschema=={[format_deps]hypothesis_jsonschema}
isort=={[format_deps]isort}
mypy=={[format_deps]mypy}
pylint=={[format_deps]pylint}
@@ -71,6 +85,7 @@ deps =
types-pyyaml=={[format_deps]types-PyYAML}
types-requests=={[format_deps]types-requests}
types-setuptools=={[format_deps]types-setuptools}
+ typing-extensions=={[format_deps]typing-extensions}
-r{toxinidir}/test-requirements.txt
-r{toxinidir}/integration-requirements.txt
commands =
@@ -118,9 +133,35 @@ deps =
-r{toxinidir}/test-requirements.txt
commands = {envpython} -m pytest \
--durations 10 \
+ -m "not hypothesis_slow" \
+ {posargs:--cov=cloudinit --cov-branch tests/unittests}
+
+# experimental
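+# py3-fast parallelizes via pytest-xdist (-n auto); tests marked "serial"
+# are deselected because they cannot run in parallel workers.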
+[testenv:py3-fast]
+deps =
+ -r{toxinidir}/test-requirements.txt
+ pytest-xdist
+commands = {envpython} -m pytest -n auto -m "not hypothesis_slow and not serial" \
+ {posargs:tests/unittests}
+
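+# Runs only the property-based tests marked hypothesis_slow, which are
+# excluded from the default py3 run above.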
+[testenv:hypothesis-slow]
+deps =
+ hypothesis==6.31.6
+ hypothesis_jsonschema==0.20.1
+ -r{toxinidir}/test-requirements.txt
+commands = {envpython} -m pytest \
+ -m hypothesis_slow \
+ {posargs:--hypothesis-show-statistics tests/unittests}
+
+[testenv:py3-leak]
+deps = {[testenv:py3]deps}
+#commands = {envpython} -X tracemalloc=40 -Werror::ResourceWarning:cloudinit -m pytest \
+commands = {envpython} -X tracemalloc=40 -Wall -m pytest \
+ --durations 10 \
{posargs:--cov=cloudinit --cov-branch \
tests/unittests}
+
[lowest-supported-deps]
# Tox is going to install requirements from pip. This is fine for
# testing python version compatibility, but when we build cloud-init, we are
@@ -257,25 +298,27 @@ addopts = --strict
log_format = %(asctime)s %(levelname)-9s %(name)s:%(filename)s:%(lineno)d %(message)s
log_date_format = %Y-%m-%d %H:%M:%S
markers =
- allow_subp_for: allow subp usage for the given commands (disable_subp_usage)
+ adhoc: only run on adhoc basis, not in any CI environment (travis or jenkins)
allow_all_subp: allow all subp usage (disable_subp_usage)
+ allow_subp_for: allow subp usage for the given commands (disable_subp_usage)
+ azure: test will only run on Azure platform
ci: run this integration test as part of CI test runs
ds_sys_cfg: a sys_cfg dict to be used by datasource fixtures
ec2: test will only run on EC2 platform
gce: test will only run on GCE platform
- azure: test will only run on Azure platform
- oci: test will only run on OCI platform
- openstack: test will only run on openstack platform
+ hypothesis_slow: hypothesis test too slow to run as unit test
+ instance_name: the name to be used for the test instance
+ is_iscsi: whether an instance has iscsi net cfg or not
lxd_config_dict: set the config_dict passed on LXD instance creation
lxd_container: test will only run in LXD container
lxd_setup: specify callable to be called between init and start
lxd_use_exec: `execute` will use `lxc exec` instead of SSH
lxd_vm: test will only run in LXD VM
- not_bionic: test cannot run on the bionic release
no_container: test cannot run in a container
- user_data: the user data to be passed to the test instance
- instance_name: the name to be used for the test instance
+ not_bionic: test cannot run on the bionic release
+ oci: test will only run on OCI platform
+ openstack: test will only run on openstack platform
+ serial: tests that cannot run in parallel; deselected in the py3-fast env
ubuntu: this test should run on Ubuntu
unstable: skip this test because it is flaky
- adhoc: only run on adhoc basis, not in any CI environment (travis or jenkins)
- is_iscsi: whether is an instance has iscsi net cfg or not
+ user_data: the user data to be passed to the test instance