summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChad Smith <chad.smith@canonical.com>2023-03-21 14:43:52 -0600
committerChad Smith <chad.smith@canonical.com>2023-03-21 14:43:52 -0600
commit5eac937e26f6df2233a944d0dd5961f191b139ae (patch)
tree4edfba09d0c2dbd4317d98551e6c399e70b74c95
parentfe5bfa1e8c300b5ccf049246c11664f73a8fc7be (diff)
parent85b2fbc2a811d6efb703284bea93b35c5cdd4135 (diff)
downloadcloud-init-git-5eac937e26f6df2233a944d0dd5961f191b139ae.tar.gz
merge from upstream/main at 23.1-27-g85b2fbc2
-rw-r--r--.github/workflows/integration.yml2
-rw-r--r--cloudinit/analyze/show.py15
-rw-r--r--cloudinit/apport.py24
-rwxr-xr-xcloudinit/cmd/devel/hotplug_hook.py2
-rw-r--r--cloudinit/config/cc_ansible.py1
-rw-r--r--cloudinit/config/cc_apt_configure.py9
-rw-r--r--cloudinit/config/cc_ca_certs.py19
-rw-r--r--cloudinit/config/cc_disk_setup.py38
-rw-r--r--cloudinit/config/cc_grub_dpkg.py107
-rw-r--r--cloudinit/config/cc_lxd.py4
-rw-r--r--cloudinit/config/cc_refresh_rmc_and_interface.py17
-rw-r--r--cloudinit/config/cc_reset_rmc.py2
-rw-r--r--cloudinit/config/schema.py13
-rw-r--r--cloudinit/config/schemas/schema-cloud-config-v1.json7
-rw-r--r--cloudinit/distros/netbsd.py7
-rw-r--r--cloudinit/distros/parsers/ifconfig.py7
-rw-r--r--cloudinit/distros/rhel.py7
-rw-r--r--cloudinit/helpers.py11
-rw-r--r--cloudinit/net/__init__.py2
-rw-r--r--cloudinit/net/activators.py38
-rw-r--r--cloudinit/net/bsd.py3
-rw-r--r--cloudinit/net/eni.py14
-rw-r--r--cloudinit/net/ephemeral.py16
-rw-r--r--cloudinit/net/netplan.py20
-rw-r--r--cloudinit/net/network_state.py7
-rw-r--r--cloudinit/net/networkd.py16
-rw-r--r--cloudinit/net/renderer.py21
-rw-r--r--cloudinit/sources/DataSourceAzure.py44
-rw-r--r--cloudinit/sources/DataSourceCloudSigma.py5
-rw-r--r--cloudinit/sources/DataSourceDigitalOcean.py2
-rw-r--r--cloudinit/sources/DataSourceExoscale.py6
-rw-r--r--cloudinit/sources/DataSourceHetzner.py2
-rw-r--r--cloudinit/sources/DataSourceLXD.py7
-rw-r--r--cloudinit/sources/DataSourceNWCS.py24
-rw-r--r--cloudinit/sources/DataSourceOpenNebula.py6
-rw-r--r--cloudinit/sources/DataSourceOracle.py7
-rw-r--r--cloudinit/sources/DataSourceScaleway.py46
-rw-r--r--cloudinit/sources/DataSourceSmartOS.py3
-rw-r--r--cloudinit/sources/DataSourceUpCloud.py4
-rw-r--r--cloudinit/sources/DataSourceVultr.py8
-rw-r--r--cloudinit/sources/__init__.py46
-rw-r--r--cloudinit/sources/helpers/cloudsigma.py4
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config.py5
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_file.py36
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_namespace.py15
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_nic.py12
-rw-r--r--cloudinit/sources/helpers/vmware/imc/ipv4_mode.py36
-rw-r--r--cloudinit/stages.py3
-rw-r--r--cloudinit/templater.py15
-rw-r--r--cloudinit/user_data.py2
-rw-r--r--cloudinit/util.py27
-rw-r--r--doc/rtd/conf.py42
-rw-r--r--doc/rtd/explanation/instancedata.rst2
-rw-r--r--doc/rtd/static/css/custom.css248
-rwxr-xr-xpackages/bddeb248
-rw-r--r--packages/debian/compat1
-rwxr-xr-xpackages/debian/rules28
-rw-r--r--tests/integration_tests/clouds.py2
-rw-r--r--tests/integration_tests/conftest.py2
-rw-r--r--tests/integration_tests/datasources/test_detect_openstack.py43
-rw-r--r--tests/integration_tests/datasources/test_oci_networking.py39
-rw-r--r--tests/integration_tests/instances.py4
-rw-r--r--tests/integration_tests/modules/test_combined.py6
-rw-r--r--tests/unittests/config/test_apt_configure_sources_list_v1.py10
-rw-r--r--tests/unittests/config/test_apt_source_v3.py5
-rw-r--r--tests/unittests/config/test_cc_ca_certs.py12
-rw-r--r--tests/unittests/config/test_cc_disk_setup.py2
-rw-r--r--tests/unittests/config/test_cc_growpart.py2
-rw-r--r--tests/unittests/config/test_cc_grub_dpkg.py97
-rw-r--r--tests/unittests/config/test_cc_power_state_change.py2
-rw-r--r--tests/unittests/config/test_cc_refresh_rmc_and_interface.py9
-rw-r--r--tests/unittests/config/test_cc_set_hostname.py2
-rw-r--r--tests/unittests/helpers.py2
-rw-r--r--tests/unittests/sources/test___init__.py40
-rw-r--r--tests/unittests/sources/test_azure.py70
-rw-r--r--tests/unittests/sources/test_cloudsigma.py7
-rw-r--r--tests/unittests/sources/test_exoscale.py16
-rw-r--r--tests/unittests/sources/test_init.py5
-rw-r--r--tests/unittests/sources/test_nwcs.py14
-rw-r--r--tests/unittests/sources/test_opennebula.py20
-rw-r--r--tests/unittests/sources/test_openstack.py17
-rw-r--r--tests/unittests/sources/test_oracle.py39
-rw-r--r--tests/unittests/sources/test_scaleway.py31
-rw-r--r--tests/unittests/sources/vmware/test_vmware_config_file.py11
-rw-r--r--tests/unittests/test_apport.py75
-rw-r--r--tests/unittests/test_cli.py8
-rw-r--r--tests/unittests/test_features.py2
-rw-r--r--tests/unittests/test_net_activators.py25
-rw-r--r--tests/unittests/test_netinfo.py4
-rw-r--r--tests/unittests/test_util.py17
-rw-r--r--tools/.github-cla-signers2
91 files changed, 1170 insertions, 835 deletions
diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml
index 889897a3..c8b32d61 100644
--- a/.github/workflows/integration.yml
+++ b/.github/workflows/integration.yml
@@ -37,6 +37,8 @@ jobs:
ubuntu-dev-tools
sudo sbuild-adduser $USER
cp /usr/share/doc/sbuild/examples/example.sbuildrc /home/$USER/.sbuildrc
+ # Install all build and test dependencies
+ ./tools/read-dependencies -r requirements.txt -r test-requirements.txt -d ubuntu -s -i
- name: Build package
run: |
./packages/bddeb -S -d --release ${{ env.RELEASE }}
diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py
index 8ce649de..8d5866e3 100644
--- a/cloudinit/analyze/show.py
+++ b/cloudinit/analyze/show.py
@@ -4,7 +4,6 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import base64
import datetime
import json
import os
@@ -66,20 +65,6 @@ def format_record(msg, event):
return msg.format(**event)
-def dump_event_files(event):
- content = dict((k, v) for k, v in event.items() if k not in ["content"])
- files = content["files"]
- saved = []
- for f in files:
- fname = f["path"]
- fn_local = os.path.basename(fname)
- fcontent = base64.b64decode(f["content"]).decode("ascii")
- util.write_file(fn_local, fcontent)
- saved.append(fn_local)
-
- return saved
-
-
def event_name(event):
if event:
return event.get("name")
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index e42ecf8e..dead3059 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -4,6 +4,7 @@
"""Cloud-init apport interface"""
+import json
import os
from cloudinit.cmd.devel import read_cfg_paths
@@ -101,8 +102,29 @@ def attach_hwinfo(report, ui=None):
def attach_cloud_info(report, ui=None):
- """Prompt for cloud details if available."""
+ """Prompt for cloud details if instance-data unavailable.
+
+ When we have valid _get_instance_data, apport/generic-hooks/cloud_init.py
+ provides CloudName, CloudID, CloudPlatform and CloudSubPlatform.
+
+ Apport/generic-hooks are delivered by cloud-init's downstream branches
+ ubuntu/(devel|kinetic|jammy|focal|bionic) so they will not be represented
+ in upstream main.
+
+ In the absence of a viable instance-data.json, prompt for the cloud below.
+ """
+
if ui:
+ paths = read_cfg_paths()
+ try:
+ with open(paths.get_runpath("instance_data")) as file:
+ instance_data = json.load(file)
+ assert instance_data.get("v1", {}).get("cloud_name")
+ return # Valid instance-data means generic-hooks will report
+ except (IOError, json.decoder.JSONDecodeError, AssertionError):
+ pass
+
+ # No valid /run/cloud/instance-data.json on system. Prompt for cloud.
prompt = "Is this machine running in a cloud environment?"
response = ui.yesno(prompt)
if response is None:
diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py
index 560857ef..78085735 100755
--- a/cloudinit/cmd/devel/hotplug_hook.py
+++ b/cloudinit/cmd/devel/hotplug_hook.py
@@ -168,7 +168,7 @@ def is_enabled(hotplug_init, subsystem):
try:
scope = SUBSYSTEM_PROPERTES_MAP[subsystem][1]
except KeyError as e:
- raise Exception(
+ raise RuntimeError(
"hotplug-hook: cannot handle events for subsystem: {}".format(
subsystem
)
diff --git a/cloudinit/config/cc_ansible.py b/cloudinit/config/cc_ansible.py
index 876dbc6b..5392e605 100644
--- a/cloudinit/config/cc_ansible.py
+++ b/cloudinit/config/cc_ansible.py
@@ -39,6 +39,7 @@ meta: MetaSchema = {
dedent(
"""\
ansible:
+ package_name: ansible-core
install_method: distro
pull:
url: "https://github.com/holmanb/vmboot.git"
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 17c2fb58..e8de000a 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -383,15 +383,6 @@ def rename_apt_lists(new_mirrors, target, arch):
LOG.warning("Failed to rename apt list:", exc_info=True)
-def mirror_to_placeholder(tmpl, mirror, placeholder):
- """mirror_to_placeholder
- replace the specified mirror in a template with a placeholder string
- Checks for existance of the expected mirror and warns if not found"""
- if mirror not in tmpl:
- LOG.warning("Expected mirror '%s' not found in: %s", mirror, tmpl)
- return tmpl.replace(mirror, placeholder)
-
-
def map_known_suites(suite):
"""there are a few default names which will be auto-extended.
This comes at the inability to use those names literally as suites,
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index b1c4a2bf..54153638 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -177,14 +177,20 @@ def disable_system_ca_certs(distro_cfg):
@param distro_cfg: A hash providing _distro_ca_certs_configs function.
"""
- if distro_cfg["ca_cert_config"] is None:
+
+ ca_cert_cfg_fn = distro_cfg["ca_cert_config"]
+
+ if not ca_cert_cfg_fn or not os.path.exists(ca_cert_cfg_fn):
return
+
header_comment = (
"# Modified by cloud-init to deselect certs due to user-data"
)
+
added_header = False
- if os.stat(distro_cfg["ca_cert_config"]).st_size != 0:
- orig = util.load_file(distro_cfg["ca_cert_config"])
+
+ if os.stat(ca_cert_cfg_fn).st_size:
+ orig = util.load_file(ca_cert_cfg_fn)
out_lines = []
for line in orig.splitlines():
if line == header_comment:
@@ -197,9 +203,10 @@ def disable_system_ca_certs(distro_cfg):
out_lines.append(header_comment)
added_header = True
out_lines.append("!" + line)
- util.write_file(
- distro_cfg["ca_cert_config"], "\n".join(out_lines) + "\n", omode="wb"
- )
+
+ util.write_file(
+ ca_cert_cfg_fn, "\n".join(out_lines) + "\n", omode="wb"
+ )
def remove_default_ca_certs(distro_cfg):
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 4aae5530..ecaca079 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -274,7 +274,7 @@ def enumerate_disk(device, nodeps=False):
try:
info, _err = subp.subp(lsblk_cmd)
except Exception as e:
- raise Exception(
+ raise RuntimeError(
"Failed during disk check for %s\n%s" % (device, e)
) from e
@@ -338,7 +338,7 @@ def check_fs(device):
try:
out, _err = subp.subp(blkid_cmd, rcs=[0, 2])
except Exception as e:
- raise Exception(
+ raise RuntimeError(
"Failed during disk check for %s\n%s" % (device, e)
) from e
@@ -444,7 +444,7 @@ def get_hdd_size(device):
size_in_bytes, _ = subp.subp([BLKDEV_CMD, "--getsize64", device])
sector_size, _ = subp.subp([BLKDEV_CMD, "--getss", device])
except Exception as e:
- raise Exception("Failed to get %s size\n%s" % (device, e)) from e
+ raise RuntimeError("Failed to get %s size\n%s" % (device, e)) from e
return int(size_in_bytes) / int(sector_size)
@@ -462,7 +462,7 @@ def check_partition_mbr_layout(device, layout):
try:
out, _err = subp.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
- raise Exception(
+ raise RuntimeError(
"Error running partition command on %s\n%s" % (device, e)
) from e
@@ -493,7 +493,7 @@ def check_partition_gpt_layout(device, layout):
try:
out, _err = subp.subp(prt_cmd, update_env=LANG_C_ENV)
except Exception as e:
- raise Exception(
+ raise RuntimeError(
"Error running partition command on %s\n%s" % (device, e)
) from e
@@ -542,7 +542,7 @@ def check_partition_layout(table_type, device, layout):
elif "mbr" == table_type:
found_layout = check_partition_mbr_layout(device, layout)
else:
- raise Exception("Unable to determine table type")
+ raise RuntimeError("Unable to determine table type")
LOG.debug(
"called check_partition_%s_layout(%s, %s), returned: %s",
@@ -595,11 +595,11 @@ def get_partition_mbr_layout(size, layout):
if (len(layout) == 0 and isinstance(layout, list)) or not isinstance(
layout, list
):
- raise Exception("Partition layout is invalid")
+ raise RuntimeError("Partition layout is invalid")
last_part_num = len(layout)
if last_part_num > 4:
- raise Exception("Only simply partitioning is allowed.")
+ raise RuntimeError("Only simply partitioning is allowed.")
part_definition = []
part_num = 0
@@ -610,7 +610,9 @@ def get_partition_mbr_layout(size, layout):
if isinstance(part, list):
if len(part) != 2:
- raise Exception("Partition was incorrectly defined: %s" % part)
+ raise RuntimeError(
+ "Partition was incorrectly defined: %s" % part
+ )
percent, part_type = part
part_size = int(float(size) * (float(percent) / 100))
@@ -622,7 +624,7 @@ def get_partition_mbr_layout(size, layout):
sfdisk_definition = "\n".join(part_definition)
if len(part_definition) > 4:
- raise Exception(
+ raise RuntimeError(
"Calculated partition definition is too big\n%s"
% sfdisk_definition
)
@@ -638,7 +640,7 @@ def get_partition_gpt_layout(size, layout):
for partition in layout:
if isinstance(partition, list):
if len(partition) != 2:
- raise Exception(
+ raise RuntimeError(
"Partition was incorrectly defined: %s" % partition
)
percent, partition_type = partition
@@ -682,7 +684,7 @@ def purge_disk(device):
LOG.info("Purging filesystem on /dev/%s", d["name"])
subp.subp(wipefs_cmd)
except Exception as e:
- raise Exception(
+ raise RuntimeError(
"Failed FS purge of /dev/%s" % d["name"]
) from e
@@ -702,7 +704,7 @@ def get_partition_layout(table_type, size, layout):
return get_partition_mbr_layout(size, layout)
elif "gpt" == table_type:
return get_partition_gpt_layout(size, layout)
- raise Exception("Unable to determine table type")
+ raise RuntimeError("Unable to determine table type")
def read_parttbl(device):
@@ -733,7 +735,7 @@ def exec_mkpart_mbr(device, layout):
try:
subp.subp(prt_cmd, data="%s\n" % layout)
except Exception as e:
- raise Exception(
+ raise RuntimeError(
"Failed to partition device %s\n%s" % (device, e)
) from e
@@ -816,7 +818,7 @@ def mkpart(device, definition):
# This prevents you from overwriting the device
LOG.debug("Checking if device %s is a valid device", device)
if not is_device_valid(device):
- raise Exception(
+ raise RuntimeError(
"Device {device} is not a disk device!".format(device=device)
)
@@ -849,7 +851,7 @@ def mkpart(device, definition):
elif "gpt" == table_type:
exec_mkpart_gpt(device, part_definition)
else:
- raise Exception("Unable to determine table type")
+ raise RuntimeError("Unable to determine table type")
LOG.debug("Partition table created for %s", device)
@@ -997,7 +999,7 @@ def mkfs(fs_cfg):
# Check that we can create the FS
if not (fs_type or fs_cmd):
- raise Exception(
+ raise RuntimeError(
"No way to create filesystem '{label}'. fs_type or fs_cmd "
"must be set.".format(label=label)
)
@@ -1059,7 +1061,7 @@ def mkfs(fs_cfg):
try:
subp.subp(fs_cmd, shell=shell)
except Exception as e:
- raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) from e
+ raise RuntimeError("Failed to exec of '%s':\n%s" % (fs_cmd, e)) from e
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index 893204fa..2e0e671e 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -22,9 +22,8 @@ from cloudinit.subp import ProcessExecutionError
MODULE_DESCRIPTION = """\
Configure which device is used as the target for grub installation. This module
can be enabled/disabled using the ``enabled`` config key in the ``grub_dpkg``
-config dict. The global config key ``grub-dpkg`` is an alias for ``grub_dpkg``.
-If no installation device is specified this module will execute grub-probe to
-determine which disk the /boot directory is associated with.
+config dict. This module automatically selects a disk using ``grub-probe`` if
+no installation device is specified.
The value which is placed into the debconf database is in the format which the
grub postinstall script expects. Normally, this is a /dev/disk/by-id/ value,
@@ -46,8 +45,11 @@ meta: MetaSchema = {
"""\
grub_dpkg:
enabled: true
+ # BIOS mode (install_devices needs disk)
grub-pc/install_devices: /dev/sda
grub-pc/install_devices_empty: false
+ # EFI mode (install_devices needs partition)
+ grub-efi/install_devices: /dev/sda
"""
)
],
@@ -57,7 +59,7 @@ meta: MetaSchema = {
__doc__ = get_meta_doc(meta)
-def fetch_idevs(log):
+def fetch_idevs(log: Logger):
"""
Fetches the /dev/disk/by-id device grub is installed to.
Falls back to plain disk name if no by-id entry is present.
@@ -65,11 +67,19 @@ def fetch_idevs(log):
disk = ""
devices = []
+ # BIOS mode systems use /boot and the disk path,
+ # EFI mode systems use /boot/efi and the partition path.
+ probe_target = "disk"
+ probe_mount = "/boot"
+ if is_efi_booted(log):
+ probe_target = "device"
+ probe_mount = "/boot/efi"
+
try:
# get the root disk where the /boot directory resides.
- disk = subp.subp(["grub-probe", "-t", "disk", "/boot"], capture=True)[
- 0
- ].strip()
+ disk = subp.subp(
+ ["grub-probe", "-t", probe_target, probe_mount], capture=True
+ ).stdout.strip()
except ProcessExecutionError as e:
# grub-common may not be installed, especially on containers
# FileNotFoundError is a nested exception of ProcessExecutionError
@@ -97,8 +107,8 @@ def fetch_idevs(log):
subp.subp(
["udevadm", "info", "--root", "--query=symlink", disk],
capture=True,
- )[0]
- .strip()
+ )
+ .stdout.strip()
.split()
)
except Exception:
@@ -117,10 +127,21 @@ def fetch_idevs(log):
return idevs
+def is_efi_booted(log: Logger) -> bool:
+ """
+ Check if the system is booted in EFI mode.
+ """
+ try:
+ return os.path.exists("/sys/firmware/efi")
+ except OSError as e:
+ log.error("Failed to determine if system is booted in EFI mode: %s", e)
+ # If we can't determine if we're booted in EFI mode, assume we're not.
+ return False
+
+
def handle(
name: str, cfg: Config, cloud: Cloud, log: Logger, args: list
) -> None:
-
mycfg = cfg.get("grub_dpkg", cfg.get("grub-dpkg", {}))
if not mycfg:
mycfg = {}
@@ -130,35 +151,47 @@ def handle(
log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled)
return
- idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
- if idevs is None:
- idevs = fetch_idevs(log)
-
- idevs_empty = mycfg.get("grub-pc/install_devices_empty")
- if idevs_empty is None:
- idevs_empty = not idevs
- elif not isinstance(idevs_empty, bool):
- idevs_empty = util.translate_bool(idevs_empty)
- idevs_empty = str(idevs_empty).lower()
-
- # now idevs and idevs_empty are set to determined values
- # or, those set by user
-
- dconf_sel = (
- "grub-pc grub-pc/install_devices string %s\n"
- "grub-pc grub-pc/install_devices_empty boolean %s\n"
- % (idevs, idevs_empty)
- )
-
- log.debug(
- "Setting grub debconf-set-selections with '%s','%s'"
- % (idevs, idevs_empty)
- )
+ dconf_sel = get_debconf_config(mycfg, log)
+ log.debug("Setting grub debconf-set-selections with '%s'" % dconf_sel)
try:
subp.subp(["debconf-set-selections"], dconf_sel)
- except Exception:
- util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
+ except Exception as e:
+ util.logexc(
+ log, "Failed to run debconf-set-selections for grub_dpkg: %s", e
+ )
-# vi: ts=4 expandtab
+def get_debconf_config(mycfg: Config, log: Logger) -> str:
+ """
+ Returns the debconf config for grub-pc or
+ grub-efi depending on the systems boot mode.
+ """
+ if is_efi_booted(log):
+ idevs = util.get_cfg_option_str(
+ mycfg, "grub-efi/install_devices", None
+ )
+
+ if idevs is None:
+ idevs = fetch_idevs(log)
+
+ return "grub-pc grub-efi/install_devices string %s\n" % idevs
+ else:
+ idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
+ if idevs is None:
+ idevs = fetch_idevs(log)
+
+ idevs_empty = mycfg.get("grub-pc/install_devices_empty")
+ if idevs_empty is None:
+ idevs_empty = not idevs
+ elif not isinstance(idevs_empty, bool):
+ idevs_empty = util.translate_bool(idevs_empty)
+ idevs_empty = str(idevs_empty).lower()
+
+ # now idevs and idevs_empty are set to determined values
+ # or, those set by user
+ return (
+ "grub-pc grub-pc/install_devices string %s\n"
+ "grub-pc grub-pc/install_devices_empty boolean %s\n"
+ % (idevs, idevs_empty)
+ )
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index e692fbd5..06c9f6a6 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -382,7 +382,7 @@ def bridge_to_debconf(bridge_cfg):
debconf["lxd/bridge-domain"] = bridge_cfg.get("domain")
else:
- raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode"))
+ raise RuntimeError('invalid bridge mode "%s"' % bridge_cfg.get("mode"))
return debconf
@@ -399,7 +399,7 @@ def bridge_to_cmd(bridge_cfg):
return None, cmd_attach
if bridge_cfg.get("mode") != "new":
- raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode"))
+ raise RuntimeError('invalid bridge mode "%s"' % bridge_cfg.get("mode"))
cmd_create = ["network", "create", bridge_name]
diff --git a/cloudinit/config/cc_refresh_rmc_and_interface.py b/cloudinit/config/cc_refresh_rmc_and_interface.py
index 18c22476..87923f0b 100644
--- a/cloudinit/config/cc_refresh_rmc_and_interface.py
+++ b/cloudinit/config/cc_refresh_rmc_and_interface.py
@@ -150,20 +150,3 @@ def search(contents):
or contents.startswith("IPV6INIT")
or contents.startswith("NM_CONTROLLED")
)
-
-
-def refresh_rmc():
- # To make a healthy connection between RMC daemon and hypervisor we
- # refresh RMC. With refreshing RMC we are ensuring that making IPv6
- # down and up shouldn't impact communication between RMC daemon and
- # hypervisor.
- # -z : stop Resource Monitoring & Control subsystem and all resource
- # managers, but the command does not return control to the user
- # until the subsystem and all resource managers are stopped.
- # -s : start Resource Monitoring & Control subsystem.
- try:
- subp.subp([RMCCTRL, "-z"])
- subp.subp([RMCCTRL, "-s"])
- except Exception:
- util.logexc(LOG, "Failed to refresh the RMC subsystem.")
- raise
diff --git a/cloudinit/config/cc_reset_rmc.py b/cloudinit/config/cc_reset_rmc.py
index a780e4ff..d687c482 100644
--- a/cloudinit/config/cc_reset_rmc.py
+++ b/cloudinit/config/cc_reset_rmc.py
@@ -149,4 +149,4 @@ def reset_rmc():
if node_id_after == node_id_before:
msg = "New node ID did not get generated."
LOG.error(msg)
- raise Exception(msg)
+ raise RuntimeError(msg)
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 9bccdcec..0669defc 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -1139,19 +1139,6 @@ def get_schema() -> dict:
return full_schema
-def get_meta() -> dict:
- """Return metadata coalesced from all cc_* cloud-config module."""
- full_meta = dict()
- for (_, mod_name) in get_modules().items():
- mod_locs, _ = importer.find_module(
- mod_name, ["cloudinit.config"], ["meta"]
- )
- if mod_locs:
- mod = importer.import_module(mod_locs[0])
- full_meta[mod.meta["id"]] = mod.meta
- return full_meta
-
-
def get_parser(parser=None):
"""Return a parser for supported cmdline arguments."""
if not parser:
diff --git a/cloudinit/config/schemas/schema-cloud-config-v1.json b/cloudinit/config/schemas/schema-cloud-config-v1.json
index 10636e6d..2a2d8631 100644
--- a/cloudinit/config/schemas/schema-cloud-config-v1.json
+++ b/cloudinit/config/schemas/schema-cloud-config-v1.json
@@ -1343,7 +1343,7 @@
"description": "Device to use as target for grub installation. If unspecified, ``grub-probe`` of ``/boot`` will be used to find the device"
},
"grub-pc/install_devices_empty": {
- "description": "Sets values for ``grub-pc/install_devices_empty``. If unspecified, will be set to ``true`` if ``grub-pc/install_devices`` is empty, otherwise ``false``.",
+ "description": "Sets values for ``grub-pc/install_devices_empty``. If unspecified, will be set to ``true`` if ``grub-pc/install_devices`` is empty, otherwise ``false``",
"oneOf": [
{
"type": "boolean"
@@ -1355,11 +1355,16 @@
"changed_description": "Use a boolean value instead."
}
]
+ },
+ "grub-efi/install_devices": {
+ "type": "string",
+ "description": "Partition to use as target for grub installation. If unspecified, ``grub-probe`` of ``/boot/efi`` will be used to find the partition"
}
}
},
"grub-dpkg": {
"type": "object",
+ "description": "An alias for ``grub_dpkg``",
"deprecated": true,
"deprecated_version": "22.2",
"deprecated_description": "Use ``grub_dpkg`` instead."
diff --git a/cloudinit/distros/netbsd.py b/cloudinit/distros/netbsd.py
index b3232feb..b9811e6a 100644
--- a/cloudinit/distros/netbsd.py
+++ b/cloudinit/distros/netbsd.py
@@ -100,13 +100,6 @@ class NetBSD(cloudinit.distros.bsd.BSD):
raise
self.unlock_passwd(user)
- def force_passwd_change(self, user):
- try:
- subp.subp(["usermod", "-F", user])
- except Exception:
- util.logexc(LOG, "Failed to set pw expiration for %s", user)
- raise
-
def lock_passwd(self, name):
try:
subp.subp(["usermod", "-C", "yes", name])
diff --git a/cloudinit/distros/parsers/ifconfig.py b/cloudinit/distros/parsers/ifconfig.py
index 3e57e41a..0897beba 100644
--- a/cloudinit/distros/parsers/ifconfig.py
+++ b/cloudinit/distros/parsers/ifconfig.py
@@ -71,10 +71,6 @@ class Ifstate:
def is_vlan(self) -> bool:
return ("vlan" in self.groups) or (self.vlan != {})
- @property
- def is_wlan(self) -> bool:
- return "wlan" in self.groups
-
class Ifconfig:
"""
@@ -201,9 +197,6 @@ class Ifconfig:
self._ifs_by_mac = dict(ifs_by_mac)
return {**self._ifs_by_name, **self._ifs_by_mac}
- def ifs_by_name(self):
- return self._ifs_by_name
-
def ifs_by_mac(self):
return self._ifs_by_mac
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index df7dc3d6..7fb7e56d 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -18,13 +18,6 @@ from cloudinit.settings import PER_INSTANCE
LOG = logging.getLogger(__name__)
-def _make_sysconfig_bool(val):
- if val:
- return "yes"
- else:
- return "no"
-
-
class Distro(distros.Distro):
# See: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Networking_Guide/sec-Network_Configuration_Using_sysconfig_Files.html # noqa
clock_conf_fn = "/etc/sysconfig/clock"
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 3e90f07d..78ddb794 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -43,9 +43,6 @@ class DummySemaphores:
def clear(self, _name, _freq):
return True
- def clear_all(self):
- pass
-
class FileLock:
def __init__(self, fn):
@@ -83,14 +80,6 @@ class FileSemaphores:
return False
return True
- def clear_all(self):
- try:
- util.del_dir(self.sem_path)
- except (IOError, OSError):
- util.logexc(
- LOG, "Failed deleting semaphore directory %s", self.sem_path
- )
-
def _acquire(self, name, freq):
# Check again if its been already gotten
if self.has_run(name, freq):
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 50e445ec..244305d1 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -882,7 +882,7 @@ def _rename_interfaces(
)
if len(errors):
- raise Exception("\n".join(errors))
+ raise RuntimeError("\n".join(errors))
def get_interface_mac(ifname):
diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py
index 7d11a02c..e69da40d 100644
--- a/cloudinit/net/activators.py
+++ b/cloudinit/net/activators.py
@@ -72,24 +72,6 @@ class NetworkActivator(ABC):
[i["name"] for i in network_state.iter_interfaces()]
)
- @classmethod
- def bring_down_interfaces(cls, device_names: Iterable[str]) -> bool:
- """Bring down specified list of interfaces.
-
- Return True is successful, otherwise return False
- """
- return all(cls.bring_down_interface(device) for device in device_names)
-
- @classmethod
- def bring_down_all_interfaces(cls, network_state: NetworkState) -> bool:
- """Bring down all interfaces.
-
- Return True is successful, otherwise return False
- """
- return cls.bring_down_interfaces(
- [i["name"] for i in network_state.iter_interfaces()]
- )
-
class IfUpDownActivator(NetworkActivator):
# Note that we're not overriding bring_up_interfaces to pass something
@@ -205,26 +187,6 @@ class NetplanActivator(NetworkActivator):
)
return _alter_interface(NetplanActivator.NETPLAN_CMD, "all")
- @staticmethod
- def bring_down_interfaces(device_names: Iterable[str]) -> bool:
- """Apply netplan config.
-
- Return True is successful, otherwise return False
- """
- LOG.debug(
- "Calling 'netplan apply' rather than "
- "altering individual interfaces"
- )
- return _alter_interface(NetplanActivator.NETPLAN_CMD, "all")
-
- @staticmethod
- def bring_down_all_interfaces(network_state: NetworkState) -> bool:
- """Apply netplan config.
-
- Return True is successful, otherwise return False
- """
- return _alter_interface(NetplanActivator.NETPLAN_CMD, "all")
-
class NetworkdActivator(NetworkActivator):
@staticmethod
diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py
index b23279e5..8892b3ba 100644
--- a/cloudinit/net/bsd.py
+++ b/cloudinit/net/bsd.py
@@ -222,9 +222,6 @@ class BSDRenderer(renderer.Renderer):
def write_config(self, target=None):
raise NotImplementedError()
- def set_gateway(self, gateway):
- raise NotImplementedError()
-
def rename_interface(self, cur_name, device_name):
raise NotImplementedError()
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index 53bd35ca..ae56f72c 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -308,18 +308,6 @@ def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
ifaces[iface]["auto"] = False
-def parse_deb_config(path):
- """Parses a debian network configuration file."""
- ifaces = {}
- with open(path, "r") as fp:
- contents = fp.read().strip()
- abs_path = os.path.abspath(path)
- _parse_deb_config_data(
- ifaces, contents, os.path.dirname(abs_path), abs_path
- )
- return ifaces
-
-
def convert_eni_data(eni_data):
# return a network config representation of what is in eni_data
ifaces = {}
@@ -329,7 +317,7 @@ def convert_eni_data(eni_data):
def _ifaces_to_net_config_data(ifaces):
"""Return network config that represents the ifaces data provided.
- ifaces = parse_deb_config("/etc/network/interfaces")
+ ifaces = _parse_deb_config_data(...)
config = ifaces_to_net_config_data(ifaces)
state = parse_net_config_data(config)."""
devs = {}
diff --git a/cloudinit/net/ephemeral.py b/cloudinit/net/ephemeral.py
index fa1116c9..cade2e5f 100644
--- a/cloudinit/net/ephemeral.py
+++ b/cloudinit/net/ephemeral.py
@@ -107,22 +107,6 @@ class EphemeralIPv4Network:
for cmd in self.cleanup_cmds:
subp.subp(cmd, capture=True)
- def _delete_address(self, address, prefix):
- """Perform the ip command to remove the specified address."""
- subp.subp(
- [
- "ip",
- "-family",
- "inet",
- "addr",
- "del",
- "%s/%s" % (address, prefix),
- "dev",
- self.interface,
- ],
- capture=True,
- )
-
def _bringup_device(self):
"""Perform the ip comands to fully setup the device."""
cidr = "{0}/{1}".format(self.ip, self.prefix)
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index ad586e1e..1c28e16e 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -500,23 +500,3 @@ def available(target=None):
if not subp.which(p, search=search, target=target):
return False
return True
-
-
-def network_state_to_netplan(network_state, header=None):
- # render the provided network state, return a string of equivalent eni
- netplan_path = "etc/network/50-cloud-init.yaml"
- renderer = Renderer(
- {
- "netplan_path": netplan_path,
- "netplan_header": header,
- }
- )
- if not header:
- header = ""
- if not header.endswith("\n"):
- header += "\n"
- contents = renderer._render_content(network_state)
- return header + contents
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 790398bc..158a2951 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -94,13 +94,6 @@ def warn_deprecated_all_devices(dikt: dict) -> None:
)
-def from_state_file(state_file):
- state = util.read_conf(state_file)
- nsi = NetworkStateInterpreter()
- nsi.load(state)
- return nsi
-
-
def diff_keys(expected, actual):
missing = set(expected)
for key in actual:
diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py
index 4fd8a9b8..0c9ece0c 100644
--- a/cloudinit/net/networkd.py
+++ b/cloudinit/net/networkd.py
@@ -80,15 +80,6 @@ class CfgParser:
return contents
- def dump_data(self, target_fn):
- if not target_fn:
- LOG.warning("Target file not given")
- return
-
- contents = self.get_final_conf()
- LOG.debug("Final content: %s", contents)
- util.write_file(target_fn, contents)
-
class Renderer(renderer.Renderer):
"""
@@ -355,7 +346,7 @@ class Renderer(renderer.Renderer):
f" and dhcp{version}-overrides.use-domains"
f" configured. Use one"
)
- raise Exception(exception)
+ raise RuntimeError(exception)
self.parse_dhcp_overrides(cfg, device, dhcp, version)
@@ -371,8 +362,3 @@ def available(target=None):
if not subp.which(p, search=search, target=target):
return False
return True
-
-
-def network_state_to_networkd(ns: NetworkState):
- renderer = Renderer({})
- return renderer._render_content(ns)
diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
index 6bf4703c..72813e32 100644
--- a/cloudinit/net/renderer.py
+++ b/cloudinit/net/renderer.py
@@ -9,7 +9,7 @@ import abc
import io
from typing import Optional
-from cloudinit.net.network_state import NetworkState, parse_net_config_data
+from cloudinit.net.network_state import NetworkState
from cloudinit.net.udev import generate_udev_rule
@@ -17,10 +17,6 @@ def filter_by_type(match_type):
return lambda iface: match_type == iface["type"]
-def filter_by_name(match_name):
- return lambda iface: match_name == iface["name"]
-
-
def filter_by_attr(match_name):
return lambda iface: (match_name in iface and iface[match_name])
@@ -57,18 +53,3 @@ class Renderer:
target=None,
) -> None:
"""Render network state."""
-
- def render_network_config(
- self,
- network_config: dict,
- templates: Optional[dict] = None,
- target=None,
- ):
- return self.render_network_state(
- network_state=parse_net_config_data(network_config),
- templates=templates,
- target=target,
- )
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index dfcb891f..807c02c7 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -306,20 +306,6 @@ DEF_EPHEMERAL_LABEL = "Temporary Storage"
DEF_PASSWD_REDACTION = "REDACTED"
-@azure_ds_telemetry_reporter
-def is_platform_viable(seed_dir: Optional[Path]) -> bool:
- """Check platform environment to report if this datasource may run."""
- chassis_tag = ChassisAssetTag.query_system()
- if chassis_tag is not None:
- return True
-
- # If no valid chassis tag, check for seeded ovf-env.xml.
- if seed_dir is None:
- return False
-
- return (seed_dir / "ovf-env.xml").exists()
-
-
class DataSourceAzure(sources.DataSource):
dsname = "Azure"
@@ -579,6 +565,12 @@ class DataSourceAzure(sources.DataSource):
report_diagnostic_event(msg, logger_func=LOG.error)
raise sources.InvalidMetaDataException(msg)
+ # Networking is a hard requirement for source PPS, fail without it.
+ if not self._is_ephemeral_networking_up():
+ msg = "DHCP failed while in source PPS"
+ report_diagnostic_event(msg, logger_func=LOG.error)
+ raise sources.InvalidMetaDataException(msg)
+
if pps_type == PPSType.SAVABLE:
self._wait_for_all_nics_ready()
elif pps_type == PPSType.OS_DISK:
@@ -695,14 +687,27 @@ class DataSourceAzure(sources.DataSource):
self._metadata_imds = sources.UNSET
@azure_ds_telemetry_reporter
+ def ds_detect(self):
+ """Check platform environment to report if this datasource may
+ run.
+ """
+ chassis_tag = ChassisAssetTag.query_system()
+ if chassis_tag is not None:
+ return True
+
+ # If no valid chassis tag, check for seeded ovf-env.xml.
+ if self.seed_dir is None:
+ return False
+
+ return Path(self.seed_dir, "ovf-env.xml").exists()
+
+ @azure_ds_telemetry_reporter
def _get_data(self):
"""Crawl and process datasource metadata caching metadata as attrs.
@return: True on success, False on error, invalid or disabled
datasource.
"""
- if not is_platform_viable(Path(self.seed_dir)):
- return False
try:
get_boot_telemetry()
except Exception as e:
@@ -1096,13 +1101,6 @@ class DataSourceAzure(sources.DataSource):
dhcp_attempts = 0
if report_ready:
- # Networking must be up for netlink to detect
- # media disconnect/connect. It may be down to due
- # initial DHCP failure, if so check for it and retry,
- # ensuring we flag it as required.
- if not self._is_ephemeral_networking_up():
- self._setup_ephemeral_networking(timeout_minutes=20)
-
try:
if (
self._ephemeral_dhcp_ctx is None
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index 270a3a18..1dcd7107 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -31,7 +31,8 @@ class DataSourceCloudSigma(sources.DataSource):
self.ssh_public_key = ""
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- def is_running_in_cloudsigma(self):
+ @staticmethod
+ def ds_detect():
"""
Uses dmi data to detect if this instance of cloud-init is running
in the CloudSigma's infrastructure.
@@ -51,8 +52,6 @@ class DataSourceCloudSigma(sources.DataSource):
as userdata.
"""
dsmode = None
- if not self.is_running_in_cloudsigma():
- return False
try:
server_context = self.cepko.all().result
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 52d3ad26..b6a110aa 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -102,7 +102,7 @@ class DataSourceDigitalOcean(sources.DataSource):
interfaces = self.metadata.get("interfaces")
LOG.debug(interfaces)
if not interfaces:
- raise Exception("Unable to get meta-data from server....")
+ raise RuntimeError("Unable to get meta-data from server....")
nameservers = self.metadata_full["dns"]["nameservers"]
self._network_config = do_helper.convert_network_configuration(
diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py
index 23478e9e..cf42fdbb 100644
--- a/cloudinit/sources/DataSourceExoscale.py
+++ b/cloudinit/sources/DataSourceExoscale.py
@@ -100,9 +100,6 @@ class DataSourceExoscale(sources.DataSource):
Please refer to the datasource documentation for details on how the
metadata server and password server are crawled.
"""
- if not self._is_platform_viable():
- return False
-
data = util.log_time(
logfunc=LOG.debug,
msg="Crawl of metadata service",
@@ -142,7 +139,8 @@ class DataSourceExoscale(sources.DataSource):
def get_config_obj(self):
return self.extra_config
- def _is_platform_viable(self):
+ @staticmethod
+ def ds_detect():
return dmi.read_dmi_data("system-product-name").startswith(
EXOSCALE_DMI_NAME
)
diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py
index 90531769..14f14677 100644
--- a/cloudinit/sources/DataSourceHetzner.py
+++ b/cloudinit/sources/DataSourceHetzner.py
@@ -129,7 +129,7 @@ class DataSourceHetzner(sources.DataSource):
_net_config = self.metadata["network-config"]
if not _net_config:
- raise Exception("Unable to get meta-data from server....")
+ raise RuntimeError("Unable to get meta-data from server....")
self._network_config = _net_config
diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py
index ab440cc8..2643149b 100644
--- a/cloudinit/sources/DataSourceLXD.py
+++ b/cloudinit/sources/DataSourceLXD.py
@@ -181,16 +181,13 @@ class DataSourceLXD(sources.DataSource):
super()._unpickle(ci_pkl_version)
self.skip_hotplug_detect = True
- def _is_platform_viable(self) -> bool:
+ @staticmethod
+ def ds_detect() -> bool:
"""Check platform environment to report if this datasource may run."""
return is_platform_viable()
def _get_data(self) -> bool:
"""Crawl LXD socket API instance data and return True on success"""
- if not self._is_platform_viable():
- LOG.debug("Not an LXD datasource: No LXD socket found.")
- return False
-
self._crawled_metadata = util.log_time(
logfunc=LOG.debug,
msg="Crawl of metadata service",
diff --git a/cloudinit/sources/DataSourceNWCS.py b/cloudinit/sources/DataSourceNWCS.py
index e21383d2..aebbf689 100644
--- a/cloudinit/sources/DataSourceNWCS.py
+++ b/cloudinit/sources/DataSourceNWCS.py
@@ -43,19 +43,10 @@ class DataSourceNWCS(sources.DataSource):
self.dsmode = sources.DSMODE_NETWORK
def _get_data(self):
- LOG.info("Detecting if machine is a NWCS instance")
- on_nwcs = get_nwcs_data()
-
- if not on_nwcs:
- LOG.info("Machine is not a NWCS instance")
- return False
-
- LOG.info("Machine is a NWCS instance")
-
md = self.get_metadata()
if md is None:
- raise Exception("failed to get metadata")
+ raise RuntimeError("failed to get metadata")
self.metadata_full = md
@@ -111,7 +102,7 @@ class DataSourceNWCS(sources.DataSource):
return self._network_config
if not self.metadata["network"]["config"]:
- raise Exception("Unable to get metadata from server")
+ raise RuntimeError("Unable to get metadata from server")
# metadata sends interface names, but we dont want to use them
for i in self.metadata["network"]["config"]:
@@ -125,14 +116,9 @@ class DataSourceNWCS(sources.DataSource):
return self._network_config
-
-def get_nwcs_data():
- vendor_name = dmi.read_dmi_data("system-manufacturer")
-
- if vendor_name != "NWCS":
- return False
-
- return True
+ @staticmethod
+ def ds_detect():
+ return "NWCS" == dmi.read_dmi_data("system-manufacturer")
def get_interface_name(mac):
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index a9744fa1..bcb0927a 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -161,9 +161,6 @@ class OpenNebulaNetwork:
def mac2ip(self, mac):
return ".".join([str(int(c, 16)) for c in mac.split(":")[2:]])
- def mac2network(self, mac):
- return self.mac2ip(mac).rpartition(".")[0] + ".0"
-
def get_nameservers(self, dev):
nameservers = {}
dns = self.get_field(dev, "dns", "").split()
@@ -208,9 +205,6 @@ class OpenNebulaNetwork:
def get_mask(self, dev):
return self.get_field(dev, "mask", "255.255.255.0")
- def get_network(self, dev, mac):
- return self.get_field(dev, "network", self.mac2network(mac))
-
def get_field(self, dev, name, default=None):
"""return the field name in context for device dev.
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
index 7f43b9e2..3baf06e1 100644
--- a/cloudinit/sources/DataSourceOracle.py
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -118,9 +118,9 @@ class DataSourceOracle(sources.DataSource):
vendordata_pure = None
network_config_sources: Tuple[sources.NetworkConfigSource, ...] = (
sources.NetworkConfigSource.CMD_LINE,
+ sources.NetworkConfigSource.SYSTEM_CFG,
sources.NetworkConfigSource.DS,
sources.NetworkConfigSource.INITRAMFS,
- sources.NetworkConfigSource.SYSTEM_CFG,
)
_network_config: dict = {"config": [], "version": 1}
@@ -140,13 +140,12 @@ class DataSourceOracle(sources.DataSource):
def _has_network_config(self) -> bool:
return bool(self._network_config.get("config", []))
- def _is_platform_viable(self) -> bool:
+ @staticmethod
+ def ds_detect() -> bool:
"""Check platform environment to report if this datasource may run."""
return _is_platform_viable()
def _get_data(self):
- if not self._is_platform_viable():
- return False
self.system_uuid = _read_system_uuid()
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index 0ba0dec3..f45f9b04 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -38,29 +38,6 @@ DEF_MD_RETRIES = 5
DEF_MD_TIMEOUT = 10
-def on_scaleway():
- """
- There are three ways to detect if you are on Scaleway:
-
- * check DMI data: not yet implemented by Scaleway, but the check is made to
- be future-proof.
- * the initrd created the file /var/run/scaleway.
- * "scaleway" is in the kernel cmdline.
- """
- vendor_name = dmi.read_dmi_data("system-manufacturer")
- if vendor_name == "Scaleway":
- return True
-
- if os.path.exists("/var/run/scaleway"):
- return True
-
- cmdline = util.get_cmdline()
- if "scaleway" in cmdline:
- return True
-
- return False
-
-
class SourceAddressAdapter(requests.adapters.HTTPAdapter):
"""
Adapter for requests to choose the local address to bind to.
@@ -203,9 +180,28 @@ class DataSourceScaleway(sources.DataSource):
"vendor-data", self.vendordata_address, self.retries, self.timeout
)
+ @staticmethod
+ def ds_detect():
+ """
+ There are three ways to detect if you are on Scaleway:
+
+ * check DMI data: not yet implemented by Scaleway, but the check is
+ made to be future-proof.
+ * the initrd created the file /var/run/scaleway.
+ * "scaleway" is in the kernel cmdline.
+ """
+ vendor_name = dmi.read_dmi_data("system-manufacturer")
+ if vendor_name == "Scaleway":
+ return True
+
+ if os.path.exists("/var/run/scaleway"):
+ return True
+
+ cmdline = util.get_cmdline()
+ if "scaleway" in cmdline:
+ return True
+
def _get_data(self):
- if not on_scaleway():
- return False
if self._fallback_interface is None:
self._fallback_interface = net.find_fallback_nic()
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 266daf68..41f6ec27 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -526,9 +526,6 @@ class JoyentMetadataClient:
).decode()
return self.request(rtype="PUT", param=param)
- def delete(self, key):
- return self.request(rtype="DELETE", param=key)
-
def close_transport(self):
if self.fp:
self.fp.close()
diff --git a/cloudinit/sources/DataSourceUpCloud.py b/cloudinit/sources/DataSourceUpCloud.py
index d6b74bc1..43122f0b 100644
--- a/cloudinit/sources/DataSourceUpCloud.py
+++ b/cloudinit/sources/DataSourceUpCloud.py
@@ -126,7 +126,9 @@ class DataSourceUpCloud(sources.DataSource):
raw_network_config = self.metadata.get("network")
if not raw_network_config:
- raise Exception("Unable to get network meta-data from server....")
+ raise RuntimeError(
+ "Unable to get network meta-data from server...."
+ )
self._network_config = uc_helper.convert_network_config(
raw_network_config,
diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py
index 9d7c84fb..f7c56780 100644
--- a/cloudinit/sources/DataSourceVultr.py
+++ b/cloudinit/sources/DataSourceVultr.py
@@ -37,12 +37,12 @@ class DataSourceVultr(sources.DataSource):
]
)
+ @staticmethod
+ def ds_detect():
+ return vultr.is_vultr()
+
# Initiate data and check if Vultr
def _get_data(self):
- LOG.debug("Detecting if machine is a Vultr instance")
- if not vultr.is_vultr():
- LOG.debug("Machine is not a Vultr instance")
- return False
LOG.debug("Machine is a Vultr instance")
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 565e1754..2779cac4 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -13,6 +13,7 @@ import copy
import json
import os
import pickle
+import re
from collections import namedtuple
from enum import Enum, unique
from typing import Any, Dict, List, Optional, Tuple
@@ -311,28 +312,42 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
"""Check if running on this datasource"""
return True
- def override_ds_detect(self):
+ def override_ds_detect(self) -> bool:
"""Override if either:
- only a single datasource defined (nothing to fall back to)
- - TODO: commandline argument is used (ci.ds=OpenStack)
+ - commandline argument is used (ci.ds=OpenStack)
+
+ Note: get_cmdline() is required for the general case - when ds-identify
+ does not run, _something_ needs to detect the kernel command line
+ definition.
"""
- return self.sys_cfg.get("datasource_list", []) in (
+ if self.dsname == parse_cmdline():
+ LOG.debug(
+ "Machine is configured by the kernel commandline to run on "
+ "single datasource %s.",
+ self,
+ )
+ return True
+ elif self.sys_cfg.get("datasource_list", []) in (
[self.dsname],
[self.dsname, "None"],
- )
+ ):
+ LOG.debug(
+ "Machine is configured to run on single datasource %s.", self
+ )
+ return True
+ return False
def _check_and_get_data(self):
"""Overrides runtime datasource detection"""
if self.override_ds_detect():
- LOG.debug(
- "Machine is configured to run on single datasource %s.", self
- )
+ return self._get_data()
elif self.ds_detect():
LOG.debug("Machine is running on %s.", self)
+ return self._get_data()
else:
LOG.debug("Datasource type %s is not detected.", self)
return False
- return self._get_data()
def _get_standardized_metadata(self, instance_data):
"""Return a dictionary of standardized metadata keys."""
@@ -895,10 +910,6 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
def network_config(self):
return None
- @property
- def first_instance_boot(self):
- return
-
def setup(self, is_new_instance):
"""setup(is_new_instance)
@@ -1138,4 +1149,13 @@ def pkl_load(fname: str) -> Optional[DataSource]:
return None
-# vi: ts=4 expandtab
+def parse_cmdline():
+ """Check if command line argument for this datasource was passed
+ Passing by command line overrides runtime datasource detection
+ """
+ cmdline = util.get_cmdline()
+ ds_parse_1 = re.search(r"ci\.ds=([a-zA-Z]+)(\s|$)", cmdline)
+ ds_parse_2 = re.search(r"ci\.datasource=([a-zA-Z]+)(\s|$)", cmdline)
+ ds = ds_parse_1 or ds_parse_2
+ if ds:
+ return ds.group(1)
diff --git a/cloudinit/sources/helpers/cloudsigma.py b/cloudinit/sources/helpers/cloudsigma.py
index 5d39946f..1d6a1b45 100644
--- a/cloudinit/sources/helpers/cloudsigma.py
+++ b/cloudinit/sources/helpers/cloudsigma.py
@@ -53,10 +53,6 @@ class Cepko:
request_pattern = self.request_pattern.format("/meta/{}")
return self.get(key, request_pattern)
- def global_context(self, key=""):
- request_pattern = self.request_pattern.format("/global_context/{}")
- return self.get(key, request_pattern)
-
class CepkoResult:
"""
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index df9e5c4b..a129d9a8 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -50,11 +50,6 @@ class Config:
return self._configFile.get(Config.TIMEZONE, None)
@property
- def utc(self):
- """Retrieves whether to set time to UTC or Local."""
- return self._configFile.get(Config.UTC, None)
-
- @property
def admin_password(self):
"""Return the root password to be set."""
return self._configFile.get(Config.PASS, None)
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index 37185cba..9f868389 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -69,39 +69,6 @@ class ConfigFile(ConfigSource, dict):
for (key, value) in config.items(category):
self._insertKey(category + "|" + key, value)
- def should_keep_current_value(self, key):
- """
- Determines whether a value for a property must be kept.
-
- If the propery is missing, it is treated as it should be not
- changed by the engine.
-
- Keyword arguments:
- key -- The key to search for.
- """
- # helps to distinguish from "empty" value which is used to indicate
- # "removal"
- return key not in self
-
- def should_remove_current_value(self, key):
- """
- Determines whether a value for the property must be removed.
-
- If the specified key is empty, it is treated as it should be
- removed by the engine.
-
- Return true if the value can be removed, false otherwise.
-
- Keyword arguments:
- key -- The key to search for.
- """
- # helps to distinguish from "missing" value which is used to indicate
- # "keeping unchanged"
- if key in self:
- return not bool(self[key])
- else:
- return False
-
def get_count_with_prefix(self, prefix):
"""
Return the total count of keys that start with the specified prefix.
@@ -110,6 +77,3 @@ class ConfigFile(ConfigSource, dict):
prefix -- prefix of the key
"""
return len([key for key in self if key.startswith(prefix)])
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
deleted file mode 100644
index d44f4c01..00000000
--- a/cloudinit/sources/helpers/vmware/imc/config_namespace.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.sources.helpers.vmware.imc.config_source import ConfigSource
-
-
-class ConfigNamespace(ConfigSource):
- """Specifies the Config Namespace."""
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 7b9e0974..ba2488be 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -62,7 +62,7 @@ class NicConfigurator:
if not primary_nics:
return None
elif len(primary_nics) > 1:
- raise Exception(
+ raise RuntimeError(
"There can only be one primary nic",
[nic.mac for nic in primary_nics],
)
@@ -230,16 +230,6 @@ class NicConfigurator:
return (subnet_list, route_list)
- def _genIpv6Route(self, name, nic, addrs):
- route_list = []
-
- for addr in addrs:
- route_list.append(
- {"type": "route", "gateway": addr.gateway, "metric": 10000}
- )
-
- return route_list
-
def generate(self, configure=False, osfamily=None):
"""Return the config elements that are needed to configure the nics"""
if configure:
diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
deleted file mode 100644
index f290a36f..00000000
--- a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-
-class Ipv4ModeEnum:
- """
- The IPv4 configuration mode which directly represents the user's goal.
-
- This mode effectively acts as a contract of the in-guest customization
- engine. It must be set based on what the user has requested and should
- not be changed by those layers. It's up to the in-guest engine to
- interpret and materialize the user's request.
- """
-
- # The legacy mode which only allows dhcp/static based on whether IPv4
- # addresses list is empty or not
- IPV4_MODE_BACKWARDS_COMPATIBLE = "BACKWARDS_COMPATIBLE"
-
- # IPv4 must use static address. Reserved for future use
- IPV4_MODE_STATIC = "STATIC"
-
- # IPv4 must use DHCPv4. Reserved for future use
- IPV4_MODE_DHCP = "DHCP"
-
- # IPv4 must be disabled
- IPV4_MODE_DISABLED = "DISABLED"
-
- # IPv4 settings should be left untouched. Reserved for future use
- IPV4_MODE_AS_IS = "AS_IS"
-
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index a063e778..65f952e7 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -535,9 +535,6 @@ class Init:
]
return def_handlers
- def _default_userdata_handlers(self):
- return self._default_handlers()
-
def _default_vendordata_handlers(self):
return self._default_handlers(
opts={
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index 4d712829..ed6b9063 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -15,14 +15,17 @@
import collections
import re
import sys
-from typing import Type
+from typing import Any
from cloudinit import log as logging
from cloudinit import type_utils as tu
from cloudinit import util
from cloudinit.atomic_helper import write_file
-JUndefined: Type
+# After bionic EOL, mypy==1.0.0 will be able to type-analyse dynamic
+# base types, substitute this by:
+# JUndefined: typing.Type
+JUndefined: Any
try:
from jinja2 import DebugUndefined as _DebugUndefined
from jinja2 import Template as JTemplate
@@ -41,7 +44,7 @@ MISSING_JINJA_PREFIX = "CI_MISSING_JINJA_VAR/"
# Mypy, and the PEP 484 ecosystem in general, does not support creating
# classes with dynamic base types: https://stackoverflow.com/a/59636248
-class UndefinedJinjaVariable(JUndefined): # type: ignore
+class UndefinedJinjaVariable(JUndefined):
"""Class used to represent any undefined jinja template variable."""
def __str__(self):
@@ -149,12 +152,6 @@ def render_to_file(fn, outfn, params, mode=0o644):
util.write_file(outfn, contents, mode=mode)
-def render_string_to_file(content, outfn, params, mode=0o644):
- """Render string"""
- contents = render_string(content, params)
- util.write_file(outfn, contents, mode=mode)
-
-
def render_string(content, params):
"""Render string"""
if not params:
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 3336b23d..5374ec8a 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -69,7 +69,7 @@ def _set_filename(msg, filename):
def _handle_error(error_message, source_exception=None):
if features.ERROR_ON_USER_DATA_FAILURE:
- raise Exception(error_message) from source_exception
+ raise RuntimeError(error_message) from source_exception
else:
LOG.warning(error_message)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index aed332ec..fc777b82 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -34,6 +34,7 @@ import sys
import time
from base64 import b64decode, b64encode
from collections import deque, namedtuple
+from contextlib import suppress
from errno import EACCES, ENOENT
from functools import lru_cache, total_ordering
from pathlib import Path
@@ -44,6 +45,7 @@ from cloudinit import features, importer
from cloudinit import log as logging
from cloudinit import (
mergers,
+ net,
safeyaml,
subp,
temp_utils,
@@ -1233,8 +1235,8 @@ def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
return fqdn
-def is_resolvable(name):
- """determine if a url is resolvable, return a boolean
+def is_resolvable(url) -> bool:
+ """determine if a url's network address is resolvable, return a boolean
This also attempts to be resilent against dns redirection.
Note, that normal nsswitch resolution is used here. So in order
@@ -1246,6 +1248,8 @@ def is_resolvable(name):
be resolved inside the search list.
"""
global _DNS_REDIRECT_IP
+ parsed_url = parse.urlparse(url)
+ name = parsed_url.hostname
if _DNS_REDIRECT_IP is None:
badips = set()
badnames = (
@@ -1253,7 +1257,7 @@ def is_resolvable(name):
"example.invalid.",
"__cloud_init_expected_not_found__",
)
- badresults = {}
+ badresults: dict = {}
for iname in badnames:
try:
result = socket.getaddrinfo(
@@ -1270,12 +1274,14 @@ def is_resolvable(name):
LOG.debug("detected dns redirection: %s", badresults)
try:
+ # ip addresses need no resolution
+ with suppress(ValueError):
+ if net.is_ip_address(parsed_url.netloc.strip("[]")):
+ return True
result = socket.getaddrinfo(name, None)
# check first result's sockaddr field
addr = result[0][4][0]
- if addr in _DNS_REDIRECT_IP:
- return False
- return True
+ return addr not in _DNS_REDIRECT_IP
except (socket.gaierror, socket.error):
return False
@@ -1298,7 +1304,7 @@ def is_resolvable_url(url):
logfunc=LOG.debug,
msg="Resolving URL: " + url,
func=is_resolvable,
- args=(parse.urlparse(url).hostname,),
+ args=(url,),
)
@@ -1517,12 +1523,6 @@ def blkid(devs=None, disable_cache=False):
return ret
-def peek_file(fname, max_bytes):
- LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes)
- with open(fname, "rb") as ifh:
- return ifh.read(max_bytes)
-
-
def uniq_list(in_list):
out_list = []
for i in in_list:
@@ -3172,6 +3172,7 @@ def deprecate_call(
deprecated_version=deprecated_version,
deprecated=func.__name__,
extra_message=extra_message,
+ schedule=schedule,
)
return out
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index a4103a7e..c8d460ab 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -1,3 +1,4 @@
+import datetime
import os
import sys
@@ -18,7 +19,7 @@ sys.path.insert(0, os.path.abspath("."))
# General information about the project.
project = "cloud-init"
-copyright = "Canonical Ltd."
+copyright = f"Canonical Group Ltd, {datetime.date.today().year}"
# -- General configuration ----------------------------------------------------
@@ -71,15 +72,46 @@ copybutton_only_copy_prompt_lines = False
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "furo"
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-html_static_path = ["static"]
html_theme_options = {
"light_logo": "logo.png",
"dark_logo": "logo-dark-mode.png",
+ "light_css_variables": {
+ "font-stack": "Ubuntu, -apple-system, Segoe UI, Roboto, Oxygen, Cantarell, Fira Sans, Droid Sans, Helvetica Neue, sans-serif",
+ "font-stack--monospace": "Ubuntu Mono variable, Ubuntu Mono, Consolas, Monaco, Courier, monospace",
+ "color-foreground-primary": "#111",
+ "color-foreground-secondary": "var(--color-foreground-primary)",
+ "color-foreground-muted": "#333",
+ "color-background-secondary": "#FFF",
+ "color-background-hover": "#f2f2f2",
+ "color-brand-primary": "#111",
+ "color-brand-content": "#06C",
+ "color-inline-code-background": "rgba(0,0,0,.03)",
+ "color-sidebar-link-text": "#111",
+ "color-sidebar-item-background--current": "#ebebeb",
+ "color-sidebar-item-background--hover": "#f2f2f2",
+ "sidebar-item-line-height": "1.3rem",
+ "color-link-underline": "var(--color-background-primary)",
+ "color-link-underline--hover": "var(--color-background-primary)",
+ },
+ "dark_css_variables": {
+ "color-foreground-secondary": "var(--color-foreground-primary)",
+ "color-foreground-muted": "#CDCDCD",
+ "color-background-secondary": "var(--color-background-primary)",
+ "color-background-hover": "#666",
+ "color-brand-primary": "#fff",
+ "color-brand-content": "#06C",
+ "color-sidebar-link-text": "#f7f7f7",
+ "color-sidebar-item-background--current": "#666",
+ "color-sidebar-item-background--hover": "#333",
+ },
}
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+html_static_path = ["static"]
+html_css_files = ["css/custom.css"]
+
html_extra_path = ["googleaf254801a5285c31.html"]
# Make sure the target is unique
diff --git a/doc/rtd/explanation/instancedata.rst b/doc/rtd/explanation/instancedata.rst
index 8f5b310a..95c588d1 100644
--- a/doc/rtd/explanation/instancedata.rst
+++ b/doc/rtd/explanation/instancedata.rst
@@ -343,7 +343,7 @@ used. The format of subplatform will be:
Example output:
- - metadata (http://168.254.169.254)
+ - metadata (http://169.254.169.254)
- seed-dir (/path/to/seed-dir/)
- config-disk (/dev/cd0)
- configdrive (/dev/sr0)
diff --git a/doc/rtd/static/css/custom.css b/doc/rtd/static/css/custom.css
new file mode 100644
index 00000000..a6a1f527
--- /dev/null
+++ b/doc/rtd/static/css/custom.css
@@ -0,0 +1,248 @@
+/** Fix the font weights: 200 for headers, 300 for normal text **/
+/** (matches the font-weight rules declared below) **/
+
+h1, h2, h3, h4, h5, h6, .sidebar-tree .current-page>.reference, button, input, optgroup, select, textarea, th.head {
+ font-weight: 200;
+}
+
+.toc-title {
+ font-weight: 400;
+}
+
+div.page, li.scroll-current>.reference, dl.glossary dt, dl.simple dt, dl:not([class]) dt {
+ font-weight: 300;
+ line-height: 1.5;
+ font-size: var(--font-size--normal);
+}
+
+/** Semantic markup styling **/
+strong.command {
+ font-family: var(--font-stack--monospace);
+ font-size: var(--font-size--medium);
+ background: var(--color-inline-code-background);
+ padding: 0.1em 0.2em;
+}
+
+/** Side bars (side-bar tree = left, toc-tree = right) **/
+div.sidebar-tree {
+ font-weight: 200;
+ line-height: 1.5;
+ font-size: var(--font-size--normal);
+}
+
+div.toc-tree {
+ font-weight: 200;
+ font-size: var(--font-size--medium);
+ line-height: 1.5;
+}
+
+.sidebar-tree .toctree-l1>.reference, .toc-tree li.scroll-current>.reference {
+ font-weight: 400;
+}
+
+/** List styling **/
+ol, ul {
+ margin-bottom: 1.5rem;
+ margin-left: 1rem;
+ margin-top: 0;
+ padding-left: 1rem;
+}
+
+/** Table styling **/
+
+th.head {
+ text-transform: uppercase;
+ font-size: var(--font-size--small);
+}
+
+table.docutils {
+ border: 0;
+ box-shadow: none;
+ width:100%;
+}
+
+table.docutils td, table.docutils th, table.docutils td:last-child, table.docutils th:last-child, table.docutils td:first-child, table.docutils th:first-child {
+ border-right: none;
+ border-left: none;
+}
+
+/* center align table cells with ":-:" */
+td.text-center {
+ text-align: center;
+}
+
+/** No rounded corners **/
+
+.admonition, code.literal, .sphinx-tabs-tab, .sphinx-tabs-panel, .highlight {
+ border-radius: 0;
+}
+
+/** code blocks and literals **/
+code.docutils.literal.notranslate, .highlight pre, pre.literal-block {
+ font-size: var(--font-size--medium);
+ border: none;
+}
+
+
+/** Admonition styling **/
+
+.admonition {
+ font-size: var(--font-size--medium);
+ box-shadow: none;
+}
+
+/** Styling for links **/
+/* unvisited link */
+a:link {
+ color: #06c;
+ text-decoration: none;
+}
+
+/* visited link */
+a:visited {
+ color: #7d42b8;
+ text-decoration: none;
+}
+
+/* mouse over link */
+a:hover {
+ text-decoration: underline;
+}
+
+/* selected link */
+a:active {
+ text-decoration: underline;
+}
+
+a.sidebar-brand.centered {
+ text-decoration: none;
+}
+
+/** Color for the "copy link" symbol next to headings **/
+
+a.headerlink {
+ color: var(--color-brand-primary);
+}
+
+/** Line to the left of the current navigation entry **/
+
+.sidebar-tree li.current-page {
+ border-left: 2px solid var(--color-brand-primary);
+}
+
+/** Some tweaks for issue #16 **/
+
+[role="tablist"] {
+ border-bottom: 1px solid var(--color-sidebar-item-background--hover);
+}
+
+.sphinx-tabs-tab[aria-selected="true"] {
+ border: 0;
+ border-bottom: 2px solid var(--color-brand-primary);
+ background-color: var(--color-sidebar-item-background--current);
+ font-weight:300;
+}
+
+.sphinx-tabs-tab{
+ color: var(--color-brand-primary);
+ font-weight:300;
+}
+
+.sphinx-tabs-panel {
+ border: 0;
+ border-bottom: 1px solid var(--color-sidebar-item-background--hover);
+ background: var(--color-background-primary);
+}
+
+button.sphinx-tabs-tab:hover {
+ background-color: var(--color-sidebar-item-background--hover);
+}
+
+/** Custom classes to fix scrolling in tables by decreasing the
+ font size or breaking certain columns.
+ Specify the classes in the Markdown file with, for example:
+ ```{rst-class} break-col-4 min-width-4-8
+ ```
+**/
+
+table.dec-font-size {
+ font-size: smaller;
+}
+table.break-col-1 td.text-left:first-child {
+ word-break: break-word;
+}
+table.break-col-4 td.text-left:nth-child(4) {
+ word-break: break-word;
+}
+table.min-width-1-15 td.text-left:first-child {
+ min-width: 15em;
+}
+table.min-width-4-8 td.text-left:nth-child(4) {
+ min-width: 8em;
+}
+
+/** Underline for abbreviations **/
+
+abbr[title] {
+ text-decoration: underline solid #cdcdcd;
+}
+
+/** Use the same style for right-details as for left-details **/
+.bottom-of-page .right-details {
+ font-size: var(--font-size--small);
+ display: block;
+}
+
+/** Version switcher */
+button.version_select {
+ color: var(--color-foreground-primary);
+ background-color: var(--color-toc-background);
+ padding: 5px 10px;
+ border: none;
+}
+
+.version_select:hover, .version_select:focus {
+ background-color: var(--color-sidebar-item-background--hover);
+}
+
+.version_dropdown {
+ position: relative;
+ display: inline-block;
+ text-align: right;
+ font-size: var(--sidebar-item-font-size);
+}
+
+.available_versions {
+ display: none;
+ position: absolute;
+ right: 0px;
+ background-color: var(--color-toc-background);
+ box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2);
+ z-index: 11;
+}
+
+.available_versions a {
+ color: var(--color-foreground-primary);
+ padding: 12px 16px;
+ text-decoration: none;
+ display: block;
+}
+
+.available_versions a:hover {background-color: var(--color-sidebar-item-background--current)}
+
+.show {display:block;}
+
+/** Fix for nested numbered list - the nested list is lettered **/
+ol.arabic ol.arabic {
+ list-style: lower-alpha;
+}
+
+/** Make expandable sections look like links **/
+details summary {
+ color: var(--color-link);
+}
+
+/** Context links at the bottom of the page **/
+footer, .page-info .context {
+ font-size: var(--font-size--medium);
+}
diff --git a/packages/bddeb b/packages/bddeb
index 44d82a78..2eee6003 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -4,6 +4,7 @@ import argparse
import csv
import json
import os
+import re
import shutil
import subprocess
import sys
@@ -16,11 +17,17 @@ def find_root():
top_dir = os.environ.get("CLOUD_INIT_TOP_D", None)
if top_dir is None:
top_dir = os.path.dirname(
- os.path.dirname(os.path.abspath(sys.argv[0])))
- if os.path.isfile(os.path.join(top_dir, 'setup.py')):
+ os.path.dirname(os.path.abspath(sys.argv[0]))
+ )
+ if os.path.isfile(os.path.join(top_dir, "setup.py")):
return os.path.abspath(top_dir)
- raise OSError(("Unable to determine where your cloud-init topdir is."
- " set CLOUD_INIT_TOP_D?"))
+ raise OSError(
+ (
+ "Unable to determine where your cloud-init topdir is."
+ " set CLOUD_INIT_TOP_D?"
+ )
+ )
+
if "avoid-pep8-E402-import-not-top-of-file":
# Use the util functions from cloudinit
@@ -47,20 +54,24 @@ def get_release_suffix(release):
if os.path.exists(csv_path):
with open(csv_path, "r") as fp:
# version has "16.04 LTS" or "16.10", so drop "LTS" portion.
- rels = {row['series']: row['version'].replace(' LTS', '')
- for row in csv.DictReader(fp)}
+ rels = {
+ row["series"]: row["version"].replace(" LTS", "")
+ for row in csv.DictReader(fp)
+ }
if release in rels:
return "~%s.1" % rels[release]
elif release != UNRELEASED:
- print("missing distro-info-data package, unable to give "
- "per-release suffix.\n")
+ print(
+ "missing distro-info-data package, unable to give "
+ "per-release suffix.\n"
+ )
return ""
def run_helper(helper, args=None, strip=True):
if args is None:
args = []
- cmd = [util.abs_join(find_root(), 'tools', helper)] + args
+ cmd = [util.abs_join(find_root(), "tools", helper)] + args
(stdout, _stderr) = subp.subp(cmd)
if strip:
stdout = stdout.strip()
@@ -71,43 +82,56 @@ def write_debian_folder(root, templ_data, cloud_util_deps):
"""Create a debian package directory with all rendered template files."""
print("Creating a debian/ folder in %r" % (root))
- deb_dir = util.abs_join(root, 'debian')
+ deb_dir = util.abs_join(root, "debian")
# Just copy debian/ dir and then update files
- pdeb_d = util.abs_join(find_root(), 'packages', 'debian')
- subp.subp(['cp', '-a', pdeb_d, deb_dir])
+ pdeb_d = util.abs_join(find_root(), "packages", "debian")
+ subp.subp(["cp", "-a", pdeb_d, deb_dir])
# Fill in the change log template
- templater.render_to_file(util.abs_join(find_root(),
- 'packages', 'debian', 'changelog.in'),
- util.abs_join(deb_dir, 'changelog'),
- params=templ_data)
+ templater.render_to_file(
+ util.abs_join(find_root(), "packages", "debian", "changelog.in"),
+ util.abs_join(deb_dir, "changelog"),
+ params=templ_data,
+ )
# Write out the control file template
- reqs_output = run_helper(
- 'read-dependencies', args=['--distro', 'debian'])
+ reqs_output = run_helper("read-dependencies", args=["--distro", "debian"])
reqs = reqs_output.splitlines()
test_reqs = run_helper(
- 'read-dependencies',
- ['--requirements-file', 'test-requirements.txt',
- '--system-pkg-names']).splitlines()
+ "read-dependencies",
+ ["--requirements-file", "test-requirements.txt", "--system-pkg-names"],
+ ).splitlines()
- requires = ['cloud-utils | cloud-guest-utils'] if cloud_util_deps else []
+ requires = ["cloud-utils | cloud-guest-utils"] if cloud_util_deps else []
# We consolidate all deps as Build-Depends as our package build runs all
# tests so we need all runtime dependencies anyway.
# NOTE: python package was moved to the front after debuild -S would fail with
# 'Please add apropriate interpreter' errors (as in debian bug 861132)
- requires.extend(['python3'] + reqs + test_reqs)
- if templ_data['debian_release'] == 'xenial':
- requires.append('python3-pytest-catchlog')
- elif templ_data['debian_release'] in (
- 'buster', 'xenial', 'bionic', 'focal'
+ requires.extend(["python3"] + reqs + test_reqs)
+ if templ_data["debian_release"] in (
+ "buster",
+ "bionic",
+ "focal",
):
- requires.append('dh-systemd')
- templater.render_to_file(util.abs_join(find_root(),
- 'packages', 'debian', 'control.in'),
- util.abs_join(deb_dir, 'control'),
- params={'build_depends': ','.join(requires)})
+ requires.append("dh-systemd")
+ build_deps = ",".join(requires)
+ (stdout, _stderr) = subp.subp(
+ ["dpkg-query", "-W", "-f='${Provides}'", "debhelper"]
+ )
+ # Get latest debhelper-compat support on host
+ debhelper_matches = re.findall(r"(debhelper-compat \(= \d+\)),", stdout)
+ if debhelper_matches:
+ if templ_data["debian_release"] == "bionic":
+ # Bionic doesn't support debhelper-compat > 11
+ build_deps += ",debhelper-compat (= 11)"
+ else:
+ build_deps += f",{debhelper_matches[-1]}"
+ templater.render_to_file(
+ util.abs_join(find_root(), "packages", "debian", "control.in"),
+ util.abs_join(deb_dir, "control"),
+ params={"build_depends": build_deps},
+ )
def write_debian_folder_from_branch(root, templ_data, branch):
@@ -118,8 +142,7 @@ def write_debian_folder_from_branch(root, templ_data, branch):
["git", "archive", branch, "debian"], stdout=subprocess.PIPE
)
subprocess.check_call(
- ["tar", "-v", "-C", root, "-x"],
- stdin=p_dumpdeb.stdout
+ ["tar", "-v", "-C", root, "-x"], stdin=p_dumpdeb.stdout
)
print("Adding new entry to debian/changelog")
@@ -136,55 +159,83 @@ def write_debian_folder_from_branch(root, templ_data, branch):
"--controlmaint",
"Snapshot build.",
],
- cwd=root
+ cwd=root,
)
def read_version():
- return json.loads(run_helper('read-version', ['--json']))
+ return json.loads(run_helper("read-version", ["--json"]))
def get_parser():
"""Setup and return an argument parser for bdeb tool."""
parser = argparse.ArgumentParser()
- parser.add_argument("-v", "--verbose", dest="verbose",
- help=("run verbosely"
- " (default: %(default)s)"),
- default=False,
- action='store_true')
- parser.add_argument("--cloud-utils", dest="cloud_utils",
- help=("depend on cloud-utils package"
- " (default: %(default)s)"),
- default=False,
- action='store_true')
-
- parser.add_argument("--init-system", dest="init_system",
- help=("build deb with INIT_SYSTEM=xxx"
- " (default: %(default)s"),
- default=os.environ.get("INIT_SYSTEM", "systemd"))
-
- parser.add_argument("--release", dest="release",
- help=("build with changelog referencing RELEASE"),
- default=UNRELEASED)
+ parser.add_argument(
+ "-v",
+ "--verbose",
+ dest="verbose",
+ help=("run verbosely" " (default: %(default)s)"),
+ default=False,
+ action="store_true",
+ )
+ parser.add_argument(
+ "--cloud-utils",
+ dest="cloud_utils",
+ help=("depend on cloud-utils package" " (default: %(default)s)"),
+ default=False,
+ action="store_true",
+ )
- for ent in DEBUILD_ARGS:
- parser.add_argument(ent, dest="debuild_args", action='append_const',
- const=ent, default=[],
- help=("pass through '%s' to debuild" % ent))
+ parser.add_argument(
+ "--init-system",
+ dest="init_system",
+ help=("build deb with INIT_SYSTEM=xxx" " (default: %(default)s"),
+ default=os.environ.get("INIT_SYSTEM", "systemd"),
+ )
- parser.add_argument("--sign", default=False, action='store_true',
- help="sign result. do not pass -us -uc to debuild")
+ parser.add_argument(
+ "--release",
+ dest="release",
+ help=("build with changelog referencing RELEASE"),
+ default=UNRELEASED,
+ )
- parser.add_argument("--signuser", default=False, action='store',
- help="user to sign, see man dpkg-genchanges")
+ for ent in DEBUILD_ARGS:
+ parser.add_argument(
+ ent,
+ dest="debuild_args",
+ action="append_const",
+ const=ent,
+ default=[],
+ help=("pass through '%s' to debuild" % ent),
+ )
+
+ parser.add_argument(
+ "--sign",
+ default=False,
+ action="store_true",
+ help="sign result. do not pass -us -uc to debuild",
+ )
- parser.add_argument("--packaging-branch", nargs="?", metavar="BRANCH",
- const="ubuntu/devel", type=str,
- help=(
- "Import packaging from %(metavar)s instead of"
- " using the packages/debian/* templates"
- " (default: %(const)s)"
- ))
+ parser.add_argument(
+ "--signuser",
+ default=False,
+ action="store",
+ help="user to sign, see man dpkg-genchanges",
+ )
+
+ parser.add_argument(
+ "--packaging-branch",
+ nargs="?",
+ metavar="BRANCH",
+ const="ubuntu/devel",
+ type=str,
+ help=(
+ "Import packaging from %(metavar)s instead of"
+ " using the packages/debian/* templates"
+ " (default: %(const)s)"
+ ),
+ )
return parser
@@ -225,35 +276,36 @@ def main():
return 1
if not args.sign:
- args.debuild_args.extend(['-us', '-uc'])
+ args.debuild_args.extend(["-us", "-uc"])
if args.signuser:
- args.debuild_args.extend(['-e%s' % args.signuser])
+ args.debuild_args.extend(["-e%s" % args.signuser])
- os.environ['INIT_SYSTEM'] = args.init_system
+ os.environ["INIT_SYSTEM"] = args.init_system
capture = True
if args.verbose:
capture = False
templ_data = {
- 'debian_release': args.release,
- 'release_suffix': get_release_suffix(args.release)}
+ "debian_release": args.release,
+ "release_suffix": get_release_suffix(args.release),
+ }
with temp_utils.tempdir() as tdir:
# output like 0.7.6-1022-g36e92d3
ver_data = read_version()
- if ver_data['is_release_branch_ci']:
+ if ver_data["is_release_branch_ci"]:
# If we're performing CI for a new release branch, we don't yet
# have the tag required to generate version_long; use version
# instead.
- ver_data['version_long'] = ver_data['version']
+ ver_data["version_long"] = ver_data["version"]
# This is really only a temporary archive
# since we will extract it then add in the debian
# folder, then re-archive it for debian happiness
- tarball = "cloud-init_%s.orig.tar.gz" % ver_data['version_long']
+ tarball = "cloud-init_%s.orig.tar.gz" % ver_data["version_long"]
tarball_fp = util.abs_join(tdir, tarball)
path = None
for pd in ("./", "../", "../dl/"):
@@ -264,15 +316,20 @@ def main():
break
if path is None:
print("Creating a temp tarball using the 'make-tarball' helper")
- run_helper('make-tarball',
- ['--version', ver_data['version_long'],
- '--output=' + tarball_fp])
+ run_helper(
+ "make-tarball",
+ [
+ "--version",
+ ver_data["version_long"],
+ "--output=" + tarball_fp,
+ ],
+ )
print("Extracting temporary tarball %r" % (tarball))
- cmd = ['tar', '-xvzf', tarball_fp, '-C', tdir]
+ cmd = ["tar", "-xvzf", tarball_fp, "-C", tdir]
subp.subp(cmd, capture=capture)
- xdir = util.abs_join(tdir, "cloud-init-%s" % ver_data['version_long'])
+ xdir = util.abs_join(tdir, "cloud-init-%s" % ver_data["version_long"])
templ_data.update(ver_data)
if args.packaging_branch:
@@ -284,36 +341,37 @@ def main():
xdir, templ_data, cloud_util_deps=args.cloud_utils
)
- print("Running 'debuild %s' in %r" % (' '.join(args.debuild_args),
- xdir))
+ print(
+ "Running 'debuild %s' in %r" % (" ".join(args.debuild_args), xdir)
+ )
with util.chdir(xdir):
- cmd = ['debuild', '--preserve-envvar', 'INIT_SYSTEM']
+ cmd = ["debuild", "--preserve-envvar", "INIT_SYSTEM"]
if args.debuild_args:
cmd.extend(args.debuild_args)
subp.subp(cmd, capture=capture)
- link_fn = os.path.join(os.getcwd(), 'cloud-init_all.deb')
- link_dsc = os.path.join(os.getcwd(), 'cloud-init.dsc')
+ link_fn = os.path.join(os.getcwd(), "cloud-init_all.deb")
+ link_dsc = os.path.join(os.getcwd(), "cloud-init.dsc")
for base_fn in os.listdir(os.path.join(tdir)):
full_fn = os.path.join(tdir, base_fn)
if not os.path.isfile(full_fn):
continue
shutil.move(full_fn, base_fn)
print("Wrote %r" % (base_fn))
- if base_fn.endswith('_all.deb'):
+ if base_fn.endswith("_all.deb"):
# Add in the local link
util.del_file(link_fn)
os.symlink(base_fn, link_fn)
- print("Linked %r to %r" % (base_fn,
- os.path.basename(link_fn)))
- if base_fn.endswith('.dsc'):
+ print("Linked %r to %r" % (base_fn, os.path.basename(link_fn)))
+ if base_fn.endswith(".dsc"):
util.del_file(link_dsc)
os.symlink(base_fn, link_dsc)
- print("Linked %r to %r" % (base_fn,
- os.path.basename(link_dsc)))
+ print(
+ "Linked %r to %r" % (base_fn, os.path.basename(link_dsc))
+ )
return 0
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
diff --git a/packages/debian/compat b/packages/debian/compat
deleted file mode 100644
index ec635144..00000000
--- a/packages/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-9
diff --git a/packages/debian/rules b/packages/debian/rules
index d138deeb..b9b8eaff 100755
--- a/packages/debian/rules
+++ b/packages/debian/rules
@@ -1,26 +1,26 @@
#!/usr/bin/make -f
+
+include /usr/share/dpkg/pkg-info.mk
+
INIT_SYSTEM ?= systemd
export PYBUILD_INSTALL_ARGS=--init-system=$(INIT_SYSTEM)
-DEB_VERSION := $(shell dpkg-parsechangelog --show-field=Version)
%:
- dh $@ --with python3,systemd --buildsystem pybuild
-
-override_dh_install:
- dh_install
- install -d debian/cloud-init/etc/rsyslog.d
- install -d debian/cloud-init/usr/share/apport/package-hooks
- cp tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf
- install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh
- install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init/etc/profile.d/Z99-cloudinit-warnings.sh
- flist=$$(find $(CURDIR)/debian/ -type f -name version.py) && sed -i 's,@@PACKAGED_VERSION@@,$(DEB_VERSION),' $${flist:-did-not-find-version-py-for-replacement}
+ dh $@ --with python3 --buildsystem pybuild
override_dh_auto_test:
ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS)))
- http_proxy= make check
+ http_proxy= make PYVER=python3 check
else
@echo check disabled by DEB_BUILD_OPTIONS=$(DEB_BUILD_OPTIONS)
endif
-override_dh_systemd_start:
- dh_systemd_start --no-restart-on-upgrade --no-start
+override_dh_installsystemd:
+ dh_installsystemd --no-restart-on-upgrade --no-start
+
+override_dh_auto_install:
+ dh_auto_install --destdir=debian/cloud-init
+ install -D -m 0644 ./tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf
+ install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh
+ install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init/etc/profile.d/Z99-cloudinit-warnings.sh
+ flist=$$(find $(CURDIR)/debian/ -type f -name version.py) && sed -i 's,@@PACKAGED_VERSION@@,$(DEB_VERSION),' $${flist:-did-not-find-version-py-for-replacement}
diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
index 308ffedd..945a5fb6 100644
--- a/tests/integration_tests/clouds.py
+++ b/tests/integration_tests/clouds.py
@@ -402,7 +402,7 @@ class OpenstackCloud(IntegrationCloud):
try:
UUID(image.image_id)
except ValueError as e:
- raise Exception(
+ raise RuntimeError(
"When using Openstack, `OS_IMAGE` MUST be specified with "
"a 36-character UUID image ID. Passing in a release name is "
"not valid here.\n"
diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py
index 782ca7e5..fabeb608 100644
--- a/tests/integration_tests/conftest.py
+++ b/tests/integration_tests/conftest.py
@@ -65,7 +65,7 @@ def pytest_runtest_setup(item):
unsupported_message = "Cannot run on platform {}".format(current_platform)
if "no_container" in test_marks:
if "lxd_container" in test_marks:
- raise Exception(
+ raise RuntimeError(
"lxd_container and no_container marks simultaneously set "
"on test"
)
diff --git a/tests/integration_tests/datasources/test_detect_openstack.py b/tests/integration_tests/datasources/test_detect_openstack.py
new file mode 100644
index 00000000..c70e9815
--- /dev/null
+++ b/tests/integration_tests/datasources/test_detect_openstack.py
@@ -0,0 +1,43 @@
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+
+
+@pytest.mark.lxd_vm
+@pytest.mark.lxd_use_exec
+def test_lxd_datasource_kernel_override(client: IntegrationInstance):
+ """This test is twofold: it tests kernel commandline override, which also
+ validates OpenStack Ironic requirements. OpenStack Ironic does not
+ advertise itself to cloud-init via any of the conventional methods: DMI,
+ etc.
+
+ On systemd, ds-identify is able to grok kernel commandline, however to
+ support cloud-init kernel command line parsing on non-systemd, parsing
+ kernel commandline in Python code is required.
+
+ This test runs on LXD, but forces cloud-init to attempt to run OpenStack.
+ This will inevitably fail on LXD, but we only care that it tried - on
+ Ironic it will succeed.
+
+ Configure grub's kernel command line to tell cloud-init to use OpenStack
+ - even though LXD should naturally be detected.
+ """
+ client.execute(
+ "sed --in-place "
+ '\'s/^.*GRUB_CMDLINE_LINUX=.*$/GRUB_CMDLINE_LINUX="ci.ds=OpenStack"/g'
+ "' /etc/default/grub"
+ )
+
+ # We should probably include non-systemd distros at some point. This should
+ # most likely be as simple as updating the output path for grub-mkconfig
+ client.execute("grub-mkconfig -o /boot/efi/EFI/ubuntu/grub.cfg")
+ client.execute("cloud-init clean --logs")
+ client.instance.shutdown()
+ client.instance.execute_via_ssh = False
+ client.instance.start()
+ client.execute("cloud-init status --wait")
+ log = client.execute("cat /var/log/cloud-init.log")
+ assert (
+ "Machine is configured by the kernel commandline to run on single "
+ "datasource DataSourceOpenStackLocal"
+ ) in log
diff --git a/tests/integration_tests/datasources/test_oci_networking.py b/tests/integration_tests/datasources/test_oci_networking.py
index f569650e..dc0d343b 100644
--- a/tests/integration_tests/datasources/test_oci_networking.py
+++ b/tests/integration_tests/datasources/test_oci_networking.py
@@ -116,3 +116,42 @@ def test_oci_networking_iscsi_instance_secondary_vnics(
)
assert len(expected_interfaces) + 1 == len(configured_interfaces)
assert client.execute("ping -c 2 canonical.com").ok
+
+
+SYSTEM_CFG = """\
+network:
+ ethernets:
+ id0:
+ dhcp4: true
+ dhcp6: true
+ match:
+ name: "ens*"
+ version: 2
+"""
+
+
+def customize_netcfg(
+ client: IntegrationInstance,
+ tmpdir,
+):
+ cfg = tmpdir.join("net.cfg")
+ with open(cfg, "w") as f:
+ f.write(SYSTEM_CFG)
+ client.push_file(cfg, "/etc/cloud/cloud.cfg.d/50-network-test.cfg")
+ client.execute("cloud-init clean --logs")
+ client.restart()
+
+
+@pytest.mark.oci
+def test_oci_networking_system_cfg(client: IntegrationInstance, tmpdir):
+ customize_netcfg(client, tmpdir)
+ log = client.read_from_file("/var/log/cloud-init.log")
+ verify_clean_log(log)
+
+ assert (
+ "Applying network configuration from system_cfg" in log
+ ), "network source used wasn't system_cfg"
+ netplan_yaml = client.read_from_file("/etc/netplan/50-cloud-init.yaml")
+ netplan_cfg = yaml.safe_load(netplan_yaml)
+ expected_netplan_cfg = yaml.safe_load(SYSTEM_CFG)
+ assert expected_netplan_cfg == netplan_cfg
diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py
index cf2bf4cc..7bdf05d0 100644
--- a/tests/integration_tests/instances.py
+++ b/tests/integration_tests/instances.py
@@ -75,7 +75,7 @@ class IntegrationInstance:
def execute(self, command, *, use_sudo=True) -> Result:
if self.instance.username == "root" and use_sudo is False:
- raise Exception("Root user cannot run unprivileged")
+ raise RuntimeError("Root user cannot run unprivileged")
return self.instance.execute(command, use_sudo=use_sudo)
def pull_file(self, remote_path, local_path):
@@ -139,7 +139,7 @@ class IntegrationInstance:
elif source == CloudInitSource.UPGRADE:
self.upgrade_cloud_init()
else:
- raise Exception(
+ raise RuntimeError(
"Specified to install {} which isn't supported here".format(
source
)
diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py
index 264e2383..8481b454 100644
--- a/tests/integration_tests/modules/test_combined.py
+++ b/tests/integration_tests/modules/test_combined.py
@@ -60,7 +60,7 @@ rsyslog:
content: |
module(load="imtcp")
input(type="imtcp" port="514")
- $template RemoteLogs,"/var/tmp/rsyslog.log"
+ $template RemoteLogs,"/var/spool/rsyslog/cloudinit.log"
*.* ?RemoteLogs
& ~
remotes:
@@ -175,7 +175,9 @@ class TestCombined:
def test_rsyslog(self, class_client: IntegrationInstance):
"""Test rsyslog is configured correctly."""
client = class_client
- assert "My test log" in client.read_from_file("/var/tmp/rsyslog.log")
+ assert "My test log" in client.read_from_file(
+ "/var/spool/rsyslog/cloudinit.log"
+ )
def test_runcmd(self, class_client: IntegrationInstance):
"""Test runcmd works as expected"""
diff --git a/tests/unittests/config/test_apt_configure_sources_list_v1.py b/tests/unittests/config/test_apt_configure_sources_list_v1.py
index 52964e10..b0bf54f4 100644
--- a/tests/unittests/config/test_apt_configure_sources_list_v1.py
+++ b/tests/unittests/config/test_apt_configure_sources_list_v1.py
@@ -135,7 +135,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
@staticmethod
def myresolve(name):
"""Fake util.is_resolvable for mirrorfail tests"""
- if name == "does.not.exist":
+ if "does.not.exist" in name:
print("Faking FAIL for '%s'" % name)
return False
else:
@@ -155,8 +155,8 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
],
"http://httpredir.debian.org/debian",
)
- mockresolve.assert_any_call("does.not.exist")
- mockresolve.assert_any_call("httpredir.debian.org")
+ mockresolve.assert_any_call("http://does.not.exist")
+ mockresolve.assert_any_call("http://httpredir.debian.org/debian")
def test_apt_v1_srcl_ubuntu_mirrorfail(self):
"""Test rendering of a source.list from template for ubuntu"""
@@ -168,8 +168,8 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase):
["http://does.not.exist", "http://archive.ubuntu.com/ubuntu/"],
"http://archive.ubuntu.com/ubuntu/",
)
- mockresolve.assert_any_call("does.not.exist")
- mockresolve.assert_any_call("archive.ubuntu.com")
+ mockresolve.assert_any_call("http://does.not.exist")
+ mockresolve.assert_any_call("http://archive.ubuntu.com/ubuntu/")
def test_apt_v1_srcl_custom(self):
"""Test rendering from a custom source.list template"""
diff --git a/tests/unittests/config/test_apt_source_v3.py b/tests/unittests/config/test_apt_source_v3.py
index 8d7ba5dc..1813000e 100644
--- a/tests/unittests/config/test_apt_source_v3.py
+++ b/tests/unittests/config/test_apt_source_v3.py
@@ -963,11 +963,11 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
with mock.patch.object(util, "is_resolvable") as mockresolve:
util.is_resolvable_url("http://1.2.3.4/ubuntu")
- mockresolve.assert_called_with("1.2.3.4")
+ mockresolve.assert_called_with("http://1.2.3.4/ubuntu")
with mock.patch.object(util, "is_resolvable") as mockresolve:
util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu")
- mockresolve.assert_called_with("us.archive.ubuntu.com")
+ mockresolve.assert_called_with("http://us.archive.ubuntu.com/ubuntu")
# former tests can leave this set (or not if the test is ran directly)
# do a hard reset to ensure a stable result
@@ -984,7 +984,6 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
)
mocksock.assert_any_call("example.invalid.", None, 0, 0, 1, 2)
mocksock.assert_any_call("us.archive.ubuntu.com", None)
- mocksock.assert_any_call("1.2.3.4", None)
self.assertTrue(ret)
self.assertTrue(ret2)
diff --git a/tests/unittests/config/test_cc_ca_certs.py b/tests/unittests/config/test_cc_ca_certs.py
index adc3609a..07a29395 100644
--- a/tests/unittests/config/test_cc_ca_certs.py
+++ b/tests/unittests/config/test_cc_ca_certs.py
@@ -367,6 +367,18 @@ class TestRemoveDefaultCaCerts(TestCase):
else:
assert mock_subp.call_count == 0
+ def test_non_existent_cert_cfg(self):
+ self.m_stat.return_value.st_size = 0
+
+ for distro_name in cc_ca_certs.distros:
+ conf = cc_ca_certs._distro_ca_certs_configs(distro_name)
+ with ExitStack() as mocks:
+ mocks.enter_context(
+ mock.patch.object(util, "delete_dir_contents")
+ )
+ mocks.enter_context(mock.patch.object(subp, "subp"))
+ cc_ca_certs.disable_default_ca_certs(distro_name, conf)
+
class TestCACertsSchema:
"""Directly test schema rather than through handle."""
diff --git a/tests/unittests/config/test_cc_disk_setup.py b/tests/unittests/config/test_cc_disk_setup.py
index 496ad8e1..39314313 100644
--- a/tests/unittests/config/test_cc_disk_setup.py
+++ b/tests/unittests/config/test_cc_disk_setup.py
@@ -75,7 +75,7 @@ class TestGetMbrHddSize(TestCase):
return hdd_size_in_bytes, None
elif "--getss" in cmd:
return sector_size_in_bytes, None
- raise Exception("Unexpected blockdev command called")
+ raise RuntimeError("Unexpected blockdev command called")
self.subp.side_effect = _subp
diff --git a/tests/unittests/config/test_cc_growpart.py b/tests/unittests/config/test_cc_growpart.py
index 13622332..e4480341 100644
--- a/tests/unittests/config/test_cc_growpart.py
+++ b/tests/unittests/config/test_cc_growpart.py
@@ -381,7 +381,7 @@ class TestEncrypted:
return "/dev/vdz"
elif value.startswith("/dev"):
return value
- raise Exception(f"unexpected value {value}")
+ raise RuntimeError(f"unexpected value {value}")
def _realpath_side_effect(self, value):
return "/dev/dm-1" if value.startswith("/dev/mapper") else value
diff --git a/tests/unittests/config/test_cc_grub_dpkg.py b/tests/unittests/config/test_cc_grub_dpkg.py
index aa076d19..189b674b 100644
--- a/tests/unittests/config/test_cc_grub_dpkg.py
+++ b/tests/unittests/config/test_cc_grub_dpkg.py
@@ -11,7 +11,7 @@ from cloudinit.config.schema import (
get_schema,
validate_cloudconfig_schema,
)
-from cloudinit.subp import ProcessExecutionError
+from cloudinit.subp import ProcessExecutionError, SubpResult
from tests.unittests.helpers import does_not_raise, skipUnlessJsonSchema
@@ -21,7 +21,7 @@ class TestFetchIdevs:
# Note: udevadm info returns devices in a large single line string
@pytest.mark.parametrize(
"grub_output,path_exists,expected_log_call,udevadm_output"
- ",expected_idevs",
+ ",expected_idevs,is_efi_boot",
[
# Inside a container, grub not installed
(
@@ -30,6 +30,7 @@ class TestFetchIdevs:
mock.call("'grub-probe' not found in $PATH"),
"",
"",
+ False,
),
# Inside a container, grub installed
(
@@ -38,10 +39,11 @@ class TestFetchIdevs:
mock.call("grub-probe 'failed to get canonical path'"),
"",
"",
+ False,
),
# KVM Instance
(
- ["/dev/vda"],
+ SubpResult("/dev/vda", ""),
True,
None,
(
@@ -49,18 +51,20 @@ class TestFetchIdevs:
"/dev/disk/by-path/virtio-pci-0000:00:00.0 ",
),
"/dev/vda",
+ False,
),
# Xen Instance
(
- ["/dev/xvda"],
+ SubpResult("/dev/xvda", ""),
True,
None,
"",
"/dev/xvda",
+ False,
),
# NVMe Hardware Instance
(
- ["/dev/nvme1n1"],
+ SubpResult("/dev/nvme1n1", ""),
True,
None,
(
@@ -69,10 +73,11 @@ class TestFetchIdevs:
"/dev/disk/by-path/pci-0000:00:00.0-nvme-0 ",
),
"/dev/disk/by-id/nvme-Company_hash000",
+ False,
),
# SCSI Hardware Instance
(
- ["/dev/sda"],
+ SubpResult("/dev/sda", ""),
True,
None,
(
@@ -81,9 +86,28 @@ class TestFetchIdevs:
"/dev/disk/by-path/pci-0000:00:00.0-scsi-0:0:0:0 ",
),
"/dev/disk/by-id/company-user-1",
+ False,
+ ),
+ # UEFI Hardware Instance
+ (
+ SubpResult("/dev/sda2", ""),
+ True,
+ None,
+ (
+ "/dev/disk/by-id/scsi-3500a075116e6875a "
+ "/dev/disk/by-id/scsi-SATA_Crucial_CT525MX3_171816E6875A "
+ "/dev/disk/by-id/scsi-0ATA_Crucial_CT525MX3_171816E6875A "
+ "/dev/disk/by-path/pci-0000:00:17.0-ata-1 "
+ "/dev/disk/by-id/wwn-0x500a075116e6875a "
+ "/dev/disk/by-id/ata-Crucial_CT525MX300SSD1_171816E6875A"
+ ),
+ "/dev/disk/by-id/ata-Crucial_CT525MX300SSD1_171816E6875A-"
+ "part1",
+ True,
),
],
)
+ @mock.patch("cloudinit.config.cc_grub_dpkg.is_efi_booted")
@mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
@mock.patch("cloudinit.config.cc_grub_dpkg.os.path.exists")
@mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
@@ -92,18 +116,30 @@ class TestFetchIdevs:
m_subp,
m_exists,
m_logexc,
+ m_efi_booted,
grub_output,
path_exists,
expected_log_call,
udevadm_output,
expected_idevs,
+ is_efi_boot,
):
- """Tests outputs from grub-probe and udevadm info against grub-dpkg"""
- m_subp.side_effect = [grub_output, ["".join(udevadm_output)]]
+ """Tests outputs from grub-probe and udevadm info against grub_dpkg"""
+ m_subp.side_effect = [
+ grub_output,
+ SubpResult("".join(udevadm_output), ""),
+ ]
m_exists.return_value = path_exists
+ m_efi_booted.return_value = is_efi_boot
log = mock.Mock(spec=Logger)
+
idevs = fetch_idevs(log)
- assert expected_idevs == idevs
+
+ if is_efi_boot:
+ assert expected_idevs.startswith(idevs) is True
+ else:
+ assert idevs == expected_idevs
+
if expected_log_call is not None:
assert expected_log_call in log.debug.call_args_list
@@ -112,7 +148,8 @@ class TestHandle:
"""Tests cc_grub_dpkg.handle()"""
@pytest.mark.parametrize(
- "cfg_idevs,cfg_idevs_empty,fetch_idevs_output,expected_log_output",
+ "cfg_idevs,cfg_idevs_empty,fetch_idevs_output,"
+ "expected_log_output,is_uefi",
[
(
# No configuration
@@ -121,8 +158,11 @@ class TestHandle:
"/dev/disk/by-id/nvme-Company_hash000",
(
"Setting grub debconf-set-selections with ",
- "'/dev/disk/by-id/nvme-Company_hash000','false'",
+ "'grub-pc grub-pc/install_devices string "
+ "/dev/disk/by-id/nvme-Company_hash000\n",
+ "grub-pc grub-pc/install_devices_empty boolean false\n'",
),
+ False,
),
(
# idevs set, idevs_empty unset
@@ -131,8 +171,10 @@ class TestHandle:
"/dev/sda",
(
"Setting grub debconf-set-selections with ",
- "'/dev/sda','false'",
+ "'grub-pc grub-pc/install_devices string /dev/sda\n",
+ "grub-pc grub-pc/install_devices_empty boolean false\n'",
),
+ False,
),
(
# idevs unset, idevs_empty set
@@ -141,8 +183,10 @@ class TestHandle:
"/dev/xvda",
(
"Setting grub debconf-set-selections with ",
- "'/dev/xvda','true'",
+ "'grub-pc grub-pc/install_devices string /dev/xvda\n",
+ "grub-pc grub-pc/install_devices_empty boolean true\n'",
),
+ False,
),
(
# idevs set, idevs_empty set
@@ -151,8 +195,10 @@ class TestHandle:
"/dev/disk/by-id/company-user-1",
(
"Setting grub debconf-set-selections with ",
- "'/dev/vda','false'",
+ "'grub-pc grub-pc/install_devices string /dev/vda\n",
+ "grub-pc grub-pc/install_devices_empty boolean false\n'",
),
+ False,
),
(
# idevs set, idevs_empty set
@@ -162,16 +208,31 @@ class TestHandle:
"",
(
"Setting grub debconf-set-selections with ",
- "'/dev/nvme0n1','true'",
+ "'grub-pc grub-pc/install_devices string /dev/nvme0n1\n",
+ "grub-pc grub-pc/install_devices_empty boolean true\n'",
+ ),
+ False,
+ ),
+ (
+ # uefi active, idevs set
+ "/dev/sda1",
+ False,
+ "/dev/sda1",
+ (
+ "Setting grub debconf-set-selections with ",
+ "'grub-pc grub-efi/install_devices string /dev/sda1\n'",
),
+ True,
),
],
)
@mock.patch("cloudinit.config.cc_grub_dpkg.fetch_idevs")
@mock.patch("cloudinit.config.cc_grub_dpkg.util.logexc")
@mock.patch("cloudinit.config.cc_grub_dpkg.subp.subp")
+ @mock.patch("cloudinit.config.cc_grub_dpkg.is_efi_booted")
def test_handle(
self,
+ m_is_efi_booted,
m_subp,
m_logexc,
m_fetch_idevs,
@@ -179,8 +240,10 @@ class TestHandle:
cfg_idevs_empty,
fetch_idevs_output,
expected_log_output,
+ is_uefi,
):
"""Test setting of correct debconf database entries"""
+ m_is_efi_booted.return_value = is_uefi
m_fetch_idevs.return_value = fetch_idevs_output
log = mock.Mock(spec=Logger)
cfg = {"grub_dpkg": {}}
@@ -235,8 +298,8 @@ class TestGrubDpkgSchema:
pytest.raises(
SchemaValidationError,
match=(
- "Cloud config schema deprecations: grub-dpkg:"
- " Deprecated in version 22.2. Use "
+ "Cloud config schema deprecations: grub-dpkg: An alias"
+ " for ``grub_dpkg`` Deprecated in version 22.2. Use "
"``grub_dpkg`` instead."
),
),
diff --git a/tests/unittests/config/test_cc_power_state_change.py b/tests/unittests/config/test_cc_power_state_change.py
index fbdc06ef..ccec0fde 100644
--- a/tests/unittests/config/test_cc_power_state_change.py
+++ b/tests/unittests/config/test_cc_power_state_change.py
@@ -164,7 +164,7 @@ def check_lps_ret(psc_return, mode=None):
if len(errs):
lines = ["Errors in result: %s" % str(psc_return)] + errs
- raise Exception("\n".join(lines))
+ raise RuntimeError("\n".join(lines))
class TestPowerStateChangeSchema:
diff --git a/tests/unittests/config/test_cc_refresh_rmc_and_interface.py b/tests/unittests/config/test_cc_refresh_rmc_and_interface.py
index e038f814..ee5ee8a8 100644
--- a/tests/unittests/config/test_cc_refresh_rmc_and_interface.py
+++ b/tests/unittests/config/test_cc_refresh_rmc_and_interface.py
@@ -110,7 +110,6 @@ class TestRsctNodeFile(t_help.CiTestCase):
util.load_file(fname),
)
- @mock.patch(MPATH + ".refresh_rmc")
@mock.patch(MPATH + ".restart_network_manager")
@mock.patch(MPATH + ".disable_ipv6")
@mock.patch(MPATH + ".refresh_ipv6")
@@ -123,11 +122,13 @@ class TestRsctNodeFile(t_help.CiTestCase):
m_refresh_ipv6,
m_disable_ipv6,
m_restart_nm,
- m_which,
):
- """Basic test of handle."""
+ """Basic test of handle.
+
+ TODO: This test has suspicious mock names, is it actually testing the
+ correct things?
+ """
m_netdev_info.return_value = NET_INFO
- m_which.return_value = "/opt/rsct/bin/rmcctrl"
ccrmci.handle("refresh_rmc_and_interface", None, None, None, None)
self.assertEqual(1, m_netdev_info.call_count)
m_refresh_ipv6.assert_called_with("env5")
diff --git a/tests/unittests/config/test_cc_set_hostname.py b/tests/unittests/config/test_cc_set_hostname.py
index 2c92949f..4e74c55c 100644
--- a/tests/unittests/config/test_cc_set_hostname.py
+++ b/tests/unittests/config/test_cc_set_hostname.py
@@ -226,7 +226,7 @@ class TestHostname(t_help.FilesystemMockingTestCase):
distro = self._fetch_distro("debian")
def set_hostname_error(hostname, fqdn):
- raise Exception("OOPS on: %s" % fqdn)
+ raise RuntimeError("OOPS on: %s" % fqdn)
distro.set_hostname = set_hostname_error
paths = helpers.Paths({"cloud_dir": self.tmp})
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index fec63809..32503fb8 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -173,7 +173,7 @@ class CiTestCase(TestCase):
)
if pass_through:
return _real_subp(*args, **kwargs)
- raise Exception(
+ raise RuntimeError(
"called subp. set self.allowed_subp=True to allow\n subp(%s)"
% ", ".join(
[str(repr(a)) for a in args]
diff --git a/tests/unittests/sources/test___init__.py b/tests/unittests/sources/test___init__.py
new file mode 100644
index 00000000..b84976da
--- /dev/null
+++ b/tests/unittests/sources/test___init__.py
@@ -0,0 +1,40 @@
+import pytest
+
+from cloudinit import sources
+from cloudinit.sources import DataSourceOpenStack as ds
+from tests.unittests.helpers import mock
+
+
+@pytest.mark.parametrize(
+ "m_cmdline",
+ (
+ # test ci.ds=
+ "aosiejfoij ci.ds=OpenStack ",
+ "ci.ds=OpenStack",
+ "aosiejfoij ci.ds=OpenStack blah",
+ "aosiejfoij ci.ds=OpenStack faljskebflk",
+ # test ci.datasource=
+ "aosiejfoij ci.datasource=OpenStack ",
+ "ci.datasource=OpenStack",
+ "aosiejfoij ci.datasource=OpenStack blah",
+ "aosiejfoij ci.datasource=OpenStack faljskebflk",
+ # weird whitespace
+ "ci.datasource=OpenStack\n",
+ "ci.datasource=OpenStack\t",
+ "ci.datasource=OpenStack\r",
+ "ci.datasource=OpenStack\v",
+ "ci.ds=OpenStack\n",
+ "ci.ds=OpenStack\t",
+ "ci.ds=OpenStack\r",
+ "ci.ds=OpenStack\v",
+ ),
+)
+def test_ds_detect_kernel_commandline(m_cmdline):
+ """check commandline match"""
+ with mock.patch(
+ "cloudinit.util.get_cmdline",
+ return_value=m_cmdline,
+ ):
+ assert (
+ ds.DataSourceOpenStack.dsname == sources.parse_cmdline()
+ ), f"could not parse [{m_cmdline}]"
diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py
index 166cbe13..9815c913 100644
--- a/tests/unittests/sources/test_azure.py
+++ b/tests/unittests/sources/test_azure.py
@@ -1096,8 +1096,8 @@ scbus-1 on xpt0 bus 0
dev = ds.get_resource_disk_on_freebsd(1)
self.assertEqual("da1", dev)
- def test_not_is_platform_viable_seed_should_return_no_datasource(self):
- """Check seed_dir using _is_platform_viable and return False."""
+ def test_not_ds_detect_seed_should_return_no_datasource(self):
+ """Check seed_dir using ds_detect and return False."""
# Return a non-matching asset tag value
data = {}
dsrc = self._get_ds(data)
@@ -3022,8 +3022,10 @@ class TestPreprovisioningPollIMDS(CiTestCase):
m_media_switch.return_value = None
dhcp_ctx = mock.MagicMock(lease=lease)
dhcp_ctx.obtain_lease.return_value = lease
+ dhcp_ctx.iface = lease["interface"]
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ dsa._ephemeral_dhcp_ctx = dhcp_ctx
with mock.patch.object(
dsa, "_reported_ready_marker_file", report_file
):
@@ -3031,7 +3033,7 @@ class TestPreprovisioningPollIMDS(CiTestCase):
assert m_report_ready.mock_calls == [mock.call()]
- self.assertEqual(3, m_dhcp.call_count, "Expected 3 DHCP calls")
+ self.assertEqual(2, m_dhcp.call_count, "Expected 2 DHCP calls")
assert m_fetch_reprovisiondata.call_count == 2
@mock.patch("os.path.isfile")
@@ -3162,6 +3164,7 @@ class TestPreprovisioningPollIMDS(CiTestCase):
distro.get_tmp_exec_path = self.tmp_dir
dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
self.assertFalse(os.path.exists(report_file))
+ dsa._ephemeral_dhcp_ctx = mock.Mock(interface="eth9")
with mock.patch.object(
dsa, "_reported_ready_marker_file", report_file
):
@@ -3196,6 +3199,7 @@ class TestPreprovisioningPollIMDS(CiTestCase):
distro.get_tmp_exec_path = self.tmp_dir
dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
self.assertFalse(os.path.exists(report_file))
+ dsa._ephemeral_dhcp_ctx = mock.Mock(interface="eth9")
with mock.patch.object(
dsa, "_reported_ready_marker_file", report_file
):
@@ -3237,8 +3241,9 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
}
]
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ dsa._ephemeral_dhcp_ctx = mock.Mock(interface="eth9")
self.assertTrue(len(dsa._poll_imds()) > 0)
- self.assertEqual(m_dhcp.call_count, 2)
+ self.assertEqual(m_dhcp.call_count, 1)
m_net.assert_any_call(
broadcast="192.168.2.255",
interface="eth9",
@@ -3247,7 +3252,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
router="192.168.2.1",
static_routes=None,
)
- self.assertEqual(m_net.call_count, 2)
+ self.assertEqual(m_net.call_count, 1)
def test__reprovision_calls__poll_imds(
self, m_fetch_reprovisiondata, m_dhcp, m_net, m_media_switch
@@ -3268,10 +3273,11 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
content = construct_ovf_env(username=username, hostname=hostname)
m_fetch_reprovisiondata.side_effect = [content]
dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
+ dsa._ephemeral_dhcp_ctx = mock.Mock(interface="eth9")
md, _ud, cfg, _d = dsa._reprovision()
self.assertEqual(md["local-hostname"], hostname)
self.assertEqual(cfg["system_info"]["default_user"]["name"], username)
- self.assertEqual(m_dhcp.call_count, 2)
+ self.assertEqual(m_dhcp.call_count, 1)
m_net.assert_any_call(
broadcast="192.168.2.255",
interface="eth9",
@@ -3280,7 +3286,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
router="192.168.2.1",
static_routes=None,
)
- self.assertEqual(m_net.call_count, 2)
+ self.assertEqual(m_net.call_count, 1)
class TestRemoveUbuntuNetworkConfigScripts(CiTestCase):
@@ -3350,7 +3356,7 @@ class TestIsPlatformViable:
):
mock_chassis_asset_tag.return_value = tag
- assert dsaz.is_platform_viable(None) is True
+ assert dsaz.DataSourceAzure.ds_detect(None) is True
def test_true_on_azure_ovf_env_in_seed_dir(
self, azure_ds, mock_chassis_asset_tag, tmpdir
@@ -3361,7 +3367,7 @@ class TestIsPlatformViable:
seed_path.parent.mkdir(exist_ok=True, parents=True)
seed_path.write_text("")
- assert dsaz.is_platform_viable(seed_path.parent) is True
+ assert dsaz.DataSourceAzure.ds_detect(seed_path.parent) is True
def test_false_on_no_matching_azure_criteria(
self, azure_ds, mock_chassis_asset_tag
@@ -3370,8 +3376,13 @@ class TestIsPlatformViable:
seed_path = Path(azure_ds.seed_dir, "ovf-env.xml")
seed_path.parent.mkdir(exist_ok=True, parents=True)
+ paths = helpers.Paths(
+ {"cloud_dir": "/tmp/", "run_dir": "/tmp/", "seed_dir": seed_path}
+ )
- assert dsaz.is_platform_viable(seed_path) is False
+ assert (
+ dsaz.DataSourceAzure({}, mock.Mock(), paths).ds_detect() is False
+ )
class TestRandomSeed(CiTestCase):
@@ -3696,7 +3707,7 @@ class TestProvisioning:
]
self.mock_azure_get_metadata_from_fabric.return_value = []
- self.azure_ds._get_data()
+ self.azure_ds._check_and_get_data()
assert self.mock_readurl.mock_calls == [
mock.call(
@@ -3758,7 +3769,7 @@ class TestProvisioning:
]
self.mock_azure_get_metadata_from_fabric.return_value = []
- self.azure_ds._get_data()
+ self.azure_ds._check_and_get_data()
assert self.mock_readurl.mock_calls == [
mock.call(
@@ -3859,7 +3870,7 @@ class TestProvisioning:
]
self.mock_azure_get_metadata_from_fabric.return_value = []
- self.azure_ds._get_data()
+ self.azure_ds._check_and_get_data()
assert self.mock_readurl.mock_calls == [
mock.call(
@@ -4008,7 +4019,7 @@ class TestProvisioning:
None,
]
- self.azure_ds._get_data()
+ self.azure_ds._check_and_get_data()
assert self.mock_readurl.mock_calls == [
mock.call(
@@ -4115,7 +4126,7 @@ class TestProvisioning:
]
self.mock_azure_get_metadata_from_fabric.return_value = []
- self.azure_ds._get_data()
+ self.azure_ds._check_and_get_data()
assert self.mock_readurl.mock_calls == [
mock.call(
@@ -4175,6 +4186,35 @@ class TestProvisioning:
# Verify no netlink operations for recovering PPS.
assert self.mock_netlink.mock_calls == []
+ @pytest.mark.parametrize("pps_type", ["Savable", "Running", "Unknown"])
+ def test_source_pps_fails_initial_dhcp(self, pps_type):
+ self.imds_md["extended"]["compute"]["ppsType"] = pps_type
+
+ nl_sock = mock.MagicMock()
+ self.mock_netlink.create_bound_netlink_socket.return_value = nl_sock
+ self.mock_readurl.side_effect = [
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ mock.MagicMock(contents=construct_ovf_env().encode()),
+ mock.MagicMock(contents=json.dumps(self.imds_md).encode()),
+ ]
+ self.mock_azure_get_metadata_from_fabric.return_value = []
+
+ self.mock_net_dhcp_maybe_perform_dhcp_discovery.side_effect = [
+ dhcp.NoDHCPLeaseError()
+ ]
+
+ with mock.patch.object(self.azure_ds, "_report_failure") as m_report:
+ self.azure_ds._get_data()
+
+ assert m_report.mock_calls == [mock.call()]
+
+ assert self.mock_wrapping_setup_ephemeral_networking.mock_calls == [
+ mock.call(timeout_minutes=20),
+ ]
+ assert self.mock_readurl.mock_calls == []
+ assert self.mock_azure_get_metadata_from_fabric.mock_calls == []
+ assert self.mock_netlink.mock_calls == []
+
@pytest.mark.parametrize(
"subp_side_effect",
[
diff --git a/tests/unittests/sources/test_cloudsigma.py b/tests/unittests/sources/test_cloudsigma.py
index b92c3723..3b3279c8 100644
--- a/tests/unittests/sources/test_cloudsigma.py
+++ b/tests/unittests/sources/test_cloudsigma.py
@@ -10,7 +10,6 @@ from tests.unittests import helpers as test_helpers
SERVER_CONTEXT = {
"cpu": 1000,
"cpus_instead_of_cores": False,
- "global_context": {"some_global_key": "some_global_val"},
"mem": 1073741824,
"meta": {
"ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe",
@@ -44,7 +43,7 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase):
super(DataSourceCloudSigmaTest, self).setUp()
self.paths = helpers.Paths({"run_dir": self.tmp_dir()})
self.add_patch(
- DS_PATH + ".is_running_in_cloudsigma",
+ DS_PATH + ".override_ds_detect",
"m_is_container",
return_value=True,
)
@@ -99,6 +98,7 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase):
)
def test_encoded_user_data(self):
+
encoded_context = copy.deepcopy(SERVER_CONTEXT)
encoded_context["meta"]["base64_fields"] = "cloudinit-user-data"
encoded_context["meta"]["cloudinit-user-data"] = "aGkgd29ybGQK"
@@ -142,6 +142,3 @@ class DsLoads(test_helpers.TestCase):
["CloudSigma"], (sources.DEP_FILESYSTEM,), ["cloudinit.sources"]
)
self.assertEqual([DataSourceCloudSigma.DataSourceCloudSigma], found)
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_exoscale.py b/tests/unittests/sources/test_exoscale.py
index c71889f9..82b567d7 100644
--- a/tests/unittests/sources/test_exoscale.py
+++ b/tests/unittests/sources/test_exoscale.py
@@ -76,7 +76,7 @@ class TestDatasourceExoscale(ResponsesTestCase):
full test data."""
path = helpers.Paths({"run_dir": self.tmp})
ds = DataSourceExoscale({}, None, path)
- ds._is_platform_viable = lambda: True
+ ds.ds_detect = lambda: True
expected_password = "p@ssw0rd"
expected_id = "12345"
expected_hostname = "myname"
@@ -102,7 +102,7 @@ class TestDatasourceExoscale(ResponsesTestCase):
"{}instance-id".format(self.metadata_url),
body=expected_id,
)
- self.assertTrue(ds._get_data())
+ self.assertTrue(ds._check_and_get_data())
self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
self.assertEqual(
ds.metadata,
@@ -124,7 +124,7 @@ class TestDatasourceExoscale(ResponsesTestCase):
returned by the password server."""
path = helpers.Paths({"run_dir": self.tmp})
ds = DataSourceExoscale({}, None, path)
- ds._is_platform_viable = lambda: True
+ ds.ds_detect = lambda: True
expected_answer = "saved_password"
expected_id = "12345"
expected_hostname = "myname"
@@ -150,7 +150,7 @@ class TestDatasourceExoscale(ResponsesTestCase):
"{}instance-id".format(self.metadata_url),
body=expected_id,
)
- self.assertTrue(ds._get_data())
+ self.assertTrue(ds._check_and_get_data())
self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
self.assertEqual(
ds.metadata,
@@ -163,7 +163,7 @@ class TestDatasourceExoscale(ResponsesTestCase):
returned by the password server."""
path = helpers.Paths({"run_dir": self.tmp})
ds = DataSourceExoscale({}, None, path)
- ds._is_platform_viable = lambda: True
+ ds.ds_detect = lambda: True
expected_answer = ""
expected_id = "12345"
expected_hostname = "myname"
@@ -189,7 +189,7 @@ class TestDatasourceExoscale(ResponsesTestCase):
"{}instance-id".format(self.metadata_url),
body=expected_id,
)
- self.assertTrue(ds._get_data())
+ self.assertTrue(ds._check_and_get_data())
self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config")
self.assertEqual(
ds.metadata,
@@ -236,5 +236,5 @@ class TestDatasourceExoscale(ResponsesTestCase):
"""The datasource fails fast when the platform is not viable."""
path = helpers.Paths({"run_dir": self.tmp})
ds = DataSourceExoscale({}, None, path)
- ds._is_platform_viable = lambda: False
- self.assertFalse(ds._get_data())
+ ds.ds_detect = lambda: False
+ self.assertFalse(ds._check_and_get_data())
diff --git a/tests/unittests/sources/test_init.py b/tests/unittests/sources/test_init.py
index 0447e02c..96e4dd90 100644
--- a/tests/unittests/sources/test_init.py
+++ b/tests/unittests/sources/test_init.py
@@ -894,15 +894,14 @@ class TestDataSource(CiTestCase):
self.datasource.default_update_events,
)
- def fake_get_data():
- raise Exception("get_data should not be called")
-
+ fake_get_data = mock.Mock()
self.datasource.get_data = fake_get_data
self.assertFalse(
self.datasource.update_metadata_if_supported(
source_event_types=[EventType.BOOT]
)
)
+ self.assertEqual([], fake_get_data.call_args_list)
@mock.patch.dict(
DataSource.supported_update_events,
diff --git a/tests/unittests/sources/test_nwcs.py b/tests/unittests/sources/test_nwcs.py
index 395f99f8..052e322a 100644
--- a/tests/unittests/sources/test_nwcs.py
+++ b/tests/unittests/sources/test_nwcs.py
@@ -47,16 +47,16 @@ class TestDataSourceNWCS(CiTestCase):
@mock.patch("cloudinit.sources.DataSourceNWCS.EphemeralDHCPv4")
@mock.patch("cloudinit.net.find_fallback_nic")
@mock.patch("cloudinit.sources.DataSourceNWCS.read_metadata")
- @mock.patch("cloudinit.sources.DataSourceNWCS.get_nwcs_data")
+ @mock.patch("cloudinit.sources.DataSourceNWCS.DataSourceNWCS.ds_detect")
def test_read_data(
self,
- m_get_nwcs_data,
+ m_ds_detect,
m_readmd,
m_fallback_nic,
m_net,
m_dhcp,
):
- m_get_nwcs_data.return_value = True
+ m_ds_detect.return_value = True
m_readmd.return_value = METADATA.copy()
m_fallback_nic.return_value = "eth0"
m_dhcp.return_value = [
@@ -92,13 +92,13 @@ class TestDataSourceNWCS(CiTestCase):
@mock.patch("cloudinit.sources.DataSourceNWCS.read_metadata")
@mock.patch("cloudinit.net.find_fallback_nic")
- @mock.patch("cloudinit.sources.DataSourceNWCS.get_nwcs_data")
+ @mock.patch("cloudinit.sources.DataSourceNWCS.DataSourceNWCS.ds_detect")
def test_not_on_nwcs_returns_false(
- self, m_get_nwcs_data, m_find_fallback, m_read_md
+ self, m_ds_detect, m_find_fallback, m_read_md
):
- """If helper 'get_nwcs_data' returns False,
+ """If 'ds_detect' returns False,
return False from get_data."""
- m_get_nwcs_data.return_value = False
+ m_ds_detect.return_value = False
ds = self.get_ds()
ret = ds.get_data()
diff --git a/tests/unittests/sources/test_opennebula.py b/tests/unittests/sources/test_opennebula.py
index 0fc332a9..43a5dd5f 100644
--- a/tests/unittests/sources/test_opennebula.py
+++ b/tests/unittests/sources/test_opennebula.py
@@ -562,26 +562,6 @@ class TestOpenNebulaNetwork(unittest.TestCase):
val = net.get_mask("eth0")
self.assertEqual("255.255.255.0", val)
- def test_get_network(self):
- """
- Verify get_network('device') correctly returns IPv4 network address.
- """
- context = {"ETH0_NETWORK": "1.2.3.0"}
- net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_network("eth0", MACADDR)
- self.assertEqual("1.2.3.0", val)
-
- def test_get_network_emptystring(self):
- """
- Verify get_network('device') correctly returns IPv4 network address.
- It returns network address created by MAC address if ETH0_NETWORK has
- empty string.
- """
- context = {"ETH0_NETWORK": ""}
- net = ds.OpenNebulaNetwork(context, mock.Mock())
- val = net.get_network("eth0", MACADDR)
- self.assertEqual("10.18.1.0", val)
-
def test_get_field(self):
"""
Verify get_field('device', 'name') returns *context* value.
diff --git a/tests/unittests/sources/test_openstack.py b/tests/unittests/sources/test_openstack.py
index 1a2f5924..b37a7570 100644
--- a/tests/unittests/sources/test_openstack.py
+++ b/tests/unittests/sources/test_openstack.py
@@ -305,7 +305,7 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase):
settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp})
)
self.assertIsNone(ds_os.version)
- with mock.patch.object(ds_os, "ds_detect", return_value=True):
+ with mock.patch.object(ds_os, "override_ds_detect", return_value=True):
self.assertTrue(ds_os.get_data())
self.assertEqual(2, ds_os.version)
md = dict(ds_os.metadata)
@@ -351,7 +351,7 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase):
self.assertIsNone(ds_os_local.version)
with test_helpers.mock.patch.object(
- ds_os_local, "ds_detect"
+ ds_os_local, "override_ds_detect"
) as m_detect_os:
m_detect_os.return_value = True
found = ds_os_local.get_data()
@@ -383,7 +383,9 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase):
settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp})
)
self.assertIsNone(ds_os.version)
- with test_helpers.mock.patch.object(ds_os, "ds_detect") as m_detect_os:
+ with test_helpers.mock.patch.object(
+ ds_os, "override_ds_detect"
+ ) as m_detect_os:
m_detect_os.return_value = True
found = ds_os.get_data()
self.assertFalse(found)
@@ -412,7 +414,7 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase):
"timeout": 0,
}
self.assertIsNone(ds_os.version)
- with mock.patch.object(ds_os, "ds_detect", return_value=True):
+ with mock.patch.object(ds_os, "override_ds_detect", return_value=True):
self.assertFalse(ds_os.get_data())
self.assertIsNone(ds_os.version)
@@ -488,7 +490,9 @@ class TestOpenStackDataSource(test_helpers.ResponsesTestCase):
"timeout": 0,
}
self.assertIsNone(ds_os.version)
- with test_helpers.mock.patch.object(ds_os, "ds_detect") as m_detect_os:
+ with test_helpers.mock.patch.object(
+ ds_os, "override_ds_detect"
+ ) as m_detect_os:
m_detect_os.return_value = True
found = ds_os.get_data()
self.assertFalse(found)
@@ -869,6 +873,3 @@ class TestMetadataReader(test_helpers.ResponsesTestCase):
reader._read_ec2_metadata = mock_read_ec2
self.assertEqual(expected, reader.read_v2())
self.assertEqual(1, mock_read_ec2.call_count)
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_oracle.py b/tests/unittests/sources/test_oracle.py
index e3648889..c67cacef 100644
--- a/tests/unittests/sources/test_oracle.py
+++ b/tests/unittests/sources/test_oracle.py
@@ -121,7 +121,7 @@ def oracle_ds(request, fixture_utils, paths, metadata_version, mocker):
This also performs the mocking required:
* ``_read_system_uuid`` returns something,
- * ``_is_platform_viable`` returns True,
+ * ``ds_detect`` returns True,
* ``DataSourceOracle._is_iscsi_root`` returns True by default or what
pytest.mark.is_iscsi gives as first param,
* ``DataSourceOracle._get_iscsi_config`` returns a network cfg if
@@ -144,7 +144,7 @@ def oracle_ds(request, fixture_utils, paths, metadata_version, mocker):
mocker.patch(DS_PATH + ".net.find_fallback_nic")
mocker.patch(DS_PATH + ".ephemeral.EphemeralDHCPv4")
mocker.patch(DS_PATH + "._read_system_uuid", return_value="someuuid")
- mocker.patch(DS_PATH + "._is_platform_viable", return_value=True)
+ mocker.patch(DS_PATH + ".DataSourceOracle.ds_detect", return_value=True)
mocker.patch(DS_PATH + ".read_opc_metadata", return_value=metadata)
mocker.patch(DS_PATH + ".KlibcOracleNetworkConfigSource")
ds = oracle.DataSourceOracle(
@@ -170,7 +170,7 @@ class TestDataSourceOracle:
assert "unknown" == oracle_ds.subplatform
def test_platform_info_after_fetch(self, oracle_ds):
- oracle_ds._get_data()
+ oracle_ds._check_and_get_data()
assert (
"metadata (http://169.254.169.254/opc/v2/)"
== oracle_ds.subplatform
@@ -178,7 +178,7 @@ class TestDataSourceOracle:
@pytest.mark.parametrize("metadata_version", [1])
def test_v1_platform_info_after_fetch(self, oracle_ds):
- oracle_ds._get_data()
+ oracle_ds._check_and_get_data()
assert (
"metadata (http://169.254.169.254/opc/v1/)"
== oracle_ds.subplatform
@@ -206,11 +206,11 @@ class TestIsPlatformViable:
("LetsGoCubs", False),
],
)
- def test_is_platform_viable(self, dmi_data, platform_viable):
+ def test_ds_detect(self, dmi_data, platform_viable):
with mock.patch(
DS_PATH + ".dmi.read_dmi_data", return_value=dmi_data
) as m_read_dmi_data:
- assert platform_viable == oracle._is_platform_viable()
+ assert platform_viable == oracle.DataSourceOracle.ds_detect()
m_read_dmi_data.assert_has_calls([mock.call("chassis-asset-tag")])
@@ -830,13 +830,13 @@ class TestCommon_GetDataBehaviour:
"""
@mock.patch(
- DS_PATH + "._is_platform_viable", mock.Mock(return_value=False)
+ DS_PATH + ".DataSourceOracle.ds_detect", mock.Mock(return_value=False)
)
def test_false_if_platform_not_viable(
self,
oracle_ds,
):
- assert not oracle_ds._get_data()
+ assert not oracle_ds._check_and_get_data()
@pytest.mark.parametrize(
"keyname,expected_value",
@@ -862,7 +862,7 @@ class TestCommon_GetDataBehaviour:
expected_value,
oracle_ds,
):
- assert oracle_ds._get_data()
+ assert oracle_ds._check_and_get_data()
assert expected_value == oracle_ds.metadata[keyname]
@pytest.mark.parametrize(
@@ -885,7 +885,7 @@ class TestCommon_GetDataBehaviour:
expected_value,
oracle_ds,
):
- assert oracle_ds._get_data()
+ assert oracle_ds._check_and_get_data()
assert expected_value == getattr(oracle_ds, attribute_name)
@pytest.mark.parametrize(
@@ -917,7 +917,7 @@ class TestCommon_GetDataBehaviour:
DS_PATH + ".read_opc_metadata",
mock.Mock(return_value=metadata),
):
- assert oracle_ds._get_data()
+ assert oracle_ds._check_and_get_data()
assert expected_value == oracle_ds.get_public_ssh_keys()
def test_missing_user_data_handled_gracefully(self, oracle_ds):
@@ -928,7 +928,7 @@ class TestCommon_GetDataBehaviour:
DS_PATH + ".read_opc_metadata",
mock.Mock(return_value=metadata),
):
- assert oracle_ds._get_data()
+ assert oracle_ds._check_and_get_data()
assert oracle_ds.userdata_raw is None
@@ -940,7 +940,7 @@ class TestCommon_GetDataBehaviour:
DS_PATH + ".read_opc_metadata",
mock.Mock(return_value=metadata),
):
- assert oracle_ds._get_data()
+ assert oracle_ds._check_and_get_data()
assert oracle_ds.userdata_raw is None
assert [] == oracle_ds.get_public_ssh_keys()
@@ -978,7 +978,7 @@ class TestNonIscsiRoot_GetDataBehaviour:
DS_PATH + ".read_opc_metadata",
mock.Mock(side_effect=assert_in_context_manager),
):
- assert oracle_ds._get_data()
+ assert oracle_ds._check_and_get_data()
assert [
mock.call(
@@ -1020,7 +1020,7 @@ class TestNonIscsiRoot_GetDataBehaviour:
DS_PATH + ".read_opc_metadata",
mock.Mock(side_effect=assert_in_context_manager),
):
- assert oracle_ds._get_data()
+ assert oracle_ds._check_and_get_data()
assert [
mock.call(
@@ -1132,6 +1132,15 @@ class TestNetworkConfig:
initramfs_idx = config_sources.index(NetworkConfigSource.INITRAMFS)
assert ds_idx < initramfs_idx
+ def test_system_network_cfg_preferred_over_ds(
+ self, m_get_interfaces_by_mac
+ ):
+ """Ensure that system net config is preferred over DS config"""
+ config_sources = oracle.DataSourceOracle.network_config_sources
+ ds_idx = config_sources.index(NetworkConfigSource.DS)
+ system_idx = config_sources.index(NetworkConfigSource.SYSTEM_CFG)
+ assert system_idx < ds_idx
+
@pytest.mark.parametrize("set_primary", [True, False])
def test__add_network_config_from_opc_imds_no_vnics_data(
self,
diff --git a/tests/unittests/sources/test_scaleway.py b/tests/unittests/sources/test_scaleway.py
index f9b470cb..d6a0874d 100644
--- a/tests/unittests/sources/test_scaleway.py
+++ b/tests/unittests/sources/test_scaleway.py
@@ -1,6 +1,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import json
+import sys
from urllib.parse import SplitResult, urlsplit
import requests
@@ -90,7 +91,7 @@ class TestOnScaleway(CiTestCase):
@mock.patch("cloudinit.util.get_cmdline")
@mock.patch("os.path.exists")
@mock.patch("cloudinit.dmi.read_dmi_data")
- def test_not_on_scaleway(
+ def test_not_ds_detect(
self, m_read_dmi_data, m_file_exists, m_get_cmdline
):
self.install_mocks(
@@ -98,7 +99,7 @@ class TestOnScaleway(CiTestCase):
fake_file_exists=(m_file_exists, False),
fake_cmdline=(m_get_cmdline, False),
)
- self.assertFalse(DataSourceScaleway.on_scaleway())
+ self.assertFalse(DataSourceScaleway.DataSourceScaleway.ds_detect())
# When not on Scaleway, get_data() returns False.
datasource = DataSourceScaleway.DataSourceScaleway(
@@ -109,7 +110,7 @@ class TestOnScaleway(CiTestCase):
@mock.patch("cloudinit.util.get_cmdline")
@mock.patch("os.path.exists")
@mock.patch("cloudinit.dmi.read_dmi_data")
- def test_on_scaleway_dmi(
+ def test_ds_detect_dmi(
self, m_read_dmi_data, m_file_exists, m_get_cmdline
):
"""
@@ -121,12 +122,12 @@ class TestOnScaleway(CiTestCase):
fake_file_exists=(m_file_exists, False),
fake_cmdline=(m_get_cmdline, False),
)
- self.assertTrue(DataSourceScaleway.on_scaleway())
+ self.assertTrue(DataSourceScaleway.DataSourceScaleway.ds_detect())
@mock.patch("cloudinit.util.get_cmdline")
@mock.patch("os.path.exists")
@mock.patch("cloudinit.dmi.read_dmi_data")
- def test_on_scaleway_var_run_scaleway(
+ def test_ds_detect_var_run_scaleway(
self, m_read_dmi_data, m_file_exists, m_get_cmdline
):
"""
@@ -137,12 +138,12 @@ class TestOnScaleway(CiTestCase):
fake_file_exists=(m_file_exists, True),
fake_cmdline=(m_get_cmdline, False),
)
- self.assertTrue(DataSourceScaleway.on_scaleway())
+ self.assertTrue(DataSourceScaleway.DataSourceScaleway.ds_detect())
@mock.patch("cloudinit.util.get_cmdline")
@mock.patch("os.path.exists")
@mock.patch("cloudinit.dmi.read_dmi_data")
- def test_on_scaleway_cmdline(
+ def test_ds_detect_cmdline(
self, m_read_dmi_data, m_file_exists, m_get_cmdline
):
"""
@@ -153,7 +154,7 @@ class TestOnScaleway(CiTestCase):
fake_file_exists=(m_file_exists, False),
fake_cmdline=(m_get_cmdline, True),
)
- self.assertTrue(DataSourceScaleway.on_scaleway())
+ self.assertTrue(DataSourceScaleway.DataSourceScaleway.ds_detect())
def get_source_address_adapter(*args, **kwargs):
@@ -204,8 +205,9 @@ class TestDataSourceScaleway(ResponsesTestCase):
]
self.add_patch(
- "cloudinit.sources.DataSourceScaleway.on_scaleway",
- "_m_on_scaleway",
+ "cloudinit.sources.DataSourceScaleway."
+ "DataSourceScaleway.ds_detect",
+ "_m_ds_detect",
return_value=True,
)
self.add_patch(
@@ -225,6 +227,9 @@ class TestDataSourceScaleway(ResponsesTestCase):
"""
get_data() returns metadata, user data and vendor data.
"""
+        # This test fails on Python 3.6; return early to skip it there.
+ if sys.version_info.minor < 7:
+ return
m_get_cmdline.return_value = "scaleway"
# Make user data API return a valid response
@@ -355,6 +360,9 @@ class TestDataSourceScaleway(ResponsesTestCase):
"""
get_data() returns metadata, but no user data nor vendor data.
"""
+        # This test fails on Python 3.6; return early to skip it there.
+ if sys.version_info.minor < 7:
+ return
m_get_cmdline.return_value = "scaleway"
# Make user and vendor data APIs return HTTP/404, which means there is
@@ -386,6 +394,9 @@ class TestDataSourceScaleway(ResponsesTestCase):
get_data() is rate limited two times by the metadata API when fetching
user data.
"""
+ if sys.version_info.minor < 7:
+ return
+
m_get_cmdline.return_value = "scaleway"
self.responses.add_callback(
diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py
index 2fc2e21c..e09fdfe4 100644
--- a/tests/unittests/sources/vmware/test_vmware_config_file.py
+++ b/tests/unittests/sources/vmware/test_vmware_config_file.py
@@ -50,18 +50,8 @@ class TestVmwareConfigFile(CiTestCase):
self.assertEqual(2, len(cf), "insert size")
self.assertEqual("foo", cf["PASSWORD|-PASS"], "password")
self.assertTrue("PASSWORD|-PASS" in cf, "hasPassword")
- self.assertFalse(
- cf.should_keep_current_value("PASSWORD|-PASS"), "keepPassword"
- )
- self.assertFalse(
- cf.should_remove_current_value("PASSWORD|-PASS"), "removePassword"
- )
self.assertFalse("FOO" in cf, "hasFoo")
- self.assertTrue(cf.should_keep_current_value("FOO"), "keepFoo")
- self.assertFalse(cf.should_remove_current_value("FOO"), "removeFoo")
self.assertTrue("BAR" in cf, "hasBar")
- self.assertFalse(cf.should_keep_current_value("BAR"), "keepBar")
- self.assertTrue(cf.should_remove_current_value("BAR"), "removeBar")
def test_configfile_without_instance_id(self):
"""
@@ -95,7 +85,6 @@ class TestVmwareConfigFile(CiTestCase):
self.assertEqual("myhost1", conf.host_name, "hostName")
self.assertEqual("Africa/Abidjan", conf.timezone, "tz")
- self.assertTrue(conf.utc, "utc")
self.assertEqual(
["10.20.145.1", "10.20.145.2"], conf.name_servers, "dns"
diff --git a/tests/unittests/test_apport.py b/tests/unittests/test_apport.py
index 1876c1be..c731a30a 100644
--- a/tests/unittests/test_apport.py
+++ b/tests/unittests/test_apport.py
@@ -1,3 +1,5 @@
+import os
+
import pytest
from tests.unittests.helpers import mock
@@ -5,24 +7,74 @@ from tests.unittests.helpers import mock
M_PATH = "cloudinit.apport."
+@pytest.fixture()
+def apport(request, mocker, paths):
+ """Mock apport.hookutils before importing cloudinit.apport.
+
+ This avoids our optional import dependency on apport, providing tests with
+ mocked apport.hookutils function call counts.
+ """
+ m_hookutils = mock.Mock()
+ mocker.patch.dict("sys.modules", {"apport.hookutils": m_hookutils})
+ mocker.patch(M_PATH + "read_cfg_paths", return_value=paths)
+ from cloudinit import apport
+
+ yield apport
+
+
class TestApport:
- def test_attach_user_data(self, mocker, tmpdir):
- m_hookutils = mock.Mock()
- mocker.patch.dict("sys.modules", {"apport.hookutils": m_hookutils})
- user_data_file = tmpdir.join("instance", "user-data.txt")
- mocker.patch(
- M_PATH + "_get_user_data_file", return_value=user_data_file
- )
+ @pytest.mark.parametrize(
+ "instance_data,choice_idx,expected_report",
+ (
+ pytest.param(
+ '{"v1": {"cloud_name": "mycloud"}}',
+ None,
+ {},
+ id="v1_cloud_name_exists",
+ ),
+ pytest.param(
+ '{"v1": {"cloud_id": "invalid"}}',
+ 1,
+ {"CloudName": "Azure"},
+ id="v1_no_cloud_name_present",
+ ),
+ pytest.param("{}", 0, {"CloudName": "AliYun"}, id="no_v1_key"),
+ pytest.param(
+ "{", 22, {"CloudName": "Oracle"}, id="not_valid_json"
+ ),
+ ),
+ )
+ def test_attach_cloud_info(
+ self, instance_data, choice_idx, expected_report, apport, paths
+ ):
+        """Prompt for cloud name when instance-data.json is invalid JSON or absent."""
- from cloudinit import apport
+ instance_data_file = paths.get_runpath("instance_data")
+ if instance_data is None:
+ assert not os.path.exists(instance_data_file)
+ else:
+ with open(instance_data_file, "w") as stream:
+ stream.write(instance_data)
+ ui = mock.Mock()
+ ui.yesno.return_value = True
+ ui.choice.return_value = (choice_idx, "")
+ report = {}
+ apport.attach_cloud_info(report, ui)
+ if choice_idx is not None:
+ assert ui.choice.call_count == 1
+ assert report["CloudName"] == apport.KNOWN_CLOUD_NAMES[choice_idx]
+ else:
+ assert ui.choice.call_count == 0
+ def test_attach_user_data(self, apport, paths):
+ user_data_file = paths.get_ipath_cur("userdata_raw")
ui = mock.Mock()
ui.yesno.return_value = True
report = object()
apport.attach_user_data(report, ui)
assert [
mock.call(report, user_data_file, "user_data.txt"),
- ] == m_hookutils.attach_file.call_args_list
+ ] == apport.attach_file.call_args_list
assert [
mock.call(
report,
@@ -35,7 +87,7 @@ class TestApport:
"/etc/cloud/cloud.cfg.d/99-installer.cfg",
"InstallerCloudCfg",
),
- ] == m_hookutils.attach_file_if_exists.call_args_list
+ ] == apport.attach_file_if_exists.call_args_list
@pytest.mark.parametrize(
"report,tags",
@@ -52,9 +104,8 @@ class TestApport:
),
),
)
- def test_add_bug_tags_assigns_proper_tags(self, report, tags):
+ def test_add_bug_tags_assigns_proper_tags(self, report, tags, apport):
"""Tags are assigned based on non-empty project report key values."""
- from cloudinit import apport
apport.add_bug_tags(report)
assert report.get("Tags", "") == tags
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index 07294214..e3fed410 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -55,14 +55,12 @@ class TestCLI:
data_d = tmpdir.join("data")
link_d = tmpdir.join("link")
FakeArgs = namedtuple("FakeArgs", ["action", "local", "mode"])
+ my_action = mock.Mock()
- def myaction():
- raise Exception("Should not call myaction")
-
- myargs = FakeArgs((action, myaction), False, "bogusmode")
+ myargs = FakeArgs((action, my_action), False, "bogusmode")
with pytest.raises(ValueError, match=match):
cli.status_wrapper(name, myargs, data_d, link_d)
- assert "Should not call myaction" not in caplog.text
+ assert [] == my_action.call_args_list
def test_status_wrapper_init_local_writes_fresh_status_info(self, tmpdir):
"""When running in init-local mode, status_wrapper writes status.json.
diff --git a/tests/unittests/test_features.py b/tests/unittests/test_features.py
index 94c7ae13..8aace78d 100644
--- a/tests/unittests/test_features.py
+++ b/tests/unittests/test_features.py
@@ -27,7 +27,7 @@ def create_override(request):
"""
override_path = Path(cloudinit.__file__).parent / "feature_overrides.py"
if override_path.exists():
- raise Exception(
+ raise RuntimeError(
"feature_overrides.py unexpectedly exists! "
"Remove it to run this test."
)
diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py
index afd9056a..2a363ec4 100644
--- a/tests/unittests/test_net_activators.py
+++ b/tests/unittests/test_net_activators.py
@@ -322,28 +322,3 @@ class TestActivatorsBringDown:
activator.bring_down_interface("eth0")
assert len(m_subp.call_args_list) == 1
assert m_subp.call_args_list[0] == expected_call_list[0]
-
- @patch("cloudinit.subp.subp", return_value=("", ""))
- def test_bring_down_interfaces(
- self, m_subp, activator, expected_call_list, available_mocks
- ):
- activator.bring_down_interfaces(["eth0", "eth1"])
- assert expected_call_list == m_subp.call_args_list
-
- @patch("cloudinit.subp.subp", return_value=("", ""))
- def test_bring_down_all_interfaces_v1(
- self, m_subp, activator, expected_call_list, available_mocks
- ):
- network_state = parse_net_config_data(load(V1_CONFIG))
- activator.bring_down_all_interfaces(network_state)
- for call in m_subp.call_args_list:
- assert call in expected_call_list
-
- @patch("cloudinit.subp.subp", return_value=("", ""))
- def test_bring_down_all_interfaces_v2(
- self, m_subp, activator, expected_call_list, available_mocks
- ):
- network_state = parse_net_config_data(load(V2_CONFIG))
- activator.bring_down_all_interfaces(network_state)
- for call in m_subp.call_args_list:
- assert call in expected_call_list
diff --git a/tests/unittests/test_netinfo.py b/tests/unittests/test_netinfo.py
index aecce921..7612a28b 100644
--- a/tests/unittests/test_netinfo.py
+++ b/tests/unittests/test_netinfo.py
@@ -198,7 +198,7 @@ class TestNetInfo:
return (SAMPLE_ROUTE_OUT_V4, "")
if args[0] == ["netstat", "-A", "inet6", "--route", "--numeric"]:
return (SAMPLE_ROUTE_OUT_V6, "")
- raise Exception("Unexpected subp call %s" % args[0])
+ raise RuntimeError("Unexpected subp call %s" % args[0])
m_subp.side_effect = subp_netstat_route_selector
m_which.side_effect = lambda x: x if x == "netstat" else None
@@ -216,7 +216,7 @@ class TestNetInfo:
v6cmd = ["ip", "--oneline", "-6", "route", "list", "table", "all"]
if v6cmd == args[0]:
return (SAMPLE_IPROUTE_OUT_V6, "")
- raise Exception("Unexpected subp call %s" % args[0])
+ raise RuntimeError("Unexpected subp call %s" % args[0])
m_subp.side_effect = subp_iproute_selector
m_which.side_effect = lambda x: x if x == "ip" else None
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 07142a86..865f202a 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -2877,7 +2877,7 @@ class TestFindDevs:
return msdos
elif pattern == "/dev/iso9660/*":
return iso9660
- raise Exception
+ raise RuntimeError
m_glob.side_effect = fake_glob
@@ -3026,3 +3026,18 @@ class TestVersion:
)
def test_from_str(self, str_ver, cls_ver):
assert util.Version.from_str(str_ver) == cls_ver
+
+
+@pytest.mark.allow_dns_lookup
+class TestResolvable:
+ @mock.patch.object(util, "_DNS_REDIRECT_IP", return_value=True)
+ @mock.patch.object(util.socket, "getaddrinfo")
+ def test_ips_need_not_be_resolved(self, m_getaddr, m_dns):
+ """Optimization test: dns resolution may timeout during early boot, and
+ often the urls being checked use IP addresses rather than dns names.
+ Therefore, the fast path checks if the address contains an IP and exits
+ early if the path is a valid IP.
+ """
+ assert util.is_resolvable("http://169.254.169.254/") is True
+ assert util.is_resolvable("http://[fd00:ec2::254]/") is True
+ assert not m_getaddr.called
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index eb656d7f..5ca63e76 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -18,6 +18,7 @@ berolinux
bipinbachhao
BirknerAlex
bmhughes
+brianphaley
CalvoM
candlerb
cawamata
@@ -135,6 +136,7 @@ vorlonofportland
vteratipally
Vultaire
WebSpider
+Wind-net
wschoot
wynnfeng
xiachen-rh