author     James Falcon <james.falcon@canonical.com>  2023-02-03 15:35:51 -0600
committer  James Falcon <james.falcon@canonical.com>  2023-02-03 15:35:51 -0600
commit     7d144ed187d5e71b7bb23cd2d99b7b0a342bd630 (patch)
tree       6e75e22427e45aa0d9c3b520dc9b827f47a74703
parent     66266c8bf30654c137b56d958707d241f80b3853 (diff)
parent     3b8b46926b7ef46ac0ee73d51285dd274906e4f3 (diff)
download   cloud-init-git-7d144ed187d5e71b7bb23cd2d99b7b0a342bd630.tar.gz
merge from upstream/main at 22.4-98-g3b8b4692
-rw-r--r--  .github/PULL_REQUEST_TEMPLATE.md | 2
-rw-r--r--  .github/workflows/integration.yml | 84
-rw-r--r--  .travis.yml | 105
-rw-r--r--  README.md | 2
-rwxr-xr-x  cloudinit/cmd/clean.py | 14
-rw-r--r--  cloudinit/config/cc_puppet.py | 59
-rw-r--r--  cloudinit/config/cc_resolv_conf.py | 18
-rw-r--r--  cloudinit/config/cc_set_hostname.py | 2
-rw-r--r--  cloudinit/config/cc_set_passwords.py | 38
-rw-r--r--  cloudinit/config/cc_ssh.py | 4
-rw-r--r--  cloudinit/distros/rhel.py | 1
-rw-r--r--  cloudinit/net/__init__.py | 16
-rw-r--r--  cloudinit/net/network_state.py | 2
-rw-r--r--  cloudinit/sources/DataSourceAzure.py | 5
-rw-r--r--  cloudinit/sources/DataSourceVultr.py | 4
-rw-r--r--  cloudinit/sources/__init__.py | 11
-rw-r--r--  cloudinit/sources/helpers/vultr.py | 12
-rw-r--r--  cloudinit/stages.py | 2
-rw-r--r--  doc/rtd/development/index.rst | 2
-rw-r--r--  doc/rtd/explanation/analyze.rst (renamed from doc/rtd/development/analyze.rst) | 0
-rw-r--r--  doc/rtd/explanation/index.rst | 2
-rw-r--r--  doc/rtd/explanation/security.rst (renamed from doc/rtd/development/security.rst) | 0
-rw-r--r--  doc/rtd/index.rst | 14
-rw-r--r--  doc/rtd/reference/cli.rst | 8
-rw-r--r--  doc/rtd/reference/datasources/cloudsigma.rst | 5
-rw-r--r--  doc/rtd/reference/network-config.rst | 34
-rw-r--r--  integration-requirements.txt | 2
-rwxr-xr-x  packages/bddeb | 6
-rw-r--r--  packages/pkg-deps.json | 1
-rw-r--r--  pyproject.toml | 1
-rw-r--r--  systemd/cloud-init.service.tmpl | 3
-rw-r--r--  tests/integration_tests/clouds.py | 12
-rw-r--r--  tests/integration_tests/conftest.py | 2
-rw-r--r--  tests/integration_tests/integration_settings.py | 1
-rw-r--r--  tests/integration_tests/modules/test_ansible.py | 3
-rw-r--r--  tests/integration_tests/modules/test_combined.py | 17
-rw-r--r--  tests/integration_tests/modules/test_puppet.py | 7
-rw-r--r--  tests/integration_tests/modules/test_set_password.py | 9
-rw-r--r--  tests/unittests/cmd/test_clean.py | 24
-rw-r--r--  tests/unittests/config/test_cc_puppet.py | 194
-rw-r--r--  tests/unittests/config/test_cc_set_hostname.py | 17
-rw-r--r--  tests/unittests/config/test_cc_set_passwords.py | 2
-rw-r--r--  tests/unittests/config/test_cc_ssh.py | 6
-rw-r--r--  tests/unittests/sources/test_azure.py | 3
-rw-r--r--  tests/unittests/sources/test_init.py | 10
-rw-r--r--  tests/unittests/sources/test_vultr.py | 67
-rw-r--r--  tests/unittests/test_net.py | 22
-rw-r--r--  tests/unittests/test_stages.py | 20
-rw-r--r--  tools/.github-cla-signers | 2
-rw-r--r--  tox.ini | 1
50 files changed, 582 insertions, 296 deletions
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 017e82e4..8f86cadc 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -27,6 +27,6 @@ setup, and teardown. Scripts used may be attached directly to this PR. -->
## Checklist:
<!-- Go over all the following points, and put an `x` in all the boxes
that apply. -->
- - [ ] My code follows the process laid out in [the documentation](https://cloudinit.readthedocs.io/en/latest/topics/contributing.html)
+ - [ ] My code follows the process laid out in [the documentation](https://cloudinit.readthedocs.io/en/latest/development/contributing.html)
- [ ] I have updated or added any unit tests accordingly
- [ ] I have updated or added any documentation accordingly
diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml
new file mode 100644
index 00000000..889897a3
--- /dev/null
+++ b/.github/workflows/integration.yml
@@ -0,0 +1,84 @@
+name: Integration Tests
+
+on:
+ pull_request:
+ branches-ignore:
+ - 'ubuntu/**'
+
+concurrency:
+ group: 'ci-${{ github.workflow }}-${{ github.ref }}'
+ cancel-in-progress: true
+
+defaults:
+ run:
+ shell: sh -ex {0}
+
+env:
+ RELEASE: bionic
+
+jobs:
+ package-build:
+ runs-on: ubuntu-22.04
+ steps:
+ - name: "Checkout"
+ uses: actions/checkout@v3
+ with:
+ # Fetch all tags for tools/read-version
+ fetch-depth: 0
+ - name: Prepare dependencies
+ run: |
+ sudo DEBIAN_FRONTEND=noninteractive apt-get update
+ sudo DEBIAN_FRONTEND=noninteractive apt-get -y install \
+ debhelper \
+ dh-python \
+ fakeroot \
+ python3-setuptools \
+ sbuild \
+ ubuntu-dev-tools
+ sudo sbuild-adduser $USER
+ cp /usr/share/doc/sbuild/examples/example.sbuildrc /home/$USER/.sbuildrc
+ - name: Build package
+ run: |
+ ./packages/bddeb -S -d --release ${{ env.RELEASE }}
+ sudo -E su $USER -c 'mk-sbuild ${{ env.RELEASE }}'
+ sudo -E su $USER -c 'DEB_BUILD_OPTIONS=nocheck sbuild --nolog --no-run-lintian --no-run-autopkgtest --verbose --dist=${{ env.RELEASE }} --build-dir=${{ runner.temp }} cloud-init_*.dsc'
+ - name: Archive debs as artifacts
+ uses: actions/upload-artifact@v3
+ with:
+ name: 'cloud-init-${{ env.RELEASE }}-deb'
+ path: '${{ runner.temp }}/cloud-init*.deb'
+ retention-days: 3
+
+ integration-tests:
+ needs: package-build
+ runs-on: ubuntu-22.04
+ steps:
+ - name: "Checkout"
+ uses: actions/checkout@v3
+ with:
+ # Fetch all tags for tools/read-version
+ fetch-depth: 0
+ - name: Retrieve cloud-init package
+ uses: actions/download-artifact@v3
+ with:
+ name: 'cloud-init-${{ env.RELEASE }}-deb'
+ - name: Verify deb package
+ run: |
+ ls -hal cloud-init*.deb
+ - name: Prepare test tools
+ run: |
+ sudo DEBIAN_FRONTEND=noninteractive apt-get -y update
+ sudo DEBIAN_FRONTEND=noninteractive apt-get -y install tox wireguard
+ - name: Initialize LXD
+ run: |
+ ssh-keygen -P "" -q -f ~/.ssh/id_rsa
+ echo "[lxd]" > /home/$USER/.config/pycloudlib.toml
+ sudo adduser $USER lxd
+ # Jammy GH Action runners have docker installed, which edits iptables
+ # in a way that is incompatible with lxd.
+ # https://linuxcontainers.org/lxd/docs/master/howto/network_bridge_firewalld/#prevent-issues-with-lxd-and-docker
+ sudo iptables -I DOCKER-USER -j ACCEPT
+ sudo lxd init --auto
+ - name: Run integration Tests
+ run: |
+ sg lxd -c 'CLOUD_INIT_CLOUD_INIT_SOURCE="$(ls cloud-init*.deb)" tox -e integration-tests-ci'
diff --git a/.travis.yml b/.travis.yml
index 1c45368c..b56fdfdc 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,27 +1,6 @@
language: python
dist: bionic
-# We use two different caching strategies. The default is to cache pip
-# packages (as most of our jobs use pip packages), which is configured here.
-# For the integration tests, we instead want to cache the lxd images and
-# package build schroot.
-#
-# We cache the lxd images because this saves a few seconds in the general
-# case, but provides substantial speed-ups when cloud-images.ubuntu.com, the
-# source of the images, is under heavy load. The directory in which the lxd
-# images are stored (/var/snap/lxd/common/lxd/images/) is not
-# readable/writeable by the default user (which is a requirement for caching),
-# so we instead cache the `lxd_images/` directory. We move lxd images out of
-# there before we run tests and back in once tests are complete. We _move_ the
-# images out and only copy the most recent lxd image back into the cache, to
-# avoid our cache growing without bound. (We only need the most recent lxd
-# image because the integration tests only use a single image.)
-#
-# We cache the package build schroot because it saves 2-3 minutes per build.
-# Without caching, we have to perform a debootstrap for every build. We update
-# the schroot before storing it back in the cache, to ensure that we aren't
-# just using an increasingly-old schroot as time passes. The cached schroot is
-# stored as a tarball, to preserve permissions/ownership.
cache: pip
install:
@@ -43,90 +22,6 @@ env:
matrix:
fast_finish: true
include:
- - name: "Integration Tests"
- if: NOT branch =~ /^ubuntu\//
- env: {}
- cache:
- - directories:
- - lxd_images
- - chroots
- before_cache:
- - |
- # Find the most recent image file
- latest_file="$(sudo ls -Art /var/snap/lxd/common/lxd/images/ | tail -n 1)"
- # This might be <hash>.rootfs or <hash>, normalise
- latest_file="$(basename $latest_file .rootfs)"
- # Find all files with that prefix and copy them to our cache dir
- sudo find /var/snap/lxd/common/lxd/images/ -name $latest_file* -print -exec cp {} "$TRAVIS_BUILD_DIR/lxd_images/" \;
- install:
- - git fetch --unshallow
- - sudo apt-get install -y --install-recommends sbuild ubuntu-dev-tools fakeroot tox debhelper wireguard
- - pip install .
- - pip install tox
- # bionic has lxd from deb installed, remove it first to ensure
- # pylxd talks only to the lxd from snap
- - sudo apt remove --purge lxd lxd-client
- - sudo rm -Rf /var/lib/lxd
- - sudo snap install lxd
- - sudo lxd init --auto
- - sudo mkdir --mode=1777 -p /var/snap/lxd/common/consoles
- # Move any cached lxd images into lxd's image dir
- - sudo find "$TRAVIS_BUILD_DIR/lxd_images/" -type f -print -exec mv {} /var/snap/lxd/common/lxd/images/ \;
- - sudo usermod -a -G lxd $USER
- - sudo sbuild-adduser $USER
- - cp /usr/share/doc/sbuild/examples/example.sbuildrc /home/$USER/.sbuildrc
- - echo "[lxd]" > /home/$USER/.config/pycloudlib.toml
- script:
- # Ubuntu LTS: Build
- - ./packages/bddeb -S -d --release bionic
- - |
- needs_caching=false
- if [ -e "$TRAVIS_BUILD_DIR/chroots/bionic-amd64.tar" ]; then
- # If we have a cached chroot, move it into place
- sudo mkdir -p /var/lib/schroot/chroots/bionic-amd64
- sudo tar --sparse --xattrs --preserve-permissions --numeric-owner -xf "$TRAVIS_BUILD_DIR/chroots/bionic-amd64.tar" -C /var/lib/schroot/chroots/bionic-amd64
- # Write its configuration
- cat > sbuild-bionic-amd64 << EOM
- [bionic-amd64]
- description=bionic-amd64
- groups=sbuild,root,admin
- root-groups=sbuild,root,admin
- # Uncomment these lines to allow members of these groups to access
- # the -source chroots directly (useful for automated updates, etc).
- #source-root-users=sbuild,root,admin
- #source-root-groups=sbuild,root,admin
- type=directory
- profile=sbuild
- union-type=overlay
- directory=/var/lib/schroot/chroots/bionic-amd64
- EOM
- sudo mv sbuild-bionic-amd64 /etc/schroot/chroot.d/
- sudo chown root /etc/schroot/chroot.d/sbuild-bionic-amd64
- # And ensure it's up-to-date.
- before_pkgs="$(sudo schroot -c source:bionic-amd64 -d / dpkg -l | sha256sum)"
- sudo schroot -c source:bionic-amd64 -d / -- sh -c "apt-get update && apt-get -qqy upgrade"
- after_pkgs=$(sudo schroot -c source:bionic-amd64 -d / dpkg -l | sha256sum)
- if [ "$before_pkgs" != "$after_pkgs" ]; then
- needs_caching=true
- fi
- else
- # Otherwise, create the chroot
- sudo -E su $USER -c 'mk-sbuild bionic'
- needs_caching=true
- fi
- # If there are changes to the schroot (or it's entirely new),
- # tar up the schroot (to preserve ownership/permissions) and
- # move it into the cached dir; no need to compress it because
- # Travis will do that anyway
- if [ "$needs_caching" = "true" ]; then
- sudo tar --sparse --xattrs --xattrs-include=* -cf "$TRAVIS_BUILD_DIR/chroots/bionic-amd64.tar" -C /var/lib/schroot/chroots/bionic-amd64 .
- fi
- # Use sudo to get a new shell where we're in the sbuild group
- # Don't run integration tests when build fails
- - |
- sudo -E su $USER -c 'DEB_BUILD_OPTIONS=nocheck sbuild --nolog --no-run-lintian --no-run-autopkgtest --verbose --dist=bionic cloud-init_*.dsc' &&
- ssh-keygen -P "" -q -f ~/.ssh/id_rsa &&
- sg lxd -c 'CLOUD_INIT_CLOUD_INIT_SOURCE="$(ls *.deb)" tox -e integration-tests-ci'
- python: 3.6
env:
TOXENV=lowest-supported
diff --git a/README.md b/README.md
index feb896ac..87864679 100644
--- a/README.md
+++ b/README.md
@@ -43,7 +43,7 @@ get in contact with that distribution and send them our way!
## To start developing cloud-init
-Checkout the [contributing](https://cloudinit.readthedocs.io/en/latest/topics/contributing.html)
+Checkout the [contributing](https://cloudinit.readthedocs.io/en/latest/development/contributing.html)
document that outlines the steps necessary to develop, test, and submit code.
## Daily builds
diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py
index 65d3eece..5a61eac5 100755
--- a/cloudinit/cmd/clean.py
+++ b/cloudinit/cmd/clean.py
@@ -12,6 +12,7 @@ import os
import sys
from cloudinit import settings
+from cloudinit.distros import uses_systemd
from cloudinit.stages import Init
from cloudinit.subp import ProcessExecutionError, runparts, subp
from cloudinit.util import (
@@ -20,6 +21,7 @@ from cloudinit.util import (
error,
get_config_logfiles,
is_link,
+ write_file,
)
ETC_MACHINE_ID = "/etc/machine-id"
@@ -55,8 +57,9 @@ def get_parser(parser=None):
action="store_true",
default=False,
help=(
- "Remove /etc/machine-id for golden image creation."
- " Next boot generates a new machine-id."
+ "Set /etc/machine-id to 'uninitialized\n' for golden image"
+ "creation. On next boot, systemd generates a new machine-id."
+ " Remove /etc/machine-id on non-systemd environments."
),
)
parser.add_argument(
@@ -120,7 +123,12 @@ def handle_clean_args(name, args):
"""Handle calls to 'cloud-init clean' as a subcommand."""
exit_code = remove_artifacts(args.remove_logs, args.remove_seed)
if args.machine_id:
- del_file(ETC_MACHINE_ID)
+ if uses_systemd():
+ # Systemd v237 and later will create a new machine-id on next boot
+ write_file(ETC_MACHINE_ID, "uninitialized\n", mode=0o444)
+ else:
+ # Non-systemd distros like FreeBSD regenerate machine-id when file is absent
+ del_file(ETC_MACHINE_ID)
if exit_code == 0 and args.reboot:
cmd = ["shutdown", "-r", "now"]
try:
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index b8a9fe17..38c2cc99 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -25,6 +25,7 @@ from cloudinit.settings import PER_INSTANCE
AIO_INSTALL_URL = "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh" # noqa: E501
PUPPET_AGENT_DEFAULT_ARGS = ["--test"]
+PUPPET_PACKAGE_NAMES = ("puppet-agent", "puppet")
MODULE_DESCRIPTION = """\
This module handles puppet installation and configuration. If the ``puppet``
@@ -118,26 +119,21 @@ class PuppetConstants:
self.csr_attributes_path = csr_attributes_path
-def _autostart_puppet(log):
- # Set puppet to automatically start
- if os.path.exists("/etc/default/puppet"):
- subp.subp(
- [
- "sed",
- "-i",
- "-e",
- "s/^START=.*/START=yes/",
- "/etc/default/puppet",
- ],
- capture=False,
- )
- elif subp.which("systemctl"):
- subp.subp(["systemctl", "enable", "puppet.service"], capture=False)
- elif os.path.exists("/sbin/chkconfig"):
- subp.subp(["/sbin/chkconfig", "puppet", "on"], capture=False)
- else:
+def _manage_puppet_services(log, cloud: Cloud, action: str):
+ """Attempts to perform action on one of the puppet services"""
+ service_managed: str = ""
+ for puppet_name in PUPPET_PACKAGE_NAMES:
+ try:
+ cloud.distro.manage_service(action, f"{puppet_name}.service")
+ service_managed = puppet_name
+ break
+ except subp.ProcessExecutionError:
+ pass
+ if not service_managed:
log.warning(
- "Sorry we do not know how to enable puppet services on this system"
+ "Could not '%s' any of the following services: %s",
+ action,
+ ", ".join(PUPPET_PACKAGE_NAMES),
)
@@ -221,7 +217,7 @@ def handle(
else: # default to 'packages'
puppet_user = "puppet"
puppet_bin = "puppet"
- puppet_package = "puppet"
+ puppet_package = None # changes with distro
package_name = util.get_cfg_option_str(
puppet_cfg, "package_name", puppet_package
@@ -238,7 +234,22 @@ def handle(
)
if install_type == "packages":
- cloud.distro.install_packages((package_name, version))
+ if package_name is None: # conf has no package_name
+ for puppet_name in PUPPET_PACKAGE_NAMES:
+ try:
+ cloud.distro.install_packages((puppet_name, version))
+ package_name = puppet_name
+ break
+ except subp.ProcessExecutionError:
+ pass
+ if not package_name:
+ log.warning(
+ "No installable puppet package in any of: %s",
+ ", ".join(PUPPET_PACKAGE_NAMES),
+ )
+ else:
+ cloud.distro.install_packages((package_name, version))
+
elif install_type == "aio":
install_puppet_aio(
cloud.distro, aio_install_url, version, collection, cleanup
@@ -316,9 +327,9 @@ def handle(
yaml.dump(puppet_cfg["csr_attributes"], default_flow_style=False),
)
- # Set it up so it autostarts
if start_puppetd:
- _autostart_puppet(log)
+ # Enables the services
+ _manage_puppet_services(log, cloud, "enable")
# Run the agent if needed
if run:
@@ -344,7 +355,7 @@ def handle(
if start_puppetd:
# Start puppetd
- subp.subp(["service", "puppet", "start"], capture=False)
+ _manage_puppet_services(log, cloud, "start")
# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 8dbed71e..ce19fff3 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -26,17 +26,23 @@ RESOLVE_CONFIG_TEMPLATE_MAP = {
}
MODULE_DESCRIPTION = """\
+Unless manually editing :file:`/etc/resolv.conf` is the correct way to manage
+nameserver information on your operating system, you do not want to use
+this module. Many distros have moved away from manually editing ``resolv.conf``
+so please verify that this is the preferred nameserver management method for
+your distro before using this module.
+
+Note that using :ref:`network_config` is preferred, rather than using this
+module, when possible.
+
This module is intended to manage resolv.conf in environments where early
configuration of resolv.conf is necessary for further bootstrapping and/or
where configuration management such as puppet or chef own DNS configuration.
-As Debian/Ubuntu will, by default, utilize resolvconf, and similarly Red Hat
-will use sysconfig, this module is likely to be of little use unless those
-are configured correctly.
When using a :ref:`datasource_config_drive` and a RHEL-like system,
resolv.conf will also be managed automatically due to the available
information provided for DNS servers in the :ref:`network_config_v2` format.
-For those that with to have different settings, use this module.
+For those that wish to have different settings, use this module.
In order for the ``resolv_conf`` section to be applied, ``manage_resolv_conf``
must be set ``true``.
@@ -44,10 +50,6 @@ must be set ``true``.
.. note::
For Red Hat with sysconfig, be sure to set PEERDNS=no for all DHCP
enabled NICs.
-
-.. note::
- And, in Ubuntu/Debian it is recommended that DNS be configured via the
- standard /etc/network/interfaces configuration file.
"""
meta: MetaSchema = {
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index c0bda6fe..fa5c023c 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -107,7 +107,7 @@ def handle(
# distro._read_hostname implementation so we only validate one artifact.
prev_fn = os.path.join(cloud.get_cpath("data"), "set-hostname")
prev_hostname = {}
- if os.path.exists(prev_fn):
+ if os.path.exists(prev_fn) and os.stat(prev_fn).st_size > 0:
prev_hostname = util.load_json(util.load_file(prev_fn))
hostname_changed = hostname != prev_hostname.get(
"hostname"
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 850fef86..3a0b3f5b 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -108,6 +108,18 @@ def get_users_by_type(users_list: list, pw_type: str) -> list:
)
+def _restart_ssh_daemon(distro, service):
+ try:
+ distro.manage_service("restart", service)
+ LOG.debug("Restarted the SSH daemon.")
+ except subp.ProcessExecutionError as e:
+ LOG.warning(
+ "'ssh_pwauth' configuration may not be applied. Cloud-init was "
+ "unable to restart SSH daemon due to error: '%s'",
+ e,
+ )
+
+
def handle_ssh_pwauth(pw_auth, distro: Distro):
"""Apply sshd PasswordAuthentication changes.
@@ -145,23 +157,19 @@ def handle_ssh_pwauth(pw_auth, distro: Distro):
if distro.uses_systemd():
state = subp.subp(
- f"systemctl show --property ActiveState --value {service}"
- ).stdout
+ [
+ "systemctl",
+ "show",
+ "--property",
+ "ActiveState",
+ "--value",
+ service,
+ ]
+ ).stdout.strip()
if state.lower() in ["active", "activating", "reloading"]:
- distro.manage_service("restart", service)
- LOG.debug("Restarted the SSH daemon.")
- else:
- LOG.debug("Not restarting SSH service: service is stopped.")
+ _restart_ssh_daemon(distro, service)
else:
- try:
- distro.manage_service("restart", service)
- LOG.debug("Restarted the SSH daemon.")
- except subp.ProcessExecutionError:
- util.logexc(
- LOG,
- "Cloud-init was unable to restart SSH daemon. "
- "'ssh_pwauth' configuration may not be applied.",
- )
+ _restart_ssh_daemon(distro, service)
def handle(
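Aside (not part of the patch, and using the standard library's subprocess rather
than cloud-init's subp wrapper, whose exact semantics are an assumption here):
the switch from one long command string to an argv list matters because, without
a shell, a single string is looked up as the literal program name.

    import subprocess

    # Argv list: each token reaches systemctl as its own argument.
    result = subprocess.run(
        ["systemctl", "show", "--property", "ActiveState", "--value", "ssh"],
        capture_output=True,
        text=True,
    )
    state = result.stdout.strip()  # .strip() mirrors the fix above

    # By contrast, subprocess.run("systemctl show ...") without shell=True
    # would try to execute a program literally named
    # "systemctl show --property ActiveState --value ssh" and fail.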
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index c9e59d16..c01dd48c 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -187,8 +187,8 @@ for k in GENERATE_KEY_NAMES:
CONFIG_KEY_TO_FILE.update(
{
f"{k}_private": (KEY_FILE_TPL % k, 0o600),
- f"{k}_public": (f"{KEY_FILE_TPL % k}.pub", 0o600),
- f"{k}_certificate": (f"{KEY_FILE_TPL % k}-cert.pub", 0o600),
+ f"{k}_public": (f"{KEY_FILE_TPL % k}.pub", 0o644),
+ f"{k}_certificate": (f"{KEY_FILE_TPL % k}-cert.pub", 0o644),
}
)
PRIV_TO_PUB[f"{k}_private"] = f"{k}_public"
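Aside (not part of the patch): the unit-test update for this change, later in the
diff, swaps the decimal literal 384 for octal mode constants; the values relate as
in this small check.

    # 384 decimal equals 0o600 (owner read/write only); 0o644 additionally
    # grants group/other read, which suits .pub and cert files.
    assert 384 == 0o600
    assert 0o644 == 0o600 | 0o044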
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 320f4ba1..df7dc3d6 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -132,6 +132,7 @@ class Distro(distros.Distro):
return util.load_file(filename).strip()
elif self.uses_systemd():
(out, _err) = subp.subp(["hostname"])
+ out = out.strip()
if len(out):
return out
else:
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 0a41a2d4..50e445ec 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -1034,6 +1034,22 @@ def get_interfaces_by_mac_on_linux(blacklist_drivers=None) -> dict:
% (ret[mac], driver_map[mac], name)
)
+ # This is intended to be a short-term fix of LP: #1997922
+ # Long term, we should better handle configuration of virtual
+ # devices where duplicate MACs are expected early in boot if
+ # cloud-init happens to enumerate network interfaces before drivers
+ # have fully initialized the leader/subordinate relationships for
+ # those devices or switches.
+ if driver == "mscc_felix" or driver == "fsl_enetc":
+ LOG.debug(
+ "Ignoring duplicate macs from '%s' and '%s' due to "
+ "driver '%s'.",
+ name,
+ ret[mac],
+ driver,
+ )
+ continue
+
if raise_duplicate_mac_error:
raise RuntimeError(msg)
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 36cd582e..f88b1321 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -989,7 +989,7 @@ def _normalize_net_keys(network, address_keys=()):
@returns: A dict containing normalized prefix and matching addr_key.
"""
- net = dict((k, v) for k, v in network.items() if v)
+ net = {k: v for k, v in network.items() if v or v == 0}
addr_key = None
for key in address_keys:
if net.get(key):
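Aside (not part of the patch; the values are hypothetical): the rewritten
comprehension keeps keys whose value is a legitimate 0, e.g. a prefix of 0, which
the old falsy filter silently dropped.

    network = {"address": "192.168.0.10", "prefix": 0, "gateway": None}

    old = dict((k, v) for k, v in network.items() if v)
    assert "prefix" not in old  # a valid 0 was discarded

    new = {k: v for k, v in network.items() if v or v == 0}
    assert new["prefix"] == 0       # 0 is now preserved
    assert "gateway" not in new     # empty/None values are still dropped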
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 8ee1bea7..9dac4c6b 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -79,7 +79,10 @@ IMDS_RETRY_CODES = (
imds_readurl_exception_callback = functools.partial(
retry_on_url_exc,
retry_codes=IMDS_RETRY_CODES,
- retry_instances=(requests.Timeout,),
+ retry_instances=(
+ requests.ConnectionError,
+ requests.Timeout,
+ ),
)
diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py
index a2c16991..9d7c84fb 100644
--- a/cloudinit/sources/DataSourceVultr.py
+++ b/cloudinit/sources/DataSourceVultr.py
@@ -70,10 +70,10 @@ class DataSourceVultr(sources.DataSource):
if "cloud_interfaces" in md:
# In the future we will just drop pre-configured
# network configs into the array. They need names though.
- self.netcfg = vultr.add_interface_names(md["cloud_interfaces"])
+ vultr.add_interface_names(md["cloud_interfaces"])
+ self.netcfg = md["cloud_interfaces"]
else:
self.netcfg = vultr.generate_network_config(md["interfaces"])
-
# Grab vendordata
self.vendordata_raw = md["vendor-data"]
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 85e094ac..12430401 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -435,12 +435,15 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
cloud_id = instance_data["v1"].get("cloud_id", "none")
cloud_id_file = os.path.join(self.paths.run_dir, "cloud-id")
util.write_file(f"{cloud_id_file}-{cloud_id}", f"{cloud_id}\n")
+ # cloud-id not found, then no previous cloud-id file
+ prev_cloud_id_file = None
+ new_cloud_id_file = f"{cloud_id_file}-{cloud_id}"
+ # cloud-id found, then the prev cloud-id file is source of symlink
if os.path.exists(cloud_id_file):
prev_cloud_id_file = os.path.realpath(cloud_id_file)
- else:
- prev_cloud_id_file = cloud_id_file
- util.sym_link(f"{cloud_id_file}-{cloud_id}", cloud_id_file, force=True)
- if prev_cloud_id_file != cloud_id_file:
+
+ util.sym_link(new_cloud_id_file, cloud_id_file, force=True)
+ if prev_cloud_id_file and prev_cloud_id_file != new_cloud_id_file:
util.del_file(prev_cloud_id_file)
write_json(json_sensitive_file, processed_data, mode=0o600)
json_file = self.paths.get_runpath("instance_data")
diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py
index 30c8cfee..836108d4 100644
--- a/cloudinit/sources/helpers/vultr.py
+++ b/cloudinit/sources/helpers/vultr.py
@@ -276,17 +276,17 @@ def generate_interface_additional_addresses(interface, netcfg):
# Make required adjustments to the network configs provided
-def add_interface_names(interfaces):
- for interface in interfaces:
- interface_name = get_interface_name(interface["mac"])
+def add_interface_names(netcfg):
+ for interface in netcfg["config"]:
+ if interface["type"] != "physical":
+ continue
+ interface_name = get_interface_name(interface["mac_address"])
if not interface_name:
raise RuntimeError(
"Interface: %s could not be found on the system"
- % interface["mac"]
+ % interface["mac_address"]
)
interface["name"] = interface_name
- return interfaces
-
# vi: ts=4 expandtab
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 58b53d96..9494a0bf 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -853,6 +853,8 @@ class Init:
return (None, cfg_source)
if ncfg:
return (ncfg, cfg_source)
+ if not self.cfg.get("network", True):
+ LOG.warning("Empty network config found")
return (
self.distro.generate_fallback_config(),
NetworkConfigSource.FALLBACK,
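Aside (not part of the patch; the fragment is hypothetical): the new warning fires
when the merged configuration carries an explicitly empty network key, after which
cloud-init still falls back to a generated configuration.

    cfg = {"network": {}}  # e.g. a config fragment that disables networking

    # The added check, in isolation:
    if not cfg.get("network", True):
        print("WARNING: Empty network config found")  # then fall back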
diff --git a/doc/rtd/development/index.rst b/doc/rtd/development/index.rst
index 8c077d8e..9768fab5 100644
--- a/doc/rtd/development/index.rst
+++ b/doc/rtd/development/index.rst
@@ -26,8 +26,6 @@ Debugging and reporting
:maxdepth: 1
../howto/bugs.rst
- security.rst
- analyze.rst
logging.rst
debugging.rst
diff --git a/doc/rtd/development/analyze.rst b/doc/rtd/explanation/analyze.rst
index 93961928..93961928 100644
--- a/doc/rtd/development/analyze.rst
+++ b/doc/rtd/explanation/analyze.rst
diff --git a/doc/rtd/explanation/index.rst b/doc/rtd/explanation/index.rst
index ea015a40..c6114096 100644
--- a/doc/rtd/explanation/index.rst
+++ b/doc/rtd/explanation/index.rst
@@ -16,3 +16,5 @@ knowledge and become better at using and configuring ``cloud-init``.
events.rst
instancedata.rst
vendordata.rst
+ security.rst
+ analyze.rst
diff --git a/doc/rtd/development/security.rst b/doc/rtd/explanation/security.rst
index c1ffd9ce..c1ffd9ce 100644
--- a/doc/rtd/development/security.rst
+++ b/doc/rtd/explanation/security.rst
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index 1d581129..81045051 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -37,11 +37,9 @@ that can be integrated with your cloud.
Step-by-step guides covering key operations and common tasks
- .. grid-item-card:: **Explanation**
- :link: explanation/index
- :link-type: doc
-
- Discussion and clarification of key topics
+.. grid:: 1 1 2 2
+ :gutter: 3
+ :reverse:
.. grid-item-card:: **Reference**
:link: reference/index
@@ -49,6 +47,12 @@ that can be integrated with your cloud.
Technical information - specifications, APIs, architecture
+ .. grid-item-card:: **Explanation**
+ :link: explanation/index
+ :link-type: doc
+
+ Discussion and clarification of key topics
+
-----
Having trouble? We would like to help!
diff --git a/doc/rtd/reference/cli.rst b/doc/rtd/reference/cli.rst
index a4b41315..246b9721 100644
--- a/doc/rtd/reference/cli.rst
+++ b/doc/rtd/reference/cli.rst
@@ -74,9 +74,11 @@ first boot.
* :command:`--logs`: Optionally remove all ``cloud-init`` log files in
:file:`/var/log/`.
* :command:`--reboot`: Reboot the system after removing artifacts.
-* :command:`--machine-id`: Remove :file:`/etc/machine-id` on this image. Best
- practice when cloning a golden image, to ensure that the next boot of that
- image auto-generates a unique machine ID. `More details on machine-id`_.
+* :command:`--machine-id`: Set :file:`/etc/machine-id` to ``uninitialized\n``
+ on this image for systemd environments. On distributions without systemd,
+ remove the file. Best practice when cloning a golden image, to ensure the
+ next boot of that image auto-generates a unique machine ID.
+ `More details on machine-id`_.
.. _cli_collect_logs:
diff --git a/doc/rtd/reference/datasources/cloudsigma.rst b/doc/rtd/reference/datasources/cloudsigma.rst
index 84c42706..50f255ef 100644
--- a/doc/rtd/reference/datasources/cloudsigma.rst
+++ b/doc/rtd/reference/datasources/cloudsigma.rst
@@ -21,8 +21,8 @@ Providing user data
You can provide user data to the VM using the dedicated `meta field`_ in the
`server context`_ ``cloudinit-user-data``. By default, *cloud-config* format
is expected there, and the ``#cloud-config`` header can be omitted. However,
-since this is a raw-text field you could provide any of the valid `config
-formats`_.
+since this is a raw-text field you could provide any of the valid :ref:`config
+formats<user_data_formats>`.
You have the option to encode your user data using Base64. In order to do that
you have to add the ``cloudinit-user-data`` field to the ``base64_fields``.
@@ -36,4 +36,3 @@ the value. If this field does not exist, the default value is "net".
.. _CloudSigma: http://cloudsigma.com/
.. _server context: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
.. _meta field: http://cloudsigma-docs.readthedocs.org/en/latest/meta.html
-.. _config formats: http://cloudinit.readthedocs.org/en/latest/topics/format.html
diff --git a/doc/rtd/reference/network-config.rst b/doc/rtd/reference/network-config.rst
index 5a2386d7..ea331f1c 100644
--- a/doc/rtd/reference/network-config.rst
+++ b/doc/rtd/reference/network-config.rst
@@ -126,6 +126,10 @@ The following datasources optionally provide network configuration:
- `DigitalOcean JSON metadata`_
+- :ref:`datasource_lxd`
+
+ - `LXD`_
+
- :ref:`datasource_nocloud`
- :ref:`network_config_v1`
@@ -246,39 +250,46 @@ supplying an updated configuration in cloud-config. ::
Network configuration tools
===========================
-``Cloud-init`` contains one tool used to test input/output conversion between
+``Cloud-init`` contains a command used to test input/output conversion between
formats. The :file:`tools/net-convert.py` in the ``cloud-init`` source
-repository is helpful in examining expected output for a given input format.
+repository is helpful in examining expected output for a given input
+format. If running these commands from the cloud-init source directory,
+make sure to set the correct path ``PYTHON_PATH=.``
CLI Interface:
.. code-block:: shell-session
- $ tools/net-convert.py --help
+ $ cloud-init devel net-convert --help
Example output:
.. code-block::
- usage: net-convert.py [-h] --network-data PATH --kind
- {eni,network_data.json,yaml} -d PATH [-m name,mac]
- --output-kind {eni,netplan,sysconfig}
+ usage: /usr/bin/cloud-init devel net-convert [-h] -p PATH -k {eni,network_data.json,yaml,azure-imds,vmware-imc} -d PATH -D
+ {alpine,arch,debian,ubuntu,freebsd,dragonfly,gentoo,cos,netbsd,openbsd,almalinux,amazon,centos,cloudlinux,eurolinux,fedora,mariner,miraclelinux,openmandriva,photon,rhel,rocky,virtuozzo,opensuse,sles,openEuler}
+ [-m name,mac] [--debug] -O {eni,netplan,networkd,sysconfig,network-manager}
- optional arguments:
+ options:
-h, --help show this help message and exit
- --network-data PATH, -p PATH
- --kind {eni,network_data.json,yaml}, -k {eni,network_data.json,yaml}
+ -p PATH, --network-data PATH
+ The network configuration to read
+ -k {eni,network_data.json,yaml,azure-imds,vmware-imc}, --kind {eni,network_data.json,yaml,azure-imds,vmware-imc}
+ The format of the given network config
-d PATH, --directory PATH
directory to place output in
+ -D {alpine,arch,debian,ubuntu,freebsd,dragonfly,gentoo,cos,netbsd,openbsd,almalinux,amazon,centos,cloudlinux,eurolinux,fedora,mariner,miraclelinux,openmandriva,photon,rhel,rocky,virtuozzo,opensuse,sles,openEuler}, --distro {alpine,arch,debian,ubuntu,freebsd,dragonfly,gentoo,cos,netbsd,openbsd,almalinux,amazon,centos,cloudlinux,eurolinux,fedora,mariner,miraclelinux,openmandriva,photon,rhel,rocky,virtuozzo,opensuse,sles,openEuler}
-m name,mac, --mac name,mac
interface name to mac mapping
- --output-kind {eni,netplan,sysconfig}, -ok {eni,netplan,sysconfig}
+ --debug enable debug logging to stderr.
+ -O {eni,netplan,networkd,sysconfig,network-manager}, --output-kind {eni,netplan,networkd,sysconfig,network-manager}
+ The network config format to emit
Example of converting V2 to sysconfig:
.. code-block:: shell-session
- $ tools/net-convert.py --network-data v2.yaml --kind yaml \
+ $ cloud-init devel net-convert --network-data v2.yaml --kind yaml \
--output-kind sysconfig -d target
$ cat target/etc/sysconfig/network-scripts/ifcfg-eth*
@@ -306,6 +317,7 @@ Example output:
.. _Cloud-init: https://launchpad.net/cloud-init
+.. _LXD: https://linuxcontainers.org/lxd/docs/master/cloud-init/#custom-network-configuration
.. _NetworkManager: https://networkmanager.dev
.. _Netplan: https://netplan.io/
.. _DigitalOcean JSON metadata: https://developers.digitalocean.com/documentation/metadata/
diff --git a/integration-requirements.txt b/integration-requirements.txt
index e539d4ac..99fc06e9 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -1,5 +1,5 @@
# PyPI requirements for cloud-init integration testing
# https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html
#
-pycloudlib @ git+https://github.com/canonical/pycloudlib.git@fe7facd3676b6f125bd7ab7e2141a48c714d77a8
+pycloudlib @ git+https://github.com/canonical/pycloudlib.git@057e4848ab022330052c63d4082a8cd98f69b45e
pytest
diff --git a/packages/bddeb b/packages/bddeb
index fdb541d4..44d82a78 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -100,8 +100,10 @@ def write_debian_folder(root, templ_data, cloud_util_deps):
requires.extend(['python3'] + reqs + test_reqs)
if templ_data['debian_release'] == 'xenial':
requires.append('python3-pytest-catchlog')
- elif templ_data['debian_release'] == 'impish':
- requires.remove('dh-systemd')
+ elif templ_data['debian_release'] in (
+ 'buster', 'xenial', 'bionic', 'focal'
+ ):
+ requires.append('dh-systemd')
templater.render_to_file(util.abs_join(find_root(),
'packages', 'debian', 'control.in'),
util.abs_join(deb_dir, 'control'),
diff --git a/packages/pkg-deps.json b/packages/pkg-deps.json
index 8ba27e85..64c299a4 100644
--- a/packages/pkg-deps.json
+++ b/packages/pkg-deps.json
@@ -3,7 +3,6 @@
"build-requires" : [
"debhelper",
"dh-python",
- "dh-systemd",
"python3-debconf"
],
"renames" : {
diff --git a/pyproject.toml b/pyproject.toml
index 1a3fc176..88b350b0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -21,7 +21,6 @@ module = [
"configobj",
"debconf",
"httplib",
- "httpretty",
"jsonpatch",
"netifaces",
"paramiko.*",
diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl
index c86aa4fc..ac589821 100644
--- a/systemd/cloud-init.service.tmpl
+++ b/systemd/cloud-init.service.tmpl
@@ -13,7 +13,8 @@ After=systemd-networkd-wait-online.service
After=networking.service
{% endif %}
{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora",
- "miraclelinux", "openEuler", "openmandriva", "rhel", "rocky", "virtuozzo"] %}
+ "miraclelinux", "openEuler", "openmandriva", "rhel", "rocky",
+ "suse", "virtuozzo"] %}
After=network.service
After=NetworkManager.service
After=NetworkManager-wait-online.service
diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
index c4dd4eec..308ffedd 100644
--- a/tests/integration_tests/clouds.py
+++ b/tests/integration_tests/clouds.py
@@ -13,6 +13,7 @@ from uuid import UUID
from pycloudlib import (
EC2,
GCE,
+ IBM,
OCI,
Azure,
LXDContainer,
@@ -408,3 +409,14 @@ class OpenstackCloud(IntegrationCloud):
"OS image id: {}".format(image.image_id)
) from e
return image.image_id
+
+
+class IbmCloud(IntegrationCloud):
+ datasource = "ibm"
+ cloud_instance: IBM
+
+ def _get_cloud_instance(self) -> IBM:
+ # Note: IBM image names starting with `ibm` are reserved.
+ return IBM(
+ tag="integration-test-ibm",
+ )
diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py
index 6157bad8..782ca7e5 100644
--- a/tests/integration_tests/conftest.py
+++ b/tests/integration_tests/conftest.py
@@ -17,6 +17,7 @@ from tests.integration_tests.clouds import (
AzureCloud,
Ec2Cloud,
GceCloud,
+ IbmCloud,
ImageSpecification,
IntegrationCloud,
LxdContainerCloud,
@@ -39,6 +40,7 @@ platforms: Dict[str, Type[IntegrationCloud]] = {
"gce": GceCloud,
"azure": AzureCloud,
"oci": OciCloud,
+ "ibm": IbmCloud,
"lxd_container": LxdContainerCloud,
"lxd_vm": LxdVmCloud,
"openstack": OpenstackCloud,
diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py
index abc70fe4..c4f28fcb 100644
--- a/tests/integration_tests/integration_settings.py
+++ b/tests/integration_tests/integration_settings.py
@@ -21,6 +21,7 @@ RUN_UNSTABLE = False
# azure
# ec2
# gce
+# ibm
# oci
# openstack
PLATFORM = "lxd_container"
diff --git a/tests/integration_tests/modules/test_ansible.py b/tests/integration_tests/modules/test_ansible.py
index 587385d4..3d0f96cb 100644
--- a/tests/integration_tests/modules/test_ansible.py
+++ b/tests/integration_tests/modules/test_ansible.py
@@ -301,6 +301,9 @@ def test_ansible_pull_distro(client):
@pytest.mark.user_data(ANSIBLE_CONTROL)
@pytest.mark.lxd_vm
+# Not bionic because the test uses pip install, and newer pip versions are
+# dropping support for bionic's python version
+@pytest.mark.not_bionic
def test_ansible_controller(client):
log = client.read_from_file("/var/log/cloud-init.log")
verify_clean_log(log)
diff --git a/tests/integration_tests/modules/test_combined.py b/tests/integration_tests/modules/test_combined.py
index 3c1013eb..647e8728 100644
--- a/tests/integration_tests/modules/test_combined.py
+++ b/tests/integration_tests/modules/test_combined.py
@@ -424,6 +424,23 @@ class TestCombined:
assert v1_data["instance_id"] == client.instance.instance_id
assert v1_data["local_hostname"] == client.instance.name
+ @pytest.mark.lxd_container
+ @pytest.mark.azure
+ @pytest.mark.gce
+ @pytest.mark.ec2
+ def test_instance_cloud_id_across_reboot(
+ self, class_client: IntegrationInstance
+ ):
+ client = class_client
+ platform = client.settings.PLATFORM
+ cloud_id_alias = {"ec2": "aws", "lxd_container": "lxd"}
+ cloud_file = f"cloud-id-{cloud_id_alias.get(platform, platform)}"
+ assert client.execute(f"test -f /run/cloud-init/{cloud_file}").ok
+ assert client.execute("test -f /run/cloud-init/cloud-id").ok
+ client.restart()
+ assert client.execute(f"test -f /run/cloud-init/{cloud_file}").ok
+ assert client.execute("test -f /run/cloud-init/cloud-id").ok
+
@pytest.mark.user_data(USER_DATA)
class TestCombinedNoCI:
diff --git a/tests/integration_tests/modules/test_puppet.py b/tests/integration_tests/modules/test_puppet.py
index 1bd9cee4..b8613866 100644
--- a/tests/integration_tests/modules/test_puppet.py
+++ b/tests/integration_tests/modules/test_puppet.py
@@ -17,7 +17,12 @@ def test_puppet_service(client: IntegrationInstance):
"""Basic test that puppet gets installed and runs."""
log = client.read_from_file("/var/log/cloud-init.log")
verify_clean_log(log)
- assert client.execute("systemctl is-active puppet").ok
+ puppet_ok = client.execute("systemctl is-active puppet.service").ok
+ puppet_agent_ok = client.execute(
+ "systemctl is-active puppet-agent.service"
+ ).ok
+ assert True in [puppet_ok, puppet_agent_ok]
+ assert False in [puppet_ok, puppet_agent_ok]
assert "Running command ['puppet', 'agent'" not in log
diff --git a/tests/integration_tests/modules/test_set_password.py b/tests/integration_tests/modules/test_set_password.py
index 4e0ee122..765dd30c 100644
--- a/tests/integration_tests/modules/test_set_password.py
+++ b/tests/integration_tests/modules/test_set_password.py
@@ -191,6 +191,15 @@ class Mixin:
# We look for the exact line match, to avoid a commented line matching
assert "PasswordAuthentication yes" in sshd_config.splitlines()
+ @pytest.mark.ubuntu
+ def test_check_ssh_service(self, class_client):
+ """Ensure we check the sshd status because we modified the config"""
+ log = class_client.read_from_file("/var/log/cloud-init.log")
+ assert (
+ "'systemctl', 'show', '--property', 'ActiveState', "
+ "'--value', 'ssh'" in log
+ )
+
def test_sshd_config(self, class_client):
"""Test that SSH password auth is enabled."""
sshd_config = class_client.execute("sshd -T").stdout
diff --git a/tests/unittests/cmd/test_clean.py b/tests/unittests/cmd/test_clean.py
index 232cc731..c5385b79 100644
--- a/tests/unittests/cmd/test_clean.py
+++ b/tests/unittests/cmd/test_clean.py
@@ -238,12 +238,22 @@ class TestClean:
assert 0 == retcode
assert [(["shutdown", "-r", "now"], False)] == called_cmds
- @pytest.mark.parametrize("machine_id", (True, False))
+ @pytest.mark.parametrize(
+ "machine_id,systemd_val",
+ (
+ pytest.param(True, True, id="machine_id_on_systemd_uninitialized"),
+ pytest.param(
+ True, False, id="machine_id_non_systemd_removes_file"
+ ),
+ pytest.param(False, False, id="no_machine_id_param_file_remains"),
+ ),
+ )
+ @mock.patch("cloudinit.cmd.clean.uses_systemd")
def test_handle_clean_args_removed_machine_id(
- self, machine_id, clean_paths, init_class
+ self, uses_systemd, machine_id, systemd_val, clean_paths, init_class
):
"""handle_clean_args removes /etc/machine-id when arg is True."""
-
+ uses_systemd.return_value = systemd_val
myargs = namedtuple(
"MyArgs", "remove_logs remove_seed reboot machine_id"
)
@@ -271,7 +281,13 @@ class TestClean:
args=cmdargs,
)
assert 0 == retcode
- assert machine_id_path.exists() is bool(not machine_id)
+ if systemd_val:
+ if machine_id:
+ assert "uninitialized\n" == machine_id_path.read()
+ else:
+ assert "SOME-AMAZN-MACHINE-ID" == machine_id_path.read()
+ else:
+ assert machine_id_path.exists() is bool(not machine_id)
def test_status_main(self, clean_paths, init_class):
"""clean.main can be run as a standalone script."""
diff --git a/tests/unittests/config/test_cc_puppet.py b/tests/unittests/config/test_cc_puppet.py
index 27a49722..23461c2b 100644
--- a/tests/unittests/config/test_cc_puppet.py
+++ b/tests/unittests/config/test_cc_puppet.py
@@ -12,6 +12,7 @@ from cloudinit.config.schema import (
get_schema,
validate_cloudconfig_schema,
)
+from cloudinit.subp import ProcessExecutionError
from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
from tests.unittests.util import get_cloud
@@ -25,67 +26,55 @@ def fake_tempdir(mocker, tmpdir):
).return_value.__enter__.return_value = str(tmpdir)
-@mock.patch("cloudinit.config.cc_puppet.subp.which")
@mock.patch("cloudinit.config.cc_puppet.subp.subp")
-@mock.patch("cloudinit.config.cc_puppet.os")
-class TestAutostartPuppet(CiTestCase):
- def test_wb_autostart_puppet_updates_puppet_default(
- self, m_os, m_subp, m_subpw
- ):
- """Update /etc/default/puppet to autostart if it exists."""
-
- def _fake_exists(path):
- return path == "/etc/default/puppet"
-
- m_os.path.exists.side_effect = _fake_exists
- cc_puppet._autostart_puppet(LOG)
- self.assertEqual(
- [
- mock.call(
- [
- "sed",
- "-i",
- "-e",
- "s/^START=.*/START=yes/",
- "/etc/default/puppet",
- ],
- capture=False,
- )
- ],
- m_subp.call_args_list,
- )
+class TestManagePuppetServices(CiTestCase):
+ def setUp(self):
+ super(TestManagePuppetServices, self).setUp()
+ self.cloud = get_cloud()
- def test_wb_autostart_pupppet_enables_puppet_systemctl(
- self, m_os, m_subp, m_subpw
+ def test_wb_manage_puppet_services_enables_puppet_systemctl(
+ self,
+ m_subp,
):
- """If systemctl is present, enable puppet via systemctl."""
-
- m_os.path.exists.return_value = False
- m_subpw.return_value = "/usr/bin/systemctl"
- cc_puppet._autostart_puppet(LOG)
+ cc_puppet._manage_puppet_services(LOG, self.cloud, "enable")
expected_calls = [
- mock.call(["systemctl", "enable", "puppet.service"], capture=False)
+ mock.call(
+ ["systemctl", "enable", "puppet-agent.service"],
+ capture=True,
+ )
]
- self.assertEqual(expected_calls, m_subp.call_args_list)
+ self.assertIn(expected_calls, m_subp.call_args_list)
- def test_wb_autostart_pupppet_enables_puppet_chkconfig(
- self, m_os, m_subp, m_subpw
+ def test_wb_manage_puppet_services_starts_puppet_systemctl(
+ self,
+ m_subp,
):
- """If chkconfig is present, enable puppet via checkcfg."""
-
- def _fake_exists(path):
- return path == "/sbin/chkconfig"
+ cc_puppet._manage_puppet_services(LOG, self.cloud, "start")
+ expected_calls = [
+ mock.call(
+ ["systemctl", "start", "puppet-agent.service"],
+ capture=True,
+ )
+ ]
+ self.assertIn(expected_calls, m_subp.call_args_list)
- m_subpw.return_value = None
- m_os.path.exists.side_effect = _fake_exists
- cc_puppet._autostart_puppet(LOG)
+ def test_enable_fallback_on_failure(self, m_subp):
+ m_subp.side_effect = (ProcessExecutionError, 0)
+ cc_puppet._manage_puppet_services(LOG, self.cloud, "enable")
expected_calls = [
- mock.call(["/sbin/chkconfig", "puppet", "on"], capture=False)
+ mock.call(
+ ["systemctl", "enable", "puppet-agent.service"],
+ capture=True,
+ ),
+ mock.call(
+ ["systemctl", "enable", "puppet.service"],
+ capture=True,
+ ),
]
self.assertEqual(expected_calls, m_subp.call_args_list)
-@mock.patch("cloudinit.config.cc_puppet._autostart_puppet")
+@mock.patch("cloudinit.config.cc_puppet._manage_puppet_services")
class TestPuppetHandle(CiTestCase):
with_logs = True
@@ -97,35 +86,36 @@ class TestPuppetHandle(CiTestCase):
self.csr_attributes_path = self.tmp_path("csr_attributes.yaml")
self.cloud = get_cloud()
- def test_skips_missing_puppet_key_in_cloudconfig(self, m_auto):
+ def test_skips_missing_puppet_key_in_cloudconfig(self, m_man_puppet):
"""Cloud-config containing no 'puppet' key is skipped."""
cfg = {}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
self.assertIn("no 'puppet' configuration found", self.logs.getvalue())
- self.assertEqual(0, m_auto.call_count)
+ self.assertEqual(0, m_man_puppet.call_count)
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
- def test_puppet_config_starts_puppet_service(self, m_subp, m_auto):
+ def test_puppet_config_starts_puppet_service(self, m_subp, m_man_puppet):
"""Cloud-config 'puppet' configuration starts puppet."""
cfg = {"puppet": {"install": False}}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
- self.assertEqual(1, m_auto.call_count)
- self.assertIn(
- [mock.call(["service", "puppet", "start"], capture=False)],
- m_subp.call_args_list,
- )
+ self.assertEqual(2, m_man_puppet.call_count)
+ expected_calls = [
+ mock.call(LOG, self.cloud, "enable"),
+ mock.call(LOG, self.cloud, "start"),
+ ]
+ self.assertEqual(expected_calls, m_man_puppet.call_args_list)
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
- def test_empty_puppet_config_installs_puppet(self, m_subp, m_auto):
+ def test_empty_puppet_config_installs_puppet(self, m_subp, m_man_puppet):
"""Cloud-config empty 'puppet' configuration installs latest puppet."""
self.cloud.distro = mock.MagicMock()
cfg = {"puppet": {}}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
self.assertEqual(
- [mock.call(("puppet", None))],
+ [mock.call(("puppet-agent", None))],
self.cloud.distro.install_packages.call_args_list,
)
@@ -136,8 +126,8 @@ class TestPuppetHandle(CiTestCase):
self.cloud.distro = mock.MagicMock()
cfg = {"puppet": {"install": True}}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
- self.assertEqual(
- [mock.call(("puppet", None))],
+ self.assertIn(
+ [mock.call(("puppet-agent", None))],
self.cloud.distro.install_packages.call_args_list,
)
@@ -246,14 +236,14 @@ class TestPuppetHandle(CiTestCase):
cfg = {"puppet": {"version": "3.8"}}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
self.assertEqual(
- [mock.call(("puppet", "3.8"))],
+ [mock.call(("puppet-agent", "3.8"))],
self.cloud.distro.install_packages.call_args_list,
)
@mock.patch("cloudinit.config.cc_puppet.get_config_value")
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
def test_puppet_config_updates_puppet_conf(
- self, m_subp, m_default, m_auto
+ self, m_subp, m_default, m_man_puppet
):
"""When 'conf' is provided update values in PUPPET_CONF_PATH."""
@@ -277,7 +267,7 @@ class TestPuppetHandle(CiTestCase):
@mock.patch("cloudinit.config.cc_puppet.get_config_value")
@mock.patch("cloudinit.config.cc_puppet.subp.subp")
def test_puppet_writes_csr_attributes_file(
- self, m_subp, m_default, m_auto
+ self, m_subp, m_default, m_man_puppet
):
"""When csr_attributes is provided
creates file in PUPPET_CSR_ATTRIBUTES_PATH."""
@@ -321,44 +311,50 @@ class TestPuppetHandle(CiTestCase):
self.assertEqual(expected, content)
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
- def test_puppet_runs_puppet_if_requested(self, m_subp, m_auto):
+ def test_puppet_runs_puppet_if_requested(self, m_subp, m_man_puppet):
"""Run puppet with default args if 'exec' is set to True."""
cfg = {"puppet": {"exec": True}}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
- self.assertEqual(1, m_auto.call_count)
+ self.assertEqual(2, m_man_puppet.call_count)
+ expected_calls = [
+ mock.call(LOG, self.cloud, "enable"),
+ mock.call(LOG, self.cloud, "start"),
+ ]
+ self.assertEqual(expected_calls, m_man_puppet.call_args_list)
self.assertIn(
[mock.call(["puppet", "agent", "--test"], capture=False)],
m_subp.call_args_list,
)
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
- def test_puppet_starts_puppetd(self, m_subp, m_auto):
+ def test_puppet_starts_puppetd(self, m_subp, m_man_puppet):
"""Run puppet with default args if 'exec' is set to True."""
cfg = {"puppet": {}}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
- self.assertEqual(1, m_auto.call_count)
- self.assertIn(
- [mock.call(["service", "puppet", "start"], capture=False)],
- m_subp.call_args_list,
- )
+ self.assertEqual(2, m_man_puppet.call_count)
+ expected_calls = [
+ mock.call(LOG, self.cloud, "enable"),
+ mock.call(LOG, self.cloud, "start"),
+ ]
+ self.assertEqual(expected_calls, m_man_puppet.call_args_list)
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
- def test_puppet_skips_puppetd(self, m_subp, m_auto):
+ def test_puppet_skips_puppetd(self, m_subp, m_man_puppet):
"""Run puppet with default args if 'exec' is set to True."""
cfg = {"puppet": {"start_service": False}}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
- self.assertEqual(0, m_auto.call_count)
+ self.assertEqual(0, m_man_puppet.call_count)
self.assertNotIn(
- [mock.call(["service", "puppet", "start"], capture=False)],
+ [mock.call(["systemctl", "start", "puppet-agent"], capture=False)],
m_subp.call_args_list,
)
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
def test_puppet_runs_puppet_with_args_list_if_requested(
- self, m_subp, m_auto
+ self, m_subp, m_man_puppet
):
"""Run puppet with 'exec_args' list if 'exec' is set to True."""
@@ -369,7 +365,7 @@ class TestPuppetHandle(CiTestCase):
}
}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
- self.assertEqual(1, m_auto.call_count)
+ self.assertEqual(2, m_man_puppet.call_count)
self.assertIn(
[
mock.call(
@@ -382,7 +378,7 @@ class TestPuppetHandle(CiTestCase):
@mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
def test_puppet_runs_puppet_with_args_string_if_requested(
- self, m_subp, m_auto
+ self, m_subp, m_man_puppet
):
"""Run puppet with 'exec_args' string if 'exec' is set to True."""
@@ -393,7 +389,7 @@ class TestPuppetHandle(CiTestCase):
}
}
cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
- self.assertEqual(1, m_auto.call_count)
+ self.assertEqual(2, m_man_puppet.call_count)
self.assertIn(
[
mock.call(
@@ -404,6 +400,48 @@ class TestPuppetHandle(CiTestCase):
m_subp.call_args_list,
)
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_falls_back_to_older_name(self, m_subp, m_man_puppet):
+ cfg = {"puppet": {}}
+ with mock.patch(
+ "tests.unittests.util.MockDistro.install_packages"
+ ) as install_pkg:
+ # puppet-agent not installed, but puppet is
+ install_pkg.side_effect = (ProcessExecutionError, 0)
+
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ expected_calls = [
+ mock.call(LOG, self.cloud, "enable"),
+ mock.call(LOG, self.cloud, "start"),
+ ]
+ self.assertEqual(expected_calls, m_man_puppet.call_args_list)
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_with_conf_package_name_fails(self, m_subp, m_man_puppet):
+ cfg = {"puppet": {"package_name": "puppet"}}
+ with mock.patch(
+ "tests.unittests.util.MockDistro.install_packages"
+ ) as install_pkg:
+ # puppet-agent not installed, but puppet is
+ install_pkg.side_effect = (ProcessExecutionError, 0)
+ with pytest.raises(ProcessExecutionError):
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(0, m_man_puppet.call_count)
+ self.assertNotIn(
+ [
+ mock.call(
+ ["systemctl", "start", "puppet-agent"], capture=True
+ )
+ ],
+ m_subp.call_args_list,
+ )
+
+ @mock.patch("cloudinit.config.cc_puppet.subp.subp", return_value=("", ""))
+ def test_puppet_with_conf_package_name_success(self, m_subp, m_man_puppet):
+ cfg = {"puppet": {"package_name": "puppet"}}
+ cc_puppet.handle("notimportant", cfg, self.cloud, LOG, None)
+ self.assertEqual(2, m_man_puppet.call_count)
+
URL_MOCK = mock.Mock()
URL_MOCK.contents = b'#!/bin/bash\necho "Hi Mom"'
diff --git a/tests/unittests/config/test_cc_set_hostname.py b/tests/unittests/config/test_cc_set_hostname.py
index 3d1d86ee..2c92949f 100644
--- a/tests/unittests/config/test_cc_set_hostname.py
+++ b/tests/unittests/config/test_cc_set_hostname.py
@@ -5,6 +5,7 @@ import os
import shutil
import tempfile
from io import BytesIO
+from pathlib import Path
from unittest import mock
from configobj import ConfigObj
@@ -242,5 +243,21 @@ class TestHostname(t_help.FilesystemMockingTestCase):
str(ctx_mgr.exception),
)
+ def test_ignore_empty_previous_artifact_file(self):
+ cfg = {
+ "hostname": "blah",
+ "fqdn": "blah.blah.blah.yahoo.com",
+ }
+ distro = self._fetch_distro("debian")
+ paths = helpers.Paths({"cloud_dir": self.tmp})
+ ds = None
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ prev_fn = Path(cc.get_cpath("data")) / "set-hostname"
+ prev_fn.touch()
+ cc_set_hostname.handle("cc_set_hostname", cfg, cc, LOG, [])
+ contents = util.load_file("/etc/hostname")
+ self.assertEqual("blah", contents.strip())
+
# vi: ts=4 expandtab
diff --git a/tests/unittests/config/test_cc_set_passwords.py b/tests/unittests/config/test_cc_set_passwords.py
index d79f9659..0553f781 100644
--- a/tests/unittests/config/test_cc_set_passwords.py
+++ b/tests/unittests/config/test_cc_set_passwords.py
@@ -18,7 +18,7 @@ from tests.unittests.util import get_cloud
MODPATH = "cloudinit.config.cc_set_passwords."
LOG = logging.getLogger(__name__)
SYSTEMD_CHECK_CALL = mock.call(
- "systemctl show --property ActiveState --value ssh"
+ ["systemctl", "show", "--property", "ActiveState", "--value", "ssh"]
)
SYSTEMD_RESTART_CALL = mock.call(["systemctl", "restart", "ssh"], capture=True)
SERVICE_RESTART_CALL = mock.call(["service", "ssh", "restart"], capture=True)
diff --git a/tests/unittests/config/test_cc_ssh.py b/tests/unittests/config/test_cc_ssh.py
index 8f2ca8bf..cc4032de 100644
--- a/tests/unittests/config/test_cc_ssh.py
+++ b/tests/unittests/config/test_cc_ssh.py
@@ -330,17 +330,17 @@ class TestHandleSsh:
mock.call(
"/etc/ssh/ssh_host_{}_key".format(key_type),
private_value,
- 384,
+ 0o600,
),
mock.call(
"/etc/ssh/ssh_host_{}_key.pub".format(key_type),
public_value,
- 384,
+ 0o644,
),
mock.call(
"/etc/ssh/ssh_host_{}_key-cert.pub".format(key_type),
cert_value,
- 384,
+ 0o644,
),
mock.call(
sshd_conf_fname,
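
For the permission constants above: 384 is simply 0o600 written in decimal, so the private-key expectation is unchanged in value and only becomes readable as an octal literal, while the .pub and -cert.pub expectations move to world-readable 0o644. A quick standalone check:

    # 384 decimal == 0o600 octal (rw for owner only)
    assert 384 == 0o600
    assert oct(384) == "0o600"
    # 0o644 additionally grants read to group and others (rw-r--r--)
    assert 0o644 == 420
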
diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py
index a877ce33..6f98cb27 100644
--- a/tests/unittests/sources/test_azure.py
+++ b/tests/unittests/sources/test_azure.py
@@ -3966,6 +3966,7 @@ class TestIMDS:
fake_http_error_for_code(410),
fake_http_error_for_code(429),
fake_http_error_for_code(500),
+ requests.ConnectionError("Fake connection error"),
requests.Timeout("Fake connection timeout"),
],
)
@@ -4007,6 +4008,7 @@ class TestIMDS:
fake_http_error_for_code(410),
fake_http_error_for_code(429),
fake_http_error_for_code(500),
+ requests.ConnectionError("Fake connection error"),
requests.Timeout("Fake connection timeout"),
],
)
@@ -4049,7 +4051,6 @@ class TestIMDS:
[
fake_http_error_for_code(403),
fake_http_error_for_code(501),
- requests.ConnectionError("Fake Network Unreachable"),
],
)
def test_will_not_retry_errors(
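
The Azure change moves requests.ConnectionError from the will-not-retry cases into the retried ones alongside Timeout. The two are sibling exception types, so retry logic has to name them separately; a small check (requires the requests package, already a test dependency):

    import requests

    assert issubclass(requests.ConnectionError, requests.RequestException)
    assert issubclass(requests.Timeout, requests.RequestException)
    # ConnectionError is not a Timeout, so listing only Timeout would miss it
    assert not issubclass(requests.ConnectionError, requests.Timeout)
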
diff --git a/tests/unittests/sources/test_init.py b/tests/unittests/sources/test_init.py
index a81c33a2..0447e02c 100644
--- a/tests/unittests/sources/test_init.py
+++ b/tests/unittests/sources/test_init.py
@@ -716,9 +716,13 @@ class TestDataSource(CiTestCase):
"cloudinit.sources.canonical_cloud_id", return_value="my-cloud"
):
datasource.get_data()
- self.assertEqual("my-cloud\n", util.load_file(cloud_id_link))
- # A symlink with the generic /run/cloud-init/cloud-id link is present
- self.assertTrue(util.is_link(cloud_id_link))
+ self.assertEqual("my-cloud\n", util.load_file(cloud_id_link))
+ # A symlink with the generic /run/cloud-init/cloud-id
+ # link is present
+ self.assertTrue(util.is_link(cloud_id_link))
+ datasource.persist_instance_data()
+ # cloud-id<cloud-type> not deleted: no cloud-id change
+ self.assertTrue(os.path.exists(cloud_id_file))
# When cloud-id changes, symlink and content change
with mock.patch(
"cloudinit.sources.canonical_cloud_id", return_value="my-cloud2"
diff --git a/tests/unittests/sources/test_vultr.py b/tests/unittests/sources/test_vultr.py
index cfd2f376..488df4f3 100644
--- a/tests/unittests/sources/test_vultr.py
+++ b/tests/unittests/sources/test_vultr.py
@@ -144,8 +144,46 @@ VULTR_V1_2 = {
],
}
+VULTR_V1_3 = None
+
SSH_KEYS_1 = ["ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key"]
+CLOUD_INTERFACES = {
+ "version": 1,
+ "config": [
+ {
+ "type": "nameserver",
+ "address": ["108.61.10.10", "2001:19f0:300:1704::6"],
+ },
+ {
+ "type": "physical",
+ "mac_address": "56:00:03:1b:4e:ca",
+ "accept-ra": 1,
+ "subnets": [
+ {"type": "dhcp", "control": "auto"},
+ {"type": "ipv6_slaac", "control": "auto"},
+ {
+ "type": "static6",
+ "control": "auto",
+ "address": "2002:19f0:5:28a7::/64",
+ },
+ ],
+ },
+ {
+ "type": "physical",
+ "mac_address": "5a:00:03:1b:4e:ca",
+ "subnets": [
+ {
+ "type": "static",
+ "control": "auto",
+ "address": "10.1.112.3",
+ "netmask": "255.255.240.0",
+ }
+ ],
+ },
+ ],
+}
+
INTERFACES = ["lo", "dummy0", "eth1", "eth0", "eth2"]
ORDERED_INTERFACES = ["eth0", "eth1", "eth2"]
@@ -246,8 +284,14 @@ def check_route(url):
class TestDataSourceVultr(CiTestCase):
def setUp(self):
+ global VULTR_V1_3
super(TestDataSourceVultr, self).setUp()
+ # Create v3
+ VULTR_V1_3 = VULTR_V1_2.copy()
+ VULTR_V1_3["cloud_interfaces"] = CLOUD_INTERFACES.copy()
+ VULTR_V1_3["interfaces"] = []
+
# Stored as a dict to make it easier to maintain
raw1 = json.dumps(VULTR_V1_1["vendor-data"][0])
raw2 = json.dumps(VULTR_V1_2["vendor-data"][0])
@@ -255,6 +299,7 @@ class TestDataSourceVultr(CiTestCase):
# Make expected format
VULTR_V1_1["vendor-data"] = [raw1]
VULTR_V1_2["vendor-data"] = [raw2]
+ VULTR_V1_3["vendor-data"] = [raw2]
self.tmp = self.tmp_dir()
@@ -302,6 +347,28 @@ class TestDataSourceVultr(CiTestCase):
# Test network config generation
self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config)
+ # Test the datasource with new network config type
+ @mock.patch("cloudinit.net.get_interfaces_by_mac")
+ @mock.patch("cloudinit.sources.helpers.vultr.is_vultr")
+ @mock.patch("cloudinit.sources.helpers.vultr.get_metadata")
+ def test_datasource_cloud_interfaces(
+ self, mock_getmeta, mock_isvultr, mock_netmap
+ ):
+ mock_getmeta.return_value = VULTR_V1_3
+ mock_isvultr.return_value = True
+ mock_netmap.return_value = INTERFACE_MAP
+
+ distro = mock.MagicMock()
+ distro.get_tmp_exec_path = self.tmp_dir
+ source = DataSourceVultr.DataSourceVultr(
+ settings.CFG_BUILTIN, distro, helpers.Paths({"run_dir": self.tmp})
+ )
+
+ source._get_data()
+
+ # Test network config generation
+ self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config)
+
# Test network config generation
@mock.patch("cloudinit.net.get_interfaces_by_mac")
def test_network_config(self, mock_netmap):
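
One detail of the Vultr setUp change: VULTR_V1_3 is built with dict.copy(), which is shallow, so nested values stay shared with VULTR_V1_2; the test then rebinds "interfaces", "cloud_interfaces" and "vendor-data" on the copy, which is safe because rebinding a key never mutates the shared objects. A minimal illustration of that distinction:

    base = {"vendor-data": ["raw"], "interfaces": ["eth0"]}
    shallow = base.copy()

    # rebinding a key on the shallow copy leaves the original untouched...
    shallow["interfaces"] = []
    assert base["interfaces"] == ["eth0"]

    # ...but mutating a shared nested object is visible through both dicts
    shallow["vendor-data"].append("extra")
    assert base["vendor-data"] == ["raw", "extra"]
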
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index 7ed041d0..056aaeb6 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -34,6 +34,7 @@ from tests.unittests.helpers import (
CiTestCase,
FilesystemMockingTestCase,
dir2dict,
+ does_not_raise,
mock,
populate_dir,
)
@@ -5225,7 +5226,7 @@ USERCTL=no
# Created by cloud-init on instance boot automatically, do not edit.
#
2a00:1730:fff9:100::1/128 via ::0 dev eth0
- ::0/64 via 2a00:1730:fff9:100::1 dev eth0
+ ::0/0 via 2a00:1730:fff9:100::1 dev eth0
""" # noqa: E501
),
}
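
The corrected route file uses ::0/0, the IPv6 catch-all prefix (the counterpart of 0.0.0.0/0); the old ::0/64 only matched addresses whose upper 64 bits are zero, so it never acted as a default route. A standard-library check, independent of cloud-init:

    import ipaddress

    default6 = ipaddress.ip_network("::0/0")
    narrow = ipaddress.ip_network("::0/64")

    addr = ipaddress.ip_address("2a00:1730:fff9:100::2")
    assert addr in default6      # every IPv6 address falls under ::0/0
    assert addr not in narrow    # but almost none fall under ::0/64
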
@@ -8008,6 +8009,7 @@ class TestInterfaceHasOwnMac(CiTestCase):
mock.Mock(return_value=False),
)
class TestGetInterfacesByMac(CiTestCase):
+ with_logs = True
_data = {
"bonds": ["bond1"],
"bridges": ["bridge1"],
@@ -8220,6 +8222,24 @@ class TestGetInterfacesByMac(CiTestCase):
}
self.assertEqual(expected, result)
+ def test_duplicate_ignored_macs(self):
+ # LP: #199792
+ self._data = copy.deepcopy(self._data)
+ self._data["macs"]["swp0"] = "9a:57:7d:78:47:c0"
+ self._data["macs"]["swp1"] = "9a:57:7d:78:47:c0"
+ self._data["own_macs"].append("swp0")
+ self._data["own_macs"].append("swp1")
+ self._data["drivers"]["swp0"] = "mscc_felix"
+ self._data["drivers"]["swp1"] = "mscc_felix"
+ self._mock_setup()
+ with does_not_raise():
+ net.get_interfaces_by_mac()
+ pattern = (
+ "Ignoring duplicate macs from 'swp[0-1]' and 'swp[0-1]' due to "
+ "driver 'mscc_felix'."
+ )
+ assert re.search(pattern, self.logs.getvalue())
+
class TestInterfacesSorting(CiTestCase):
def test_natural_order(self):
diff --git a/tests/unittests/test_stages.py b/tests/unittests/test_stages.py
index 7fde2bac..15a7e973 100644
--- a/tests/unittests/test_stages.py
+++ b/tests/unittests/test_stages.py
@@ -356,6 +356,26 @@ class TestInit:
) == self.init._find_networking_config()
assert "network config disabled" not in caplog.text
+ @mock.patch(M_PATH + "cmdline.read_initramfs_config", return_value={})
+ @mock.patch(M_PATH + "cmdline.read_kernel_cmdline_config", return_value={})
+ def test_warn_on_empty_network(self, m_cmdline, m_initramfs, caplog):
+ """funky whitespace can lead to a network key that is None, which then
+ causes fallback. Test warning log on empty network key.
+ """
+ m_cmdline.return_value = {} # Kernel doesn't disable networking
+ m_initramfs.return_value = {} # no initramfs network config
+ # Neither datasource nor system_info disable or provide network
+ self.init._cfg = {
+ "system_info": {"paths": {"cloud_dir": self.tmpdir}},
+ "network": None,
+ }
+ self.init.datasource = FakeDataSource(network_config={"network": None})
+
+ self.init.distro.generate_fallback_config = lambda: {}
+
+ self.init._find_networking_config()
+ assert "Empty network config found" in caplog.text
+
def test_apply_network_config_disabled(self, caplog):
"""Log when network is disabled by upgraded-network."""
disable_file = os.path.join(
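
The docstring of the new stages test refers to a network key that parses to None; in YAML, a key followed only by whitespace does exactly that, which is presumably how such configs arise in practice. A small illustration (PyYAML is already a cloud-init dependency):

    import yaml

    # a bare "network:" key with nothing after it loads as None, not {}
    cfg = yaml.safe_load("network:   \n")
    assert cfg == {"network": None}
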
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index c61f1e24..6833aa9a 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -76,9 +76,11 @@ mal
ManassehZhou
mamercad
manuelisimo
+MarkMielke
marlluslustosa
matthewruffell
maxnet
+Mazorius
megian
michaelrommel
mitechie
diff --git a/tox.ini b/tox.ini
index 3b668bc0..b49272ac 100644
--- a/tox.ini
+++ b/tox.ini
@@ -302,6 +302,7 @@ markers =
ec2: test will only run on EC2 platform
gce: test will only run on GCE platform
hypothesis_slow: hypothesis test too slow to run as unit test
+ ibm: test will only run on IBM platform
instance_name: the name to be used for the test instance
integration_cloud_args: args for IntegrationCloud customization
is_iscsi: whether an instance has iscsi net cfg or not