author     James Falcon <james.falcon@canonical.com>  2021-07-19 14:23:12 -0500
committer  git-ubuntu importer <ubuntu-devel-discuss@lists.ubuntu.com>  2021-07-19 20:08:09 +0000
commit     a85ca5b85b69bb228b136c1dba23e3a289115b5c (patch)
tree       b556285cf403c37955343348be237e6a356841cb
parent     d0a8d63f380283af319a981cb4eb89c8b812bbbc (diff)
download   cloud-init-git-a85ca5b85b69bb228b136c1dba23e3a289115b5c.tar.gz
21.2-43-g184c836a-0ubuntu1 (patches unapplied)
Imported using git-ubuntu import.
-rw-r--r--  HACKING.rst | 8
-rw-r--r--  README.md | 6
-rw-r--r--  bash_completion/cloud-init | 5
-rw-r--r--  cloudinit/cmd/devel/hotplug_hook.py | 236
-rwxr-xr-x  cloudinit/cmd/devel/net_convert.py | 14
-rw-r--r--  cloudinit/cmd/devel/parser.py | 3
-rw-r--r--  cloudinit/cmd/main.py | 30
-rw-r--r--  cloudinit/cmd/tests/test_main.py | 2
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 26
-rw-r--r--  cloudinit/config/cc_growpart.py | 4
-rw-r--r--  cloudinit/config/cc_ntp.py | 26
-rw-r--r--  cloudinit/config/cc_resizefs.py | 5
-rw-r--r--  cloudinit/config/cc_resolv_conf.py | 25
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py | 4
-rw-r--r--  cloudinit/config/tests/test_resolv_conf.py | 28
-rwxr-xr-x  cloudinit/distros/__init__.py | 54
-rw-r--r--  cloudinit/distros/alpine.py | 13
-rw-r--r--  cloudinit/distros/arch.py | 11
-rw-r--r--  cloudinit/distros/bsd.py | 3
-rw-r--r--  cloudinit/distros/debian.py | 14
-rw-r--r--  cloudinit/distros/dragonflybsd.py | 12
-rw-r--r--  cloudinit/distros/freebsd.py | 15
-rw-r--r--  cloudinit/distros/gentoo.py | 1
-rw-r--r--  cloudinit/distros/opensuse.py | 10
-rw-r--r--  cloudinit/distros/photon.py | 135
-rw-r--r--  cloudinit/distros/rhel.py | 10
-rw-r--r--  cloudinit/distros/rocky.py | 9
-rw-r--r--  cloudinit/event.py | 70
-rw-r--r--  cloudinit/handlers/jinja_template.py | 5
-rw-r--r--  cloudinit/log.py | 4
-rw-r--r--  cloudinit/net/__init__.py | 6
-rw-r--r--  cloudinit/net/activators.py | 252
-rw-r--r--  cloudinit/net/bsd.py | 15
-rw-r--r--  cloudinit/net/dhcp.py | 2
-rw-r--r--  cloudinit/net/freebsd.py | 22
-rw-r--r--  cloudinit/net/netbsd.py | 8
-rw-r--r--  cloudinit/net/netplan.py | 10
-rw-r--r--  cloudinit/net/network_state.py | 125
-rw-r--r--  cloudinit/net/networkd.py | 259
-rw-r--r--  cloudinit/net/openbsd.py | 9
-rw-r--r--  cloudinit/net/renderer.py | 2
-rw-r--r--  cloudinit/net/renderers.py | 17
-rw-r--r--  cloudinit/net/sysconfig.py | 11
-rw-r--r--  cloudinit/net/tests/test_dhcp.py | 6
-rw-r--r--  cloudinit/net/tests/test_network_state.py | 109
-rw-r--r--  cloudinit/reporting/events.py | 8
-rwxr-xr-x  cloudinit/sources/DataSourceAzure.py | 15
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 10
-rw-r--r--  cloudinit/sources/DataSourceDigitalOcean.py | 2
-rw-r--r--  cloudinit/sources/DataSourceEc2.py | 17
-rw-r--r--  cloudinit/sources/DataSourceOVF.py | 35
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py | 11
-rw-r--r--  cloudinit/sources/DataSourceRbxCloud.py | 9
-rw-r--r--  cloudinit/sources/DataSourceScaleway.py | 10
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py | 8
-rw-r--r--  cloudinit/sources/__init__.py | 62
-rw-r--r--  cloudinit/sources/tests/test_init.py | 29
-rw-r--r--  cloudinit/ssh_util.py | 22
-rw-r--r--  cloudinit/stages.py | 125
-rw-r--r--  cloudinit/tests/.test_util.py.swp | bin 0 -> 16384 bytes
-rw-r--r--  cloudinit/tests/helpers.py | 5
-rw-r--r--  cloudinit/tests/test_event.py | 26
-rw-r--r--  cloudinit/tests/test_stages.py | 122
-rw-r--r--  cloudinit/tests/test_url_helper.py | 4
-rw-r--r--  cloudinit/tests/test_util.py | 55
-rw-r--r--  cloudinit/util.py | 50
-rw-r--r--  config/cloud.cfg.tmpl | 71
-rw-r--r--  debian/changelog | 56
-rw-r--r--  doc/examples/cloud-config-apt.txt | 6
-rw-r--r--  doc/examples/cloud-config-user-groups.txt | 2
-rw-r--r--  doc/rtd/index.rst | 3
-rw-r--r--  doc/rtd/topics/availability.rst | 7
-rw-r--r--  doc/rtd/topics/cli.rst | 4
-rw-r--r--  doc/rtd/topics/code_review.rst | 8
-rw-r--r--  doc/rtd/topics/datasources/digitalocean.rst | 4
-rw-r--r--  doc/rtd/topics/datasources/nocloud.rst | 2
-rw-r--r--  doc/rtd/topics/datasources/ovf.rst | 6
-rw-r--r--  doc/rtd/topics/debugging.rst | 4
-rw-r--r--  doc/rtd/topics/events.rst | 89
-rw-r--r--  doc/rtd/topics/faq.rst | 4
-rw-r--r--  doc/rtd/topics/format.rst | 2
-rw-r--r--  doc/rtd/topics/instancedata.rst | 41
-rw-r--r--  doc/rtd/topics/network-config-format-v1.rst | 5
-rw-r--r--  doc/rtd/topics/testing.rst | 4
-rw-r--r--  integration-requirements.txt | 2
-rw-r--r--  packages/redhat/cloud-init.spec.in | 7
-rwxr-xr-x  setup.py | 4
-rw-r--r-- [-rwxr-xr-x]  systemd/cloud-init-generator.tmpl | 2
-rw-r--r--  systemd/cloud-init-hotplugd.service | 22
-rw-r--r--  systemd/cloud-init-hotplugd.socket | 13
-rw-r--r--  systemd/cloud-init.service.tmpl | 4
-rwxr-xr-x  sysvinit/freebsd/cloudinit | 2
-rw-r--r--  templates/chrony.conf.photon.tmpl | 48
-rw-r--r--  templates/hosts.photon.tmpl | 22
-rw-r--r--  templates/ntp.conf.photon.tmpl | 61
-rw-r--r--  templates/resolv.conf.tmpl | 2
-rw-r--r--  templates/systemd.resolved.conf.tmpl | 15
-rw-r--r--  tests/cloud_tests/releases.yaml | 17
-rw-r--r--  tests/cloud_tests/util.py | 2
-rw-r--r--  tests/integration_tests/assets/keys/id_rsa.test1 | 38
-rw-r--r--  tests/integration_tests/assets/keys/id_rsa.test1.pub | 1
-rw-r--r--  tests/integration_tests/assets/keys/id_rsa.test2 | 38
-rw-r--r--  tests/integration_tests/assets/keys/id_rsa.test2.pub | 1
-rw-r--r--  tests/integration_tests/assets/keys/id_rsa.test3 | 38
-rw-r--r--  tests/integration_tests/assets/keys/id_rsa.test3.pub | 1
-rw-r--r--  tests/integration_tests/assets/test_version_change.pkl | bin 0 -> 21 bytes
-rw-r--r--  tests/integration_tests/assets/trusty_with_mime.pkl | 572
-rw-r--r--  tests/integration_tests/bugs/test_gh868.py | 20
-rw-r--r--  tests/integration_tests/bugs/test_lp1920939.py | 140
-rw-r--r--  tests/integration_tests/clouds.py | 30
-rw-r--r--  tests/integration_tests/conftest.py | 8
-rw-r--r--  tests/integration_tests/integration_settings.py | 7
-rw-r--r--  tests/integration_tests/modules/test_hotplug.py | 94
-rw-r--r--  tests/integration_tests/modules/test_persistence.py | 30
-rw-r--r--  tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py | 2
-rw-r--r--  tests/integration_tests/modules/test_ssh_keysfile.py | 85
-rw-r--r--  tests/integration_tests/modules/test_user_events.py | 95
-rw-r--r--  tests/integration_tests/modules/test_version_change.py | 56
-rw-r--r--  tests/integration_tests/test_upgrade.py | 164
-rw-r--r--  tests/integration_tests/util.py | 17
-rw-r--r--  tests/unittests/cmd/devel/test_hotplug_hook.py | 218
-rw-r--r--  tests/unittests/test_builtin_handlers.py | 30
-rw-r--r--  tests/unittests/test_cli.py | 3
-rw-r--r--  tests/unittests/test_datasource/test_azure.py | 4
-rw-r--r--  tests/unittests/test_datasource/test_ovf.py | 79
-rw-r--r--  tests/unittests/test_datasource/test_smartos.py | 10
-rw-r--r--  tests/unittests/test_distros/test_dragonflybsd.py | 25
-rw-r--r--  tests/unittests/test_distros/test_netconfig.py | 140
-rw-r--r--  tests/unittests/test_handler/test_handler_apt_source_v3.py | 23
-rw-r--r--  tests/unittests/test_handler/test_handler_set_hostname.py | 26
-rw-r--r--  tests/unittests/test_net.py | 250
-rw-r--r--  tests/unittests/test_net_activators.py | 232
-rw-r--r--  tests/unittests/test_net_freebsd.py | 63
-rw-r--r--  tests/unittests/test_render_cloudcfg.py | 3
-rw-r--r--  tests/unittests/test_reporting.py | 18
-rw-r--r--  tests/unittests/test_sshutil.py | 246
-rw-r--r--  tests/unittests/test_util.py | 18
-rw-r--r--  tools/.github-cla-signers | 6
-rwxr-xr-x  tools/build-on-netbsd | 23
-rwxr-xr-x  tools/hook-hotplug | 21
-rwxr-xr-x  tools/read-dependencies | 8
-rwxr-xr-x  tools/render-cloudcfg | 4
-rwxr-xr-x  tools/run-container | 9
-rw-r--r--  tox.ini | 14
-rw-r--r--  udev/10-cloud-init-hook-hotplug.rules | 6
145 files changed, 5205 insertions(+), 518 deletions(-)
diff --git a/HACKING.rst b/HACKING.rst
index 623b3136..fc858672 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -34,7 +34,7 @@ Follow these steps to submit your first pull request to cloud-init:
"Cloud-Init CLA"
* You also may contact user ``rick_h`` in the ``#cloud-init``
- channel on the Freenode IRC network.
+ channel on the Libera IRC network.
* Configure git with your email and name for commit messages.
@@ -55,7 +55,7 @@ Follow these steps to submit your first pull request to cloud-init:
git clone git://github.com/canonical/cloud-init
cd cloud-init
git remote add GH_USER git@github.com:GH_USER/cloud-init.git
- git push GH_USER master
+ git push GH_USER main
* Read through the cloud-init `Code Review Process`_, so you understand
how your changes will end up in cloud-init's codebase.
@@ -78,7 +78,7 @@ Follow these steps to submit your first pull request to cloud-init:
.. _repository: https://github.com/canonical/cloud-init
.. _contributor license agreement: https://ubuntu.com/legal/contributors
.. _contributor-agreement-canonical: https://launchpad.net/%7Econtributor-agreement-canonical/+members
-.. _tools/.github-cla-signers: https://github.com/canonical/cloud-init/blob/master/tools/.github-cla-signers
+.. _tools/.github-cla-signers: https://github.com/canonical/cloud-init/blob/main/tools/.github-cla-signers
.. _PR #344: https://github.com/canonical/cloud-init/pull/344
.. _PR #345: https://github.com/canonical/cloud-init/pull/345
@@ -159,7 +159,7 @@ Then, someone in the `Ubuntu Server`_ team will review your changes and
follow up in the pull request. Look at the `Code Review Process`_ doc
to understand the following steps.
-Feel free to ping and/or join ``#cloud-init`` on freenode irc if you
+Feel free to ping and/or join ``#cloud-init`` on Libera irc if you
have any questions.
.. _tox: https://tox.readthedocs.io/en/latest/
diff --git a/README.md b/README.md
index 01fd3b07..462e3204 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
# cloud-init
-[![Build Status](https://travis-ci.com/canonical/cloud-init.svg?branch=master)](https://travis-ci.com/canonical/cloud-init) [![Read the Docs](https://readthedocs.org/projects/cloudinit/badge/?version=latest&style=flat)](https://cloudinit.readthedocs.org)
+[![Build Status](https://travis-ci.com/canonical/cloud-init.svg?branch=main)](https://travis-ci.com/canonical/cloud-init) [![Read the Docs](https://readthedocs.org/projects/cloudinit/badge/?version=latest&style=flat)](https://cloudinit.readthedocs.org)
Cloud-init is the *industry standard* multi-distribution method for
cross-platform cloud instance initialization. It is supported across all
@@ -26,7 +26,7 @@ If you need support, start with the [user documentation](https://cloudinit.readt
If you need additional help consider reaching out with one of the following options:
-- Ask a question in the [``#cloud-init`` IRC channel on Freenode](https://webchat.freenode.net/?channel=#cloud-init)
+- Ask a question in the [``#cloud-init`` IRC channel on Libera](https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init)
- Search the cloud-init [mailing list archive](https://lists.launchpad.net/cloud-init/)
- Better yet, join the [cloud-init mailing list](https://launchpad.net/~cloud-init) and participate
- Find a bug? [Report bugs on Launchpad](https://bugs.launchpad.net/cloud-init/+filebug)
@@ -39,7 +39,7 @@ get in contact with that distribution and send them our way!
| Supported OSes | Supported Public Clouds | Supported Private Clouds |
| --- | --- | --- |
-| Alpine Linux<br />ArchLinux<br />Debian<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
+| Alpine Linux<br />ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
## To start developing cloud-init
diff --git a/bash_completion/cloud-init b/bash_completion/cloud-init
index a9577e9d..b9f137b1 100644
--- a/bash_completion/cloud-init
+++ b/bash_completion/cloud-init
@@ -28,7 +28,7 @@ _cloudinit_complete()
COMPREPLY=($(compgen -W "--help --tarfile --include-userdata" -- $cur_word))
;;
devel)
- COMPREPLY=($(compgen -W "--help schema net-convert" -- $cur_word))
+ COMPREPLY=($(compgen -W "--help hotplug-hook schema net-convert" -- $cur_word))
;;
dhclient-hook)
COMPREPLY=($(compgen -W "--help up down" -- $cur_word))
@@ -64,6 +64,9 @@ _cloudinit_complete()
--frequency)
COMPREPLY=($(compgen -W "--help instance always once" -- $cur_word))
;;
+ hotplug-hook)
+ COMPREPLY=($(compgen -W "--help" -- $cur_word))
+ ;;
net-convert)
COMPREPLY=($(compgen -W "--help --network-data --kind --directory --output-kind" -- $cur_word))
;;
diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py
new file mode 100644
index 00000000..0282f24a
--- /dev/null
+++ b/cloudinit/cmd/devel/hotplug_hook.py
@@ -0,0 +1,236 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Handle reconfiguration on hotplug events"""
+import abc
+import argparse
+import os
+import time
+
+from cloudinit import log
+from cloudinit import reporting
+from cloudinit.event import EventScope, EventType
+from cloudinit.net import activators, read_sys_net_safe
+from cloudinit.net.network_state import parse_net_config_data
+from cloudinit.reporting import events
+from cloudinit.stages import Init
+from cloudinit.sources import DataSource
+
+
+LOG = log.getLogger(__name__)
+NAME = 'hotplug-hook'
+
+
+def get_parser(parser=None):
+ """Build or extend an arg parser for hotplug-hook utility.
+
+ @param parser: Optional existing ArgumentParser instance representing the
+ subcommand which will be extended to support the args of this utility.
+
+ @returns: ArgumentParser with proper argument configuration.
+ """
+ if not parser:
+ parser = argparse.ArgumentParser(prog=NAME, description=__doc__)
+
+ parser.description = __doc__
+ parser.add_argument("-d", "--devpath", required=True,
+ metavar="PATH",
+ help="sysfs path to hotplugged device")
+ parser.add_argument("-s", "--subsystem", required=True,
+ help="subsystem to act on",
+ choices=['net'])
+ parser.add_argument("-u", "--udevaction", required=True,
+ help="action to take",
+ choices=['add', 'remove'])
+
+ return parser
+
+
+class UeventHandler(abc.ABC):
+ def __init__(self, id, datasource, devpath, action, success_fn):
+ self.id = id
+ self.datasource = datasource # type: DataSource
+ self.devpath = devpath
+ self.action = action
+ self.success_fn = success_fn
+
+ @abc.abstractmethod
+ def apply(self):
+ raise NotImplementedError()
+
+ @property
+ @abc.abstractmethod
+ def config(self):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def device_detected(self) -> bool:
+ raise NotImplementedError()
+
+ def detect_hotplugged_device(self):
+ detect_presence = None
+ if self.action == 'add':
+ detect_presence = True
+ elif self.action == 'remove':
+ detect_presence = False
+ else:
+ raise ValueError('Unknown action: %s' % self.action)
+
+ if detect_presence != self.device_detected():
+ raise RuntimeError(
+ 'Failed to detect %s in updated metadata' % self.id)
+
+ def success(self):
+ return self.success_fn()
+
+ def update_metadata(self):
+ result = self.datasource.update_metadata_if_supported([
+ EventType.HOTPLUG])
+ if not result:
+ raise RuntimeError(
+ 'Datasource %s not updated for '
+ 'event %s' % (self.datasource, EventType.HOTPLUG)
+ )
+ return result
+
+
+class NetHandler(UeventHandler):
+ def __init__(self, datasource, devpath, action, success_fn):
+ # convert devpath to mac address
+ id = read_sys_net_safe(os.path.basename(devpath), 'address')
+ super().__init__(id, datasource, devpath, action, success_fn)
+
+ def apply(self):
+ self.datasource.distro.apply_network_config(
+ self.config,
+ bring_up=False,
+ )
+ interface_name = os.path.basename(self.devpath)
+ activator = activators.select_activator()
+ if self.action == 'add':
+ if not activator.bring_up_interface(interface_name):
+ raise RuntimeError(
+ 'Failed to bring up device: {}'.format(self.devpath))
+ elif self.action == 'remove':
+ if not activator.bring_down_interface(interface_name):
+ raise RuntimeError(
+ 'Failed to bring down device: {}'.format(self.devpath))
+
+ @property
+ def config(self):
+ return self.datasource.network_config
+
+ def device_detected(self) -> bool:
+ netstate = parse_net_config_data(self.config)
+ found = [
+ iface for iface in netstate.iter_interfaces()
+ if iface.get('mac_address') == self.id
+ ]
+ LOG.debug('Ifaces with ID=%s : %s', self.id, found)
+ return len(found) > 0
+
+
+SUBSYSTEM_PROPERTIES_MAP = {
+ 'net': (NetHandler, EventScope.NETWORK),
+}
+
+
+def handle_hotplug(
+ hotplug_init: Init, devpath, subsystem, udevaction
+):
+ handler_cls, event_scope = SUBSYSTEM_PROPERTIES_MAP.get(
+ subsystem, (None, None)
+ )
+ if handler_cls is None:
+ raise Exception(
+ 'hotplug-hook: cannot handle events for subsystem: {}'.format(
+ subsystem))
+
+ LOG.debug('Fetching datasource')
+ datasource = hotplug_init.fetch(existing="trust")
+
+ if not hotplug_init.update_event_enabled(
+ event_source_type=EventType.HOTPLUG,
+ scope=EventScope.NETWORK
+ ):
+ LOG.debug('hotplug not enabled for event of type %s', event_scope)
+ return
+
+ LOG.debug('Creating %s event handler', subsystem)
+ event_handler = handler_cls(
+ datasource=datasource,
+ devpath=devpath,
+ action=udevaction,
+ success_fn=hotplug_init._write_to_cache
+ ) # type: UeventHandler
+ wait_times = [1, 3, 5, 10, 30]
+ for attempt, wait in enumerate(wait_times):
+ LOG.debug(
+ 'subsystem=%s update attempt %s/%s',
+ subsystem,
+ attempt,
+ len(wait_times)
+ )
+ try:
+ LOG.debug('Refreshing metadata')
+ event_handler.update_metadata()
+ LOG.debug('Detecting device in updated metadata')
+ event_handler.detect_hotplugged_device()
+ LOG.debug('Applying config change')
+ event_handler.apply()
+ LOG.debug('Updating cache')
+ event_handler.success()
+ break
+ except Exception as e:
+ LOG.debug('Exception while processing hotplug event. %s', e)
+ time.sleep(wait)
+ last_exception = e
+ else:
+ raise last_exception # type: ignore
+
+
+def handle_args(name, args):
+ # Note that if an exception happens between now and when logging is
+ # set up, we'll only see it in the journal
+ hotplug_reporter = events.ReportEventStack(
+ name, __doc__, reporting_enabled=True
+ )
+
+ hotplug_init = Init(ds_deps=[], reporter=hotplug_reporter)
+ hotplug_init.read_cfg()
+
+ log.setupLogging(hotplug_init.cfg)
+ if 'reporting' in hotplug_init.cfg:
+ reporting.update_configuration(hotplug_init.cfg.get('reporting'))
+
+ # Logging isn't going to be set up until now
+ LOG.debug(
+ '%s called with the following arguments: {udevaction: %s, '
+ 'subsystem: %s, devpath: %s}',
+ name, args.udevaction, args.subsystem, args.devpath
+ )
+
+ with hotplug_reporter:
+ try:
+ handle_hotplug(
+ hotplug_init=hotplug_init,
+ devpath=args.devpath,
+ subsystem=args.subsystem,
+ udevaction=args.udevaction,
+ )
+ except Exception:
+ LOG.exception('Received fatal exception handling hotplug!')
+ raise
+
+ LOG.debug('Exiting hotplug handler')
+ reporting.flush_events()
+
+
+if __name__ == '__main__':
+ args = get_parser().parse_args()
+ handle_args(NAME, args)
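For orientation, a minimal sketch of driving the new subcommand from Python. The flag names and choices come straight from get_parser() above; the devpath value is a hypothetical hotplugged NIC:

    from cloudinit.cmd.devel import hotplug_hook

    parser = hotplug_hook.get_parser()
    args = parser.parse_args(
        ['--devpath', '/sys/class/net/eth1',   # hypothetical NIC sysfs path
         '--subsystem', 'net', '--udevaction', 'add']
    )
    # handle_args() then sets up logging/reporting and runs
    # handle_hotplug() with the retry schedule shown above.
    hotplug_hook.handle_args(hotplug_hook.NAME, args)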
diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py
index 0668ffa3..f4a98e5e 100755
--- a/cloudinit/cmd/devel/net_convert.py
+++ b/cloudinit/cmd/devel/net_convert.py
@@ -11,7 +11,7 @@ from cloudinit.sources import DataSourceAzure as azure
from cloudinit.sources import DataSourceOVF as ovf
from cloudinit import distros, safeyaml
-from cloudinit.net import eni, netplan, network_state, sysconfig
+from cloudinit.net import eni, netplan, networkd, network_state, sysconfig
from cloudinit import log
NAME = 'net-convert'
@@ -51,7 +51,7 @@ def get_parser(parser=None):
parser.add_argument("--debug", action='store_true',
help='enable debug logging to stderr.')
parser.add_argument("-O", "--output-kind",
- choices=['eni', 'netplan', 'sysconfig'],
+ choices=['eni', 'netplan', 'networkd', 'sysconfig'],
required=True,
help="The network config format to emit")
return parser
@@ -96,9 +96,6 @@ def handle_args(name, args):
pre_ns = ovf.get_network_config_from_conf(config, False)
ns = network_state.parse_net_config_data(pre_ns)
- if not ns:
- raise RuntimeError("No valid network_state object created from"
- " input data")
if args.debug:
sys.stderr.write('\n'.join(
@@ -118,9 +115,14 @@ def handle_args(name, args):
config['netplan_path'] = config['netplan_path'][1:]
# enable some netplan features
config['features'] = ['dhcp-use-domains', 'ipv6-mtu']
- else:
+ elif args.output_kind == "networkd":
+ r_cls = networkd.Renderer
+ config = distro.renderer_configs.get('networkd')
+ elif args.output_kind == "sysconfig":
r_cls = sysconfig.Renderer
config = distro.renderer_configs.get('sysconfig')
+ else:
+ raise RuntimeError("Invalid output_kind")
r = r_cls(config=config)
sys.stderr.write(''.join([
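A condensed sketch of the dispatch this hunk introduces, assuming only the Renderer classes referenced above (netplan's extra feature flags omitted) — unknown kinds now fail loudly instead of falling through to sysconfig:

    from cloudinit.net import eni, netplan, networkd, sysconfig

    RENDERERS = {
        'eni': eni.Renderer,
        'netplan': netplan.Renderer,
        'networkd': networkd.Renderer,   # new in this change
        'sysconfig': sysconfig.Renderer,
    }

    def renderer_for(output_kind, distro):
        try:
            r_cls = RENDERERS[output_kind]
        except KeyError:
            raise RuntimeError('Invalid output_kind') from None
        return r_cls(config=distro.renderer_configs.get(output_kind))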
diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py
index 1a3c46a4..be304630 100644
--- a/cloudinit/cmd/devel/parser.py
+++ b/cloudinit/cmd/devel/parser.py
@@ -7,6 +7,7 @@
import argparse
from cloudinit.config import schema
+from . import hotplug_hook
from . import net_convert
from . import render
from . import make_mime
@@ -21,6 +22,8 @@ def get_parser(parser=None):
subparsers.required = True
subcmds = [
+ (hotplug_hook.NAME, hotplug_hook.__doc__,
+ hotplug_hook.get_parser, hotplug_hook.handle_args),
('schema', 'Validate cloud-config files for document schema',
schema.get_parser, schema.handle_schema_args),
(net_convert.NAME, net_convert.__doc__,
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index baf1381f..21213a4a 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -210,6 +210,35 @@ def attempt_cmdline_url(path, network=True, cmdline=None):
(cmdline_name, url, path))
+def purge_cache_on_python_version_change(init):
+ """Purge the cache if python version changed on us.
+
+ There could be changes not represented in our cache (obj.pkl) after we
+ upgrade to a new version of python, so at that point clear the cache
+ """
+ current_python_version = '%d.%d' % (
+ sys.version_info.major, sys.version_info.minor
+ )
+ python_version_path = os.path.join(
+ init.paths.get_cpath('data'), 'python-version'
+ )
+ if os.path.exists(python_version_path):
+ cached_python_version = open(python_version_path).read()
+ # The Python version has changed out from under us, anything that was
+ # pickled previously is likely useless due to API changes.
+ if cached_python_version != current_python_version:
+ LOG.debug('Python version change detected. Purging cache')
+ init.purge_cache(True)
+ util.write_file(python_version_path, current_python_version)
+ else:
+ if os.path.exists(init.paths.get_ipath_cur('obj_pkl')):
+ LOG.info(
+ 'Writing python-version file. '
+ 'Cache compatibility status is currently unknown.'
+ )
+ util.write_file(python_version_path, current_python_version)
+
+
def main_init(name, args):
deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
if args.local:
@@ -276,6 +305,7 @@ def main_init(name, args):
util.logexc(LOG, "Failed to initialize, likely bad things to come!")
# Stage 4
path_helper = init.paths
+ purge_cache_on_python_version_change(init)
mode = sources.DSMODE_LOCAL if args.local else sources.DSMODE_NETWORK
if mode == sources.DSMODE_NETWORK:
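Note that the marker stores only major.minor, so a point release never purges. A rough illustration of the comparison performed above:

    import sys

    # e.g. '3.8' cached vs '3.9' after upgrade -> purge;
    # 3.9.1 -> 3.9.5 yields '3.9' == '3.9' -> cache kept.
    current_python_version = '%d.%d' % (
        sys.version_info.major, sys.version_info.minor
    )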
diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
index 78b27441..1f5975b0 100644
--- a/cloudinit/cmd/tests/test_main.py
+++ b/cloudinit/cmd/tests/test_main.py
@@ -17,6 +17,8 @@ myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand')
class TestMain(FilesystemMockingTestCase):
+ with_logs = True
+ allowed_subp = False
def setUp(self):
super(TestMain, self).setUp()
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index bb8a1278..0c9c7925 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -57,6 +57,15 @@ mirror_property = {
},
'search_dns': {
'type': 'boolean',
+ },
+ 'keyid': {
+ 'type': 'string'
+ },
+ 'key': {
+ 'type': 'string'
+ },
+ 'keyserver': {
+ 'type': 'string'
}
}
}
@@ -228,6 +237,15 @@ schema = {
key, the search pattern will be
``<distro>-security-mirror``.
+ Each mirror may also specify a key to import via
+ any of the following optional keys:
+
+ - ``keyid``: a key to import via shortid or \
+ fingerprint.
+ - ``key``: a raw PGP key.
+ - ``keyserver``: alternate keyserver to pull \
+ ``keyid`` key from.
+
If no mirrors are specified, or all lookups fail,
then default mirrors defined in the datasource
are used. If none are present in the datasource
@@ -453,6 +471,7 @@ def apply_apt(cfg, cloud, target):
LOG.debug("Apt Mirror info: %s", mirrors)
if util.is_false(cfg.get('preserve_sources_list', False)):
+ add_mirror_keys(cfg, target)
generate_sources_list(cfg, release, mirrors, cloud)
rename_apt_lists(mirrors, target, arch)
@@ -660,6 +679,13 @@ def disable_suites(disabled, src, release):
return retsrc
+def add_mirror_keys(cfg, target):
+ """Adds any keys included in the primary/security mirror clauses"""
+ for key in ('primary', 'security'):
+ for mirror in cfg.get(key, []):
+ add_apt_key(mirror, target)
+
+
def generate_sources_list(cfg, release, mirrors, cloud):
"""generate_sources_list
create a source.list file based on a custom or default template
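A hedged example of the mirror shape add_mirror_keys() walks — the uri and keyid values below are made up; only the keyid/key/keyserver field names come from the schema change above:

    cfg = {
        'primary': [{
            'arches': ['default'],
            'uri': 'http://mymirror.example.com/ubuntu/',  # hypothetical
            'keyid': '0x5F4DF985',                         # hypothetical
        }]
    }
    # add_mirror_keys(cfg, target) iterates the 'primary' and 'security'
    # lists and hands each mirror dict to add_apt_key().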
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 9f338ad1..9f5525a1 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -224,6 +224,10 @@ def device_part_info(devpath):
freebsd_part = "/dev/" + util.find_freebsd_part(devpath)
m = re.search('^(/dev/.+)p([0-9])$', freebsd_part)
return (m.group(1), m.group(2))
+ elif util.is_DragonFlyBSD():
+ dragonflybsd_part = "/dev/" + util.find_dragonflybsd_part(devpath)
+ m = re.search('^(/dev/.+)s([0-9])$', dragonflybsd_part)
+ return (m.group(1), m.group(2))
if not os.path.exists(syspath):
raise ValueError("%s had no syspath (%s)" % (devpath, syspath))
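A worked example of the new DragonFlyBSD branch: slices carry an 's' suffix, so the regex splits a (hypothetical) devpath into disk and slice number:

    import re

    m = re.search('^(/dev/.+)s([0-9])$', '/dev/vbd0s1')
    assert (m.group(1), m.group(2)) == ('/dev/vbd0', '1')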
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 41c278ff..acf3251d 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -25,7 +25,7 @@ frequency = PER_INSTANCE
NTP_CONF = '/etc/ntp.conf'
NR_POOL_SERVERS = 4
distros = ['almalinux', 'alpine', 'centos', 'debian', 'fedora', 'opensuse',
- 'rhel', 'sles', 'ubuntu']
+ 'photon', 'rhel', 'rocky', 'sles', 'ubuntu']
NTP_CLIENT_CONFIG = {
'chrony': {
@@ -80,24 +80,37 @@ DISTRO_CLIENT_CONFIG = {
'confpath': '/etc/chrony/chrony.conf',
},
},
- 'rhel': {
+ 'opensuse': {
+ 'chrony': {
+ 'service_name': 'chronyd',
+ },
'ntp': {
+ 'confpath': '/etc/ntp.conf',
'service_name': 'ntpd',
},
- 'chrony': {
- 'service_name': 'chronyd',
+ 'systemd-timesyncd': {
+ 'check_exe': '/usr/lib/systemd/systemd-timesyncd',
},
},
- 'opensuse': {
+ 'photon': {
'chrony': {
'service_name': 'chronyd',
},
'ntp': {
- 'confpath': '/etc/ntp.conf',
'service_name': 'ntpd',
+ 'confpath': '/etc/ntp.conf'
},
'systemd-timesyncd': {
'check_exe': '/usr/lib/systemd/systemd-timesyncd',
+ 'confpath': '/etc/systemd/timesyncd.conf',
+ },
+ },
+ 'rhel': {
+ 'ntp': {
+ 'service_name': 'ntpd',
+ },
+ 'chrony': {
+ 'service_name': 'chronyd',
},
},
'sles': {
@@ -551,7 +564,6 @@ def handle(name, cfg, cloud, log, _args):
# Select which client is going to be used and get the configuration
ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'),
cloud.distro)
-
# Allow user ntp config to override distro configurations
ntp_client_config = util.mergemanydict(
[ntp_client_config, ntp_cfg.get('config', {})], reverse=True)
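To make the override order concrete, a small sketch of the mergemanydict() call retained at the end of this hunk, with illustrative values; per the in-tree comment, reverse=True lets the user's ntp:config keys win:

    from cloudinit import util

    distro_cfg = {'service_name': 'chronyd', 'confpath': '/etc/chrony.conf'}
    user_cfg = {'confpath': '/etc/chrony/chrony.conf'}
    merged = util.mergemanydict([distro_cfg, user_cfg], reverse=True)
    # merged == {'service_name': 'chronyd',
    #            'confpath': '/etc/chrony/chrony.conf'}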
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 9afbb847..990a6939 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -85,6 +85,10 @@ def _resize_zfs(mount_point, devpth):
return ('zpool', 'online', '-e', mount_point, devpth)
+def _resize_hammer2(mount_point, devpth):
+ return ('hammer2', 'growfs', mount_point)
+
+
def _can_skip_resize_ufs(mount_point, devpth):
# possible errors cases on the code-path to growfs -N following:
# https://github.com/freebsd/freebsd/blob/HEAD/sbin/growfs/growfs.c
@@ -113,6 +117,7 @@ RESIZE_FS_PREFIXES_CMDS = [
('xfs', _resize_xfs),
('ufs', _resize_ufs),
('zfs', _resize_zfs),
+ ('hammer2', _resize_hammer2),
]
RESIZE_FS_PRECHECK_CMDS = {
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 466dad03..c51967e2 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -30,7 +30,7 @@ are configured correctly.
**Module frequency:** per instance
-**Supported distros:** alpine, fedora, rhel, sles
+**Supported distros:** alpine, fedora, photon, rhel, sles
**Config keys**::
@@ -47,18 +47,23 @@ are configured correctly.
"""
from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
from cloudinit import templater
+from cloudinit.settings import PER_INSTANCE
from cloudinit import util
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
-distros = ['alpine', 'fedora', 'opensuse', 'rhel', 'sles']
+distros = ['alpine', 'fedora', 'opensuse', 'photon', 'rhel', 'sles']
+
+RESOLVE_CONFIG_TEMPLATE_MAP = {
+ '/etc/resolv.conf': 'resolv.conf',
+ '/etc/systemd/resolved.conf': 'systemd.resolved.conf',
+}
-def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
+def generate_resolv_conf(template_fn, params, target_fname):
flags = []
false_flags = []
@@ -104,12 +109,18 @@ def handle(name, cfg, cloud, log, _args):
if "resolv_conf" not in cfg:
log.warning("manage_resolv_conf True but no parameters provided!")
- template_fn = cloud.get_template_filename('resolv.conf')
- if not template_fn:
+ try:
+ template_fn = cloud.get_template_filename(
+ RESOLVE_CONFIG_TEMPLATE_MAP[cloud.distro.resolve_conf_fn])
+ except KeyError:
log.warning("No template found, not rendering /etc/resolv.conf")
return
- generate_resolv_conf(template_fn=template_fn, params=cfg["resolv_conf"])
+ generate_resolv_conf(
+ template_fn=template_fn,
+ params=cfg["resolv_conf"],
+ target_fname=cloud.distro.resolve_conf_fn
+ )
return
# vi: ts=4 expandtab
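The effect of the new map, sketched for the two layouts involved (photon sets resolve_conf_fn to the systemd-resolved path; every other distro keeps the classic one):

    RESOLVE_CONFIG_TEMPLATE_MAP = {
        '/etc/resolv.conf': 'resolv.conf',
        '/etc/systemd/resolved.conf': 'systemd.resolved.conf',
    }

    # photon: resolve_conf_fn == '/etc/systemd/resolved.conf'
    assert (RESOLVE_CONFIG_TEMPLATE_MAP['/etc/systemd/resolved.conf']
            == 'systemd.resolved.conf')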
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index db513ed7..67f09686 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -18,7 +18,7 @@ entry, the config entry will be skipped.
**Module frequency:** per always
-**Supported distros:** almalinux, centos, fedora, rhel
+**Supported distros:** almalinux, centos, fedora, photon, rhel, rocky
**Config keys**::
@@ -36,7 +36,7 @@ from configparser import ConfigParser
from cloudinit import util
-distros = ['almalinux', 'centos', 'fedora', 'rhel']
+distros = ['almalinux', 'centos', 'fedora', 'photon', 'rhel', 'rocky']
def _canonicalize_id(repo_id):
diff --git a/cloudinit/config/tests/test_resolv_conf.py b/cloudinit/config/tests/test_resolv_conf.py
index 6546a0b5..45a06c22 100644
--- a/cloudinit/config/tests/test_resolv_conf.py
+++ b/cloudinit/config/tests/test_resolv_conf.py
@@ -1,9 +1,8 @@
-from unittest import mock
-
import pytest
+from unittest import mock
from cloudinit.config.cc_resolv_conf import generate_resolv_conf
-
+from tests.unittests.test_distros.test_create_users import MyBaseDistro
EXPECTED_HEADER = """\
# Your system has been configured with 'manage-resolv-conf' set to true.
@@ -14,22 +13,28 @@ EXPECTED_HEADER = """\
class TestGenerateResolvConf:
+
+ dist = MyBaseDistro()
+ tmpl_fn = "templates/resolv.conf.tmpl"
+
@mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
- def test_default_target_fname_is_etc_resolvconf(self, m_render_to_file):
- generate_resolv_conf("templates/resolv.conf.tmpl", mock.MagicMock())
+ def test_dist_resolv_conf_fn(self, m_render_to_file):
+ self.dist.resolve_conf_fn = "/tmp/resolv-test.conf"
+ generate_resolv_conf(self.tmpl_fn,
+ mock.MagicMock(),
+ self.dist.resolve_conf_fn)
assert [
- mock.call(mock.ANY, "/etc/resolv.conf", mock.ANY)
+ mock.call(mock.ANY, self.dist.resolve_conf_fn, mock.ANY)
] == m_render_to_file.call_args_list
@mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file")
def test_target_fname_is_used_if_passed(self, m_render_to_file):
- generate_resolv_conf(
- "templates/resolv.conf.tmpl", mock.MagicMock(), "/use/this/path"
- )
+ path = "/use/this/path"
+ generate_resolv_conf(self.tmpl_fn, mock.MagicMock(), path)
assert [
- mock.call(mock.ANY, "/use/this/path", mock.ANY)
+ mock.call(mock.ANY, path, mock.ANY)
] == m_render_to_file.call_args_list
# Patch in templater so we can assert on the actual generated content
@@ -75,7 +80,8 @@ class TestGenerateResolvConf:
def test_flags_and_options(
self, m_write_file, params, expected_extra_line
):
- generate_resolv_conf("templates/resolv.conf.tmpl", params)
+ target_fn = "/etc/resolv.conf"
+ generate_resolv_conf(self.tmpl_fn, params, target_fn)
expected_content = EXPECTED_HEADER
if expected_extra_line is not None:
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 107b928c..7bdf2197 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -16,13 +16,16 @@ import stat
import string
import urllib.parse
from io import StringIO
+from typing import Any, Mapping
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import net
+from cloudinit.net import activators
from cloudinit.net import eni
from cloudinit.net import network_state
from cloudinit.net import renderers
+from cloudinit.net.network_state import parse_net_config_data
from cloudinit import persistence
from cloudinit import ssh_util
from cloudinit import type_utils
@@ -46,7 +49,8 @@ OSFAMILIES = {
'debian': ['debian', 'ubuntu'],
'freebsd': ['freebsd'],
'gentoo': ['gentoo'],
- 'redhat': ['almalinux', 'amazon', 'centos', 'fedora', 'rhel'],
+ 'redhat': ['almalinux', 'amazon', 'centos', 'fedora', 'photon', 'rhel',
+ 'rocky'],
'suse': ['opensuse', 'sles'],
}
@@ -71,7 +75,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
hostname_conf_fn = "/etc/hostname"
tz_zone_dir = "/usr/share/zoneinfo"
init_cmd = ['service'] # systemctl, service etc
- renderer_configs = {}
+ renderer_configs = {} # type: Mapping[str, Mapping[str, Any]]
_preferred_ntp_clients = None
networking_cls = LinuxNetworking
# This is used by self.shutdown_command(), and can be overridden in
@@ -80,6 +84,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
_ci_pkl_version = 1
prefer_fqdn = False
+ resolve_conf_fn = "/etc/resolv.conf"
def __init__(self, name, cfg, paths):
self._paths = paths
@@ -104,14 +109,12 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
raise NotImplementedError()
def _write_network(self, settings):
- raise RuntimeError(
+ """Deprecated. Remove if/when arch and gentoo support renderers."""
+ raise NotImplementedError(
"Legacy function '_write_network' was called in distro '%s'.\n"
"_write_network_config needs implementation.\n" % self.name)
- def _write_network_config(self, settings):
- raise NotImplementedError()
-
- def _supported_write_network_config(self, network_config):
+ def _write_network_state(self, network_state):
priority = util.get_cfg_by_path(
self._cfg, ('network', 'renderers'), None)
@@ -119,8 +122,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
LOG.debug("Selected renderer '%s' from priority list: %s",
name, priority)
renderer = render_cls(config=self.renderer_configs.get(name))
- renderer.render_network_config(network_config)
- return []
+ renderer.render_network_state(network_state)
def _find_tz_file(self, tz):
tz_file = os.path.join(self.tz_zone_dir, str(tz))
@@ -172,6 +174,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
mirror_info=arch_info)
def apply_network(self, settings, bring_up=True):
+ """Deprecated. Remove if/when arch and gentoo support renderers."""
# this applies network where 'settings' is interfaces(5) style
# it is obsolete compared to apply_network_config
# Write it out
@@ -186,6 +189,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
return False
def _apply_network_from_network_config(self, netconfig, bring_up=True):
+ """Deprecated. Remove if/when arch and gentoo support renderers."""
distro = self.__class__
LOG.warning("apply_network_config is not currently implemented "
"for distribution '%s'. Attempting to use apply_network",
@@ -202,12 +206,20 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
def generate_fallback_config(self):
return net.generate_fallback_config()
- def apply_network_config(self, netconfig, bring_up=False):
- # apply network config netconfig
+ def apply_network_config(self, netconfig, bring_up=False) -> bool:
+ """Apply the network config.
+
+ If bring_up is True, select a network activator and attempt to
+ bring up the interfaces in the parsed network state.
+
+ Returns True if any devices failed to come up, otherwise False.
+ """
# This method is preferred to apply_network which only takes
# a much less complete network config format (interfaces(5)).
+ network_state = parse_net_config_data(netconfig)
try:
- dev_names = self._write_network_config(netconfig)
+ self._write_network_state(network_state)
except NotImplementedError:
# backwards compat until all distros have apply_network_config
return self._apply_network_from_network_config(
@@ -215,7 +227,8 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
# Now try to bring them up
if bring_up:
- return self._bring_up_interfaces(dev_names)
+ network_activator = activators.select_activator()
+ network_activator.bring_up_all_interfaces(network_state)
return False
def apply_network_config_names(self, netconfig):
@@ -391,20 +404,11 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
return self._preferred_ntp_clients
def _bring_up_interface(self, device_name):
- cmd = ['ifup', device_name]
- LOG.debug("Attempting to run bring up interface %s using command %s",
- device_name, cmd)
- try:
- (_out, err) = subp.subp(cmd)
- if len(err):
- LOG.warning("Running %s resulted in stderr output: %s",
- cmd, err)
- return True
- except subp.ProcessExecutionError:
- util.logexc(LOG, "Running interface command %s failed", cmd)
- return False
+ """Deprecated. Remove if/when arch and gentoo support renderers."""
+ raise NotImplementedError
def _bring_up_interfaces(self, device_names):
+ """Deprecated. Remove if/when arch and gentoo support renderers."""
am_failed = 0
for d in device_names:
if not self._bring_up_interface(d):
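A condensed sketch of the apply path after this refactor, using a minimal hypothetical v2 network config; these are the same calls the hunk above makes:

    from cloudinit.net import activators
    from cloudinit.net.network_state import parse_net_config_data

    netconfig = {'version': 2, 'ethernets': {'eth0': {'dhcp4': True}}}
    network_state = parse_net_config_data(netconfig)
    # distro._write_network_state(network_state) renders via the chosen
    # renderer; with bring_up=True an activator then takes over:
    activator = activators.select_activator()
    activator.bring_up_all_interfaces(network_state)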
diff --git a/cloudinit/distros/alpine.py b/cloudinit/distros/alpine.py
index ca5bfe80..e4bed5a2 100644
--- a/cloudinit/distros/alpine.py
+++ b/cloudinit/distros/alpine.py
@@ -73,19 +73,6 @@ class Distro(distros.Distro):
self.update_package_sources()
self.package_command('add', pkgs=pkglist)
- def _write_network_config(self, netconfig):
- return self._supported_write_network_config(netconfig)
-
- def _bring_up_interfaces(self, device_names):
- use_all = False
- for d in device_names:
- if d == 'all':
- use_all = True
- if use_all:
- return distros.Distro._bring_up_interface(self, '-a')
- else:
- return distros.Distro._bring_up_interfaces(self, device_names)
-
def _write_hostname(self, your_hostname, out_fn):
conf = None
try:
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index f8385f7f..c9acb11f 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -25,7 +25,6 @@ LOG = logging.getLogger(__name__)
class Distro(distros.Distro):
locale_gen_fn = "/etc/locale.gen"
network_conf_dir = "/etc/netctl"
- resolve_conf_fn = "/etc/resolv.conf"
init_cmd = ['systemctl'] # init scripts
renderer_configs = {
"netplan": {"netplan_path": "/etc/netplan/50-cloud-init.yaml",
@@ -62,9 +61,9 @@ class Distro(distros.Distro):
self.update_package_sources()
self.package_command('', pkgs=pkglist)
- def _write_network_config(self, netconfig):
+ def _write_network_state(self, network_state):
try:
- return self._supported_write_network_config(netconfig)
+ super()._write_network_state(network_state)
except RendererNotFoundError as e:
# Fall back to old _write_network
raise NotImplementedError from e
@@ -102,12 +101,6 @@ class Distro(distros.Distro):
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
- def _bring_up_interfaces(self, device_names):
- for d in device_names:
- if not self._bring_up_interface(d):
- return False
- return True
-
def _write_hostname(self, your_hostname, out_fn):
conf = None
try:
diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py
index f717a667..c2fc1e0b 100644
--- a/cloudinit/distros/bsd.py
+++ b/cloudinit/distros/bsd.py
@@ -120,9 +120,6 @@ class BSD(distros.Distro):
# Allow the output of this to flow outwards (ie not be captured)
subp.subp(cmd, env=self._get_pkg_cmd_environ(), capture=False)
- def _write_network_config(self, netconfig):
- return self._supported_write_network_config(netconfig)
-
def set_timezone(self, tz):
distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 844aaf21..089e0c3e 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -111,19 +111,9 @@ class Distro(distros.Distro):
self.update_package_sources()
self.package_command('install', pkgs=pkglist)
- def _write_network_config(self, netconfig):
+ def _write_network_state(self, network_state):
_maybe_remove_legacy_eth0()
- return self._supported_write_network_config(netconfig)
-
- def _bring_up_interfaces(self, device_names):
- use_all = False
- for d in device_names:
- if d == 'all':
- use_all = True
- if use_all:
- return distros.Distro._bring_up_interface(self, '--all')
- else:
- return distros.Distro._bring_up_interfaces(self, device_names)
+ return super()._write_network_state(network_state)
def _write_hostname(self, your_hostname, out_fn):
conf = None
diff --git a/cloudinit/distros/dragonflybsd.py b/cloudinit/distros/dragonflybsd.py
new file mode 100644
index 00000000..2d825518
--- /dev/null
+++ b/cloudinit/distros/dragonflybsd.py
@@ -0,0 +1,12 @@
+# Copyright (C) 2020-2021 Gonéri Le Bouder
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import cloudinit.distros.freebsd
+
+
+class Distro(cloudinit.distros.freebsd.Distro):
+ home_dir = '/home'
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index 9659843f..d94a52b8 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -18,6 +18,12 @@ LOG = logging.getLogger(__name__)
class Distro(cloudinit.distros.bsd.BSD):
+ """
+ Distro subclass for FreeBSD.
+
+ (N.B. DragonFlyBSD inherits from this class.)
+ """
+
usr_lib_exec = '/usr/local/lib'
login_conf_fn = '/etc/login.conf'
login_conf_fn_bak = '/etc/login.conf.orig'
@@ -28,6 +34,7 @@ class Distro(cloudinit.distros.bsd.BSD):
pkg_cmd_update_prefix = ["pkg", "update"]
pkg_cmd_upgrade_prefix = ["pkg", "upgrade"]
prefer_fqdn = True # See rc.conf(5) in FreeBSD
+ home_dir = '/usr/home'
def _get_add_member_to_group_cmd(self, member_name, group_name):
return ['pw', 'usermod', '-n', member_name, '-G', group_name]
@@ -66,9 +73,12 @@ class Distro(cloudinit.distros.bsd.BSD):
pw_useradd_cmd.append('-d/nonexistent')
log_pw_useradd_cmd.append('-d/nonexistent')
else:
- pw_useradd_cmd.append('-d/usr/home/%s' % name)
+ pw_useradd_cmd.append('-d{home_dir}/{name}'.format(
+ home_dir=self.home_dir, name=name))
pw_useradd_cmd.append('-m')
- log_pw_useradd_cmd.append('-d/usr/home/%s' % name)
+ log_pw_useradd_cmd.append('-d{home_dir}/{name}'.format(
+ home_dir=self.home_dir, name=name))
+
log_pw_useradd_cmd.append('-m')
# Run the command
@@ -155,4 +165,5 @@ class Distro(cloudinit.distros.bsd.BSD):
"update-sources", self.package_command,
["update"], freq=PER_INSTANCE)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index e9b82602..68c03e7f 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -23,7 +23,6 @@ LOG = logging.getLogger(__name__)
class Distro(distros.Distro):
locale_conf_fn = '/etc/locale.gen'
network_conf_fn = '/etc/conf.d/net'
- resolve_conf_fn = '/etc/resolv.conf'
hostname_conf_fn = '/etc/conf.d/hostname'
init_cmd = ['rc-service'] # init scripts
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
index 7ca0ef99..b4193ac2 100644
--- a/cloudinit/distros/opensuse.py
+++ b/cloudinit/distros/opensuse.py
@@ -27,7 +27,6 @@ class Distro(distros.Distro):
locale_conf_fn = '/etc/sysconfig/language'
network_conf_fn = '/etc/sysconfig/network/config'
network_script_tpl = '/etc/sysconfig/network/ifcfg-%s'
- resolve_conf_fn = '/etc/resolv.conf'
route_conf_tpl = '/etc/sysconfig/network/ifroute-%s'
systemd_hostname_conf_fn = '/etc/hostname'
systemd_locale_conf_fn = '/etc/locale.conf'
@@ -117,12 +116,6 @@ class Distro(distros.Distro):
self._runner.run("update-sources", self.package_command,
['refresh'], freq=PER_INSTANCE)
- def _bring_up_interfaces(self, device_names):
- if device_names and 'all' in device_names:
- raise RuntimeError(('Distro %s can not translate '
- 'the device name "all"') % (self.name))
- return distros.Distro._bring_up_interfaces(self, device_names)
-
def _read_hostname(self, filename, default=None):
if self.uses_systemd() and filename.endswith('/previous-hostname'):
return util.load_file(filename).strip()
@@ -175,9 +168,6 @@ class Distro(distros.Distro):
conf.set_hostname(hostname)
util.write_file(out_fn, str(conf), 0o644)
- def _write_network_config(self, netconfig):
- return self._supported_write_network_config(netconfig)
-
@property
def preferred_ntp_clients(self):
"""The preferred ntp client is dependent on the version."""
diff --git a/cloudinit/distros/photon.py b/cloudinit/distros/photon.py
new file mode 100644
index 00000000..0ced7b5f
--- /dev/null
+++ b/cloudinit/distros/photon.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python3
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2021 VMware Inc.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import util
+from cloudinit import subp
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit.settings import PER_INSTANCE
+from cloudinit.distros import rhel_util as rhutil
+from cloudinit.distros.parsers.hostname import HostnameConf
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(distros.Distro):
+ hostname_conf_fn = '/etc/hostname'
+ network_conf_dir = '/etc/systemd/network/'
+ systemd_locale_conf_fn = '/etc/locale.conf'
+ resolve_conf_fn = '/etc/systemd/resolved.conf'
+
+ renderer_configs = {
+ 'networkd': {
+ 'resolv_conf_fn': resolve_conf_fn,
+ 'network_conf_dir': network_conf_dir,
+ }
+ }
+
+ # Should be fqdn if we can use it
+ prefer_fqdn = True
+
+ def __init__(self, name, cfg, paths):
+ distros.Distro.__init__(self, name, cfg, paths)
+ # This will be used to restrict certain
+ # calls from repeatedly happening (when they
+ # should only happen say once per instance...)
+ self._runner = helpers.Runners(paths)
+ self.osfamily = 'photon'
+ self.init_cmd = ['systemctl']
+
+ def exec_cmd(self, cmd, capture=False):
+ LOG.debug('Attempting to run: %s', cmd)
+ try:
+ (out, err) = subp.subp(cmd, capture=capture)
+ if err:
+ LOG.warning('Running %s resulted in stderr output: %s',
+ cmd, err)
+ return True, out, err
+ except subp.ProcessExecutionError:
+ util.logexc(LOG, 'Command %s failed', cmd)
+ return False, None, None
+
+ def apply_locale(self, locale, out_fn=None):
+ # This has a dependency on glibc-i18n; users need to manually install it
+ # and enable the option in cloud.cfg
+ if not out_fn:
+ out_fn = self.systemd_locale_conf_fn
+
+ locale_cfg = {
+ 'LANG': locale,
+ }
+
+ rhutil.update_sysconfig_file(out_fn, locale_cfg)
+
+ # rhutil will modify /etc/locale.conf
+ # For locale change to take effect, reboot is needed or we can restart
+ # systemd-localed. This is equivalent of localectl
+ cmd = ['systemctl', 'restart', 'systemd-localed']
+ _ret, _out, _err = self.exec_cmd(cmd)
+
+ def install_packages(self, pkglist):
+ # self.update_package_sources()
+ self.package_command('install', pkgs=pkglist)
+
+ def _bring_up_interfaces(self, device_names):
+ cmd = ['systemctl', 'restart', 'systemd-networkd', 'systemd-resolved']
+ LOG.debug('Attempting to run bring up interfaces using command %s',
+ cmd)
+ ret, _out, _err = self.exec_cmd(cmd)
+ return ret
+
+ def _write_hostname(self, hostname, out_fn):
+ conf = None
+ try:
+ # Try to update the previous one
+ # Let's see if we can read it first.
+ conf = HostnameConf(util.load_file(out_fn))
+ conf.parse()
+ except IOError:
+ pass
+ if not conf:
+ conf = HostnameConf('')
+ conf.set_hostname(hostname)
+ util.write_file(out_fn, str(conf), mode=0o644)
+
+ def _read_system_hostname(self):
+ sys_hostname = self._read_hostname(self.hostname_conf_fn)
+ return (self.hostname_conf_fn, sys_hostname)
+
+ def _read_hostname(self, filename, default=None):
+ _ret, out, _err = self.exec_cmd(['hostname'])
+
+ return out if out else default
+
+ def _get_localhost_ip(self):
+ return '127.0.1.1'
+
+ def set_timezone(self, tz):
+ distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
+
+ def package_command(self, command, args=None, pkgs=None):
+ if pkgs is None:
+ pkgs = []
+
+ cmd = ['tdnf', '-y']
+ if args and isinstance(args, str):
+ cmd.append(args)
+ elif args and isinstance(args, list):
+ cmd.extend(args)
+
+ cmd.append(command)
+
+ pkglist = util.expand_package_list('%s-%s', pkgs)
+ cmd.extend(pkglist)
+
+ # Allow the output of this to flow outwards (ie not be captured)
+ _ret, _out, _err = self.exec_cmd(cmd, capture=False)
+
+ def update_package_sources(self):
+ self._runner.run('update-sources', self.package_command,
+ ['makecache'], freq=PER_INSTANCE)
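A worked example of photon's package_command() assembly above (package specs hypothetical), assuming expand_package_list()'s usual name/version handling:

    from cloudinit import util

    pkglist = util.expand_package_list('%s-%s', ['vim', ['nginx', '1.20']])
    # pkglist == ['vim', 'nginx-1.20']; final command:
    cmd = ['tdnf', '-y', 'install'] + pkglist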
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 0c00a531..be5b3d24 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -36,7 +36,6 @@ class Distro(distros.Distro):
hostname_conf_fn = "/etc/sysconfig/network"
systemd_hostname_conf_fn = "/etc/hostname"
network_script_tpl = '/etc/sysconfig/network-scripts/ifcfg-%s'
- resolve_conf_fn = "/etc/resolv.conf"
tz_local_fn = "/etc/localtime"
usr_lib_exec = "/usr/libexec"
renderer_configs = {
@@ -66,9 +65,6 @@ class Distro(distros.Distro):
def install_packages(self, pkglist):
self.package_command('install', pkgs=pkglist)
- def _write_network_config(self, netconfig):
- return self._supported_write_network_config(netconfig)
-
def apply_locale(self, locale, out_fn=None):
if self.uses_systemd():
if not out_fn:
@@ -118,12 +114,6 @@ class Distro(distros.Distro):
else:
return default
- def _bring_up_interfaces(self, device_names):
- if device_names and 'all' in device_names:
- raise RuntimeError(('Distro %s can not translate '
- 'the device name "all"') % (self.name))
- return distros.Distro._bring_up_interfaces(self, device_names)
-
def set_timezone(self, tz):
tz_file = self._find_tz_file(tz)
if self.uses_systemd():
diff --git a/cloudinit/distros/rocky.py b/cloudinit/distros/rocky.py
new file mode 100644
index 00000000..edb3165d
--- /dev/null
+++ b/cloudinit/distros/rocky.py
@@ -0,0 +1,9 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.distros import rhel
+
+
+class Distro(rhel.Distro):
+ pass
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/event.py b/cloudinit/event.py
index f7b311fb..53ad4c25 100644
--- a/cloudinit/event.py
+++ b/cloudinit/event.py
@@ -1,17 +1,73 @@
# This file is part of cloud-init. See LICENSE file for license information.
-
"""Classes and functions related to event handling."""
+from enum import Enum
+from typing import Dict, Set
+
+from cloudinit import log as logging
+
+LOG = logging.getLogger(__name__)
+
-# Event types which can generate maintenance requests for cloud-init.
-class EventType(object):
- BOOT = "System boot"
- BOOT_NEW_INSTANCE = "New instance first boot"
+class EventScope(Enum):
+ # NETWORK is currently the only scope, but we want to leave room to
+ # grow other scopes (e.g., STORAGE) without having to make breaking
+ # changes to the user config
+ NETWORK = 'network'
- # TODO: Cloud-init will grow support for the follow event types:
- # UDEV
+ def __str__(self): # pylint: disable=invalid-str-returned
+ return self.value
+
+
+class EventType(Enum):
+ """Event types which can generate maintenance requests for cloud-init."""
+ # Cloud-init should grow support for the following event types:
+ # HOTPLUG
# METADATA_CHANGE
# USER_REQUEST
+ BOOT = "boot"
+ BOOT_NEW_INSTANCE = "boot-new-instance"
+ BOOT_LEGACY = "boot-legacy"
+ HOTPLUG = 'hotplug'
+
+ def __str__(self): # pylint: disable=invalid-str-returned
+ return self.value
+
+
+def userdata_to_events(user_config: dict) -> Dict[EventScope, Set[EventType]]:
+ """Convert userdata into update config format defined on datasource.
+
+ Userdata is in the form of (e.g):
+ {'network': {'when': ['boot']}}
+
+ DataSource config is in the form of:
+ {EventScope.Network: {EventType.BOOT}}
+
+ Take the first and return the second
+ """
+ update_config = {}
+ for scope, scope_list in user_config.items():
+ try:
+ new_scope = EventScope(scope)
+ except ValueError as e:
+ LOG.warning(
+ "%s! Update data will be ignored for '%s' scope",
+ str(e),
+ scope,
+ )
+ continue
+ try:
+ new_values = [EventType(x) for x in scope_list['when']]
+ except ValueError as e:
+ LOG.warning(
+ "%s! Update data will be ignored for '%s' scope",
+ str(e),
+ scope,
+ )
+ new_values = []
+ update_config[new_scope] = set(new_values)
+
+ return update_config
# vi: ts=4 expandtab
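The docstring's example, written out as a check (with hotplug added alongside boot to show multiple events per scope):

    from cloudinit.event import EventScope, EventType, userdata_to_events

    user_cfg = {'network': {'when': ['boot', 'hotplug']}}
    assert userdata_to_events(user_cfg) == {
        EventScope.NETWORK: {EventType.BOOT, EventType.HOTPLUG}
    }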
diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py
index aadfbf86..5033abbb 100644
--- a/cloudinit/handlers/jinja_template.py
+++ b/cloudinit/handlers/jinja_template.py
@@ -12,7 +12,7 @@ except ImportError:
from cloudinit import handlers
from cloudinit import log as logging
-from cloudinit.sources import INSTANCE_JSON_FILE
+from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE
from cloudinit.templater import render_string, MISSING_JINJA_PREFIX
from cloudinit.util import b64d, load_file, load_json, json_dumps
@@ -36,7 +36,8 @@ class JinjaTemplatePartHandler(handlers.Handler):
def handle_part(self, data, ctype, filename, payload, frequency, headers):
if ctype in handlers.CONTENT_SIGNALS:
return
- jinja_json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
+ jinja_json_file = os.path.join(
+ self.paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE)
rendered_payload = render_jinja_payload_from_file(
payload, filename, jinja_json_file)
if not rendered_payload:
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 2e5df042..10149907 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -8,7 +8,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-import collections
+import collections.abc
import io
import logging
import logging.config
@@ -78,7 +78,7 @@ def setupLogging(cfg=None):
for a_cfg in cfg['log_cfgs']:
if isinstance(a_cfg, str):
log_cfgs.append(a_cfg)
- elif isinstance(a_cfg, (collections.Iterable)):
+ elif isinstance(a_cfg, (collections.abc.Iterable)):
cfg_str = [str(c) for c in a_cfg]
log_cfgs.append('\n'.join(cfg_str))
else:
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 6b3b84f7..b827d41a 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -351,7 +351,7 @@ def device_devid(devname):
def get_devicelist():
- if util.is_FreeBSD():
+ if util.is_FreeBSD() or util.is_DragonFlyBSD():
return list(get_interfaces_by_mac().values())
try:
@@ -376,7 +376,7 @@ def is_disabled_cfg(cfg):
def find_fallback_nic(blacklist_drivers=None):
"""Return the name of the 'fallback' network device."""
- if util.is_FreeBSD():
+ if util.is_FreeBSD() or util.is_DragonFlyBSD():
return find_fallback_nic_on_freebsd(blacklist_drivers)
elif util.is_NetBSD() or util.is_OpenBSD():
return find_fallback_nic_on_netbsd_or_openbsd(blacklist_drivers)
@@ -816,7 +816,7 @@ def get_ib_interface_hwaddr(ifname, ethernet_format):
def get_interfaces_by_mac(blacklist_drivers=None) -> dict:
- if util.is_FreeBSD():
+ if util.is_FreeBSD() or util.is_DragonFlyBSD():
return get_interfaces_by_mac_on_freebsd(
blacklist_drivers=blacklist_drivers)
elif util.is_NetBSD():
diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py
new file mode 100644
index 00000000..84aaafc9
--- /dev/null
+++ b/cloudinit/net/activators.py
@@ -0,0 +1,252 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+import logging
+import os
+from abc import ABC, abstractmethod
+from typing import Iterable, List, Type
+
+from cloudinit import subp
+from cloudinit import util
+from cloudinit.net.eni import available as eni_available
+from cloudinit.net.netplan import available as netplan_available
+from cloudinit.net.network_state import NetworkState
+from cloudinit.net.sysconfig import NM_CFG_FILE
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _alter_interface(cmd, device_name) -> bool:
+ LOG.debug("Attempting command %s for device %s", cmd, device_name)
+ try:
+ (_out, err) = subp.subp(cmd)
+ if len(err):
+ LOG.warning("Running %s resulted in stderr output: %s",
+ cmd, err)
+ return True
+ except subp.ProcessExecutionError:
+ util.logexc(LOG, "Running interface command %s failed", cmd)
+ return False
+
+
+class NetworkActivator(ABC):
+ @staticmethod
+ @abstractmethod
+ def available() -> bool:
+ """Return True if activator is available, otherwise return False."""
+ raise NotImplementedError()
+
+ @staticmethod
+ @abstractmethod
+ def bring_up_interface(device_name: str) -> bool:
+ """Bring up interface.
+
+ Return True if successful, otherwise return False
+ """
+ raise NotImplementedError()
+
+ @staticmethod
+ @abstractmethod
+ def bring_down_interface(device_name: str) -> bool:
+ """Bring down interface.
+
+ Return True if successful, otherwise return False
+ """
+ raise NotImplementedError()
+
+ @classmethod
+ def bring_up_interfaces(cls, device_names: Iterable[str]) -> bool:
+ """Bring up specified list of interfaces.
+
+ Return True if successful, otherwise return False
+ """
+ return all(cls.bring_up_interface(device) for device in device_names)
+
+ @classmethod
+ def bring_up_all_interfaces(cls, network_state: NetworkState) -> bool:
+ """Bring up all interfaces.
+
+ Return True if successful, otherwise return False
+ """
+ return cls.bring_up_interfaces(
+ [i['name'] for i in network_state.iter_interfaces()]
+ )
+
+ @classmethod
+ def bring_down_interfaces(cls, device_names: Iterable[str]) -> bool:
+ """Bring down specified list of interfaces.
+
+ Return True if successful, otherwise return False
+ """
+ return all(cls.bring_down_interface(device) for device in device_names)
+
+ @classmethod
+ def bring_down_all_interfaces(cls, network_state: NetworkState) -> bool:
+ """Bring down all interfaces.
+
+ Return True if successful, otherwise return False
+ """
+ return cls.bring_down_interfaces(
+ [i['name'] for i in network_state.iter_interfaces()]
+ )
+
+
+class IfUpDownActivator(NetworkActivator):
+ # Note that we're not overriding bring_up_interfaces to pass something
+ # like ifup --all because it isn't supported everywhere.
+ # E.g., NetworkManager has a ifupdown plugin that requires the name
+ # of a specific connection.
+ @staticmethod
+ def available(target=None) -> bool:
+ """Return true if ifupdown can be used on this system."""
+ return eni_available(target=target)
+
+ @staticmethod
+ def bring_up_interface(device_name: str) -> bool:
+ """Bring up interface using ifup.
+
+ Return True if successful, otherwise return False
+ """
+ cmd = ['ifup', device_name]
+ return _alter_interface(cmd, device_name)
+
+ @staticmethod
+ def bring_down_interface(device_name: str) -> bool:
+ """Bring up interface using ifup.
+
+ Return True if successful, otherwise return False
+ """
+ cmd = ['ifdown', device_name]
+ return _alter_interface(cmd, device_name)
+
+
+class NetworkManagerActivator(NetworkActivator):
+ @staticmethod
+ def available(target=None) -> bool:
+ """ Return true if network manager can be used on this system."""
+ config_present = os.path.isfile(
+ subp.target_path(target, path=NM_CFG_FILE)
+ )
+ nmcli_present = subp.which('nmcli', target=target)
+ return config_present and bool(nmcli_present)
+
+ @staticmethod
+ def bring_up_interface(device_name: str) -> bool:
+ """Bring up interface using nmcli.
+
+ Return True if successful, otherwise return False
+ """
+ cmd = ['nmcli', 'connection', 'up', 'ifname', device_name]
+ return _alter_interface(cmd, device_name)
+
+ @staticmethod
+ def bring_down_interface(device_name: str) -> bool:
+ """Bring down interface using nmcli.
+
+ Return True if successful, otherwise return False
+ """
+ cmd = ['nmcli', 'connection', 'down', device_name]
+ return _alter_interface(cmd, device_name)
+
+
+class NetplanActivator(NetworkActivator):
+ NETPLAN_CMD = ['netplan', 'apply']
+
+ @staticmethod
+ def available(target=None) -> bool:
+ """ Return true if netplan can be used on this system."""
+ return netplan_available(target=target)
+
+ @staticmethod
+ def bring_up_interface(device_name: str) -> bool:
+ """Apply netplan config.
+
+ Return True if successful, otherwise return False
+ """
+ LOG.debug("Calling 'netplan apply' rather than "
+ "altering individual interfaces")
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+
+ @staticmethod
+ def bring_up_interfaces(device_names: Iterable[str]) -> bool:
+ """Apply netplan config.
+
+ Return True if successful, otherwise return False
+ """
+ LOG.debug("Calling 'netplan apply' rather than "
+ "altering individual interfaces")
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+
+ @staticmethod
+ def bring_up_all_interfaces(network_state: NetworkState) -> bool:
+ """Apply netplan config.
+
+ Return True if successful, otherwise return False
+ """
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+
+ @staticmethod
+ def bring_down_interface(device_name: str) -> bool:
+ """Apply netplan config.
+
+ Return True if successful, otherwise return False
+ """
+ LOG.debug("Calling 'netplan apply' rather than "
+ "altering individual interfaces")
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+
+ @staticmethod
+ def bring_down_interfaces(device_names: Iterable[str]) -> bool:
+ """Apply netplan config.
+
+ Return True if successful, otherwise return False
+ """
+ LOG.debug("Calling 'netplan apply' rather than "
+ "altering individual interfaces")
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+
+ @staticmethod
+ def bring_down_all_interfaces(network_state: NetworkState) -> bool:
+ """Apply netplan config.
+
+ Return True if successful, otherwise return False
+ """
+ return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+
+
+# This section is mostly copied and pasted from renderers.py. An abstract
+# version to encompass both seems overkill at this point
+DEFAULT_PRIORITY = [
+ IfUpDownActivator,
+ NetworkManagerActivator,
+ NetplanActivator,
+]
+
+
+def search_activator(
+ priority=None, target=None
+) -> List[Type[NetworkActivator]]:
+ if priority is None:
+ priority = DEFAULT_PRIORITY
+
+ unknown = [i for i in priority if i not in DEFAULT_PRIORITY]
+ if unknown:
+ raise ValueError(
+ "Unknown activators provided in priority list: %s" % unknown)
+
+ return [activator for activator in priority if activator.available(target)]
+
+
+def select_activator(priority=None, target=None) -> Type[NetworkActivator]:
+ found = search_activator(priority, target)
+ if not found:
+ if priority is None:
+ priority = DEFAULT_PRIORITY
+ tmsg = ""
+ if target and target != "/":
+ tmsg = " in target=%s" % target
+ raise RuntimeError(
+ "No available network activators found%s. Searched "
+ "through list: %s" % (tmsg, priority))
+ selected = found[0]
+ LOG.debug('Using selected activator: %s', selected)
+ return selected
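For context, a short sketch of how a caller might consume this module;
the interface name is illustrative, and select_activator() raises
RuntimeError when nothing in the priority list is available:

    from cloudinit.net.activators import select_activator

    # Picks the first available of ifupdown, NetworkManager, netplan.
    activator_cls = select_activator()
    if activator_cls.bring_down_interface('eth0'):
        activator_cls.bring_up_interface('eth0')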
diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py
index e34e0454..916cea32 100644
--- a/cloudinit/net/bsd.py
+++ b/cloudinit/net/bsd.py
@@ -33,7 +33,7 @@ class BSDRenderer(renderer.Renderer):
self.interface_configurations = {}
self._postcmds = config.get('postcmds', True)
- def _ifconfig_entries(self, settings, target=None):
+ def _ifconfig_entries(self, settings):
ifname_by_mac = net.get_interfaces_by_mac()
for interface in settings.iter_interfaces():
device_name = interface.get("name")
@@ -76,9 +76,10 @@ class BSDRenderer(renderer.Renderer):
self.interface_configurations[device_name] = {
'address': subnet.get('address'),
'netmask': subnet.get('netmask'),
+ 'mtu': subnet.get('mtu') or interface.get('mtu'),
}
- def _route_entries(self, settings, target=None):
+ def _route_entries(self, settings):
routes = list(settings.iter_routes())
for interface in settings.iter_interfaces():
subnets = interface.get("subnets", [])
@@ -101,7 +102,7 @@ class BSDRenderer(renderer.Renderer):
gateway = route.get('gateway')
self.set_route(network, netmask, gateway)
- def _resolve_conf(self, settings, target=None):
+ def _resolve_conf(self, settings):
nameservers = settings.dns_nameservers
searchdomains = settings.dns_searchdomains
for interface in settings.iter_interfaces():
@@ -114,11 +115,11 @@ class BSDRenderer(renderer.Renderer):
# fails.
try:
resolvconf = ResolvConf(util.load_file(subp.target_path(
- target, self.resolv_conf_fn)))
+ self.target, self.resolv_conf_fn)))
resolvconf.parse()
except IOError:
util.logexc(LOG, "Failed to parse %s, use new empty file",
- subp.target_path(target, self.resolv_conf_fn))
+ subp.target_path(self.target, self.resolv_conf_fn))
resolvconf = ResolvConf('')
resolvconf.parse()
@@ -136,10 +137,12 @@ class BSDRenderer(renderer.Renderer):
except ValueError:
util.logexc(LOG, "Failed to add search domain %s", domain)
util.write_file(
- subp.target_path(target, self.resolv_conf_fn),
+ subp.target_path(self.target, self.resolv_conf_fn),
str(resolvconf), 0o644)
def render_network_state(self, network_state, templates=None, target=None):
+ if target:
+ self.target = target
self._ifconfig_entries(settings=network_state)
self._route_entries(settings=network_state)
self._resolve_conf(settings=network_state)
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 4394c68b..9b94c9a0 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -173,7 +173,7 @@ def parse_dhcp_lease_file(lease_file):
@raises: InvalidDHCPLeaseFileError on empty or unparseable leasefile
content.
"""
- lease_regex = re.compile(r"lease {(?P<lease>[^}]*)}\n")
+ lease_regex = re.compile(r"lease {(?P<lease>.*?)}\n", re.DOTALL)
dhcp_leases = []
lease_content = util.load_file(lease_file)
if len(lease_content) == 0:
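The old pattern stopped at the first '}', so a lease whose body contains
a brace inside a quoted value (like the boot.php?mac=${netX} filename
exercised in the tests below) never matched. A minimal sketch of the
difference, with a made-up lease body:

    import re

    lease = 'lease {\n  filename "http://x/boot.php?mac=${netX}";\n}\n'
    old = re.compile(r"lease {(?P<lease>[^}]*)}\n")
    new = re.compile(r"lease {(?P<lease>.*?)}\n", re.DOTALL)

    assert old.search(lease) is None           # truncated at inner '}'
    assert 'filename' in new.search(lease).group('lease')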
diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py
index 0285dfec..f8faf240 100644
--- a/cloudinit/net/freebsd.py
+++ b/cloudinit/net/freebsd.py
@@ -19,18 +19,26 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
def write_config(self):
for device_name, v in self.interface_configurations.items():
+ net_config = 'DHCP'
if isinstance(v, dict):
- self.set_rc_config_value(
- 'ifconfig_' + device_name,
- v.get('address') + ' netmask ' + v.get('netmask'))
- else:
- self.set_rc_config_value('ifconfig_' + device_name, 'DHCP')
+ net_config = v.get('address') + ' netmask ' + v.get('netmask')
+ mtu = v.get('mtu')
+ if mtu:
+ net_config += (' mtu %d' % mtu)
+ self.set_rc_config_value('ifconfig_' + device_name, net_config)
def start_services(self, run=False):
if not run:
LOG.debug("freebsd generate postcmd disabled")
return
+ for dhcp_interface in self.dhcp_interfaces():
+ # Observed on DragonFlyBSD 6. If we use the "restart" parameter,
+ # the routes are not recreated.
+ subp.subp(['service', 'dhclient', 'stop', dhcp_interface],
+ rcs=[0, 1],
+ capture=True)
+
subp.subp(['service', 'netif', 'restart'], capture=True)
# On FreeBSD 10, the restart of routing and dhclient is likely to fail
# because
@@ -41,7 +49,7 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
subp.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1])
for dhcp_interface in self.dhcp_interfaces():
- subp.subp(['service', 'dhclient', 'restart', dhcp_interface],
+ subp.subp(['service', 'dhclient', 'start', dhcp_interface],
rcs=[0, 1],
capture=True)
@@ -56,4 +64,4 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
def available(target=None):
- return util.is_FreeBSD()
+ return util.is_FreeBSD() or util.is_DragonFlyBSD()
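With the MTU handling added above, a static entry in
interface_configurations now renders an rc.conf value of roughly this
shape (device name and addresses illustrative):

    ifconfig_vtnet0="10.0.0.2 netmask 255.255.255.0 mtu 9000"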
diff --git a/cloudinit/net/netbsd.py b/cloudinit/net/netbsd.py
index 71b38ee6..5f8881a5 100644
--- a/cloudinit/net/netbsd.py
+++ b/cloudinit/net/netbsd.py
@@ -22,9 +22,11 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
)
for device_name, v in self.interface_configurations.items():
if isinstance(v, dict):
- self.set_rc_config_value(
- 'ifconfig_' + device_name,
- v.get('address') + ' netmask ' + v.get('netmask'))
+ net_config = v.get('address') + ' netmask ' + v.get('netmask')
+ mtu = v.get('mtu')
+ if mtu:
+ net_config += (' mtu %d' % mtu)
+ self.set_rc_config_value('ifconfig_' + device_name, net_config)
def start_services(self, run=False):
if not run:
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 53347c83..41acf963 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -4,7 +4,12 @@ import copy
import os
from . import renderer
-from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2, IPV6_DYNAMIC_TYPES
+from .network_state import (
+ NetworkState,
+ subnet_is_ipv6,
+ NET_CONFIG_TO_V2,
+ IPV6_DYNAMIC_TYPES,
+)
from cloudinit import log as logging
from cloudinit import util
@@ -256,7 +261,7 @@ class Renderer(renderer.Renderer):
os.path.islink(SYS_CLASS_NET + iface)]:
subp.subp(cmd, capture=True)
- def _render_content(self, network_state):
+ def _render_content(self, network_state: NetworkState):
# if content already in netplan format, pass it back
if network_state.version == 2:
@@ -426,4 +431,5 @@ def network_state_to_netplan(network_state, header=None):
contents = renderer._render_content(network_state)
return header + contents
+
# vi: ts=4 expandtab
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index e8bf9e39..95b064f0 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -58,38 +58,6 @@ NET_CONFIG_TO_V2 = {
'bridge_waitport': None}}
-def parse_net_config_data(net_config, skip_broken=True):
- """Parses the config, returns NetworkState object
-
- :param net_config: curtin network config dict
- """
- state = None
- version = net_config.get('version')
- config = net_config.get('config')
- if version == 2:
- # v2 does not have explicit 'config' key so we
- # pass the whole net-config as-is
- config = net_config
-
- if version and config is not None:
- nsi = NetworkStateInterpreter(version=version, config=config)
- nsi.parse_config(skip_broken=skip_broken)
- state = nsi.get_network_state()
-
- return state
-
-
-def parse_net_config(path, skip_broken=True):
- """Parses a curtin network configuration file and
- return network state"""
- ns = None
- net_config = util.read_conf(path)
- if 'network' in net_config:
- ns = parse_net_config_data(net_config.get('network'),
- skip_broken=skip_broken)
- return ns
-
-
def from_state_file(state_file):
state = util.read_conf(state_file)
nsi = NetworkStateInterpreter()
@@ -237,6 +205,7 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
self._network_state = copy.deepcopy(self.initial_network_state)
self._network_state['config'] = config
self._parsed = False
+ self._interface_dns_map = {}
@property
def network_state(self):
@@ -310,6 +279,21 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
LOG.warning("Skipping invalid command: %s", command,
exc_info=True)
LOG.debug(self.dump_network_state())
+ for interface, dns in self._interface_dns_map.items():
+ iface = None
+ try:
+ iface = self._network_state['interfaces'][interface]
+ except KeyError as e:
+ raise ValueError(
+ 'Nameserver specified for interface {0}, '
+ 'but interface {0} does not exist!'.format(interface)
+ ) from e
+ if iface:
+ nameservers, search = dns
+ iface['dns'] = {
+ 'addresses': nameservers,
+ 'search': search,
+ }
def parse_config_v2(self, skip_broken=True):
for command_type, command in self._config.items():
@@ -526,21 +510,40 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
def handle_infiniband(self, command):
self.handle_physical(command)
- @ensure_command_keys(['address'])
- def handle_nameserver(self, command):
- dns = self._network_state.get('dns')
+ def _parse_dns(self, command):
+ nameservers = []
+ search = []
if 'address' in command:
addrs = command['address']
if not type(addrs) == list:
addrs = [addrs]
for addr in addrs:
- dns['nameservers'].append(addr)
+ nameservers.append(addr)
if 'search' in command:
paths = command['search']
if not isinstance(paths, list):
paths = [paths]
for path in paths:
- dns['search'].append(path)
+ search.append(path)
+ return nameservers, search
+
+ @ensure_command_keys(['address'])
+ def handle_nameserver(self, command):
+ dns = self._network_state.get('dns')
+ nameservers, search = self._parse_dns(command)
+ if 'interface' in command:
+ self._interface_dns_map[command['interface']] = (
+ nameservers, search
+ )
+ else:
+ dns['nameservers'].extend(nameservers)
+ dns['search'].extend(search)
+
+ @ensure_command_keys(['address'])
+ def _handle_individual_nameserver(self, command, iface):
+ _iface = self._network_state.get('interfaces')
+ nameservers, search = self._parse_dns(command)
+ _iface[iface]['dns'] = {'nameservers': nameservers, 'search': search}
@ensure_command_keys(['destination'])
def handle_route(self, command):
@@ -706,16 +709,17 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta):
def _v2_common(self, cfg):
LOG.debug('v2_common: handling config:\n%s', cfg)
- if 'nameservers' in cfg:
- search = cfg.get('nameservers').get('search', [])
- dns = cfg.get('nameservers').get('addresses', [])
- name_cmd = {'type': 'nameserver'}
- if len(search) > 0:
- name_cmd.update({'search': search})
- if len(dns) > 0:
- name_cmd.update({'addresses': dns})
- LOG.debug('v2(nameserver) -> v1(nameserver):\n%s', name_cmd)
- self.handle_nameserver(name_cmd)
+ for iface, dev_cfg in cfg.items():
+ if 'nameservers' in dev_cfg:
+ search = dev_cfg.get('nameservers').get('search', [])
+ dns = dev_cfg.get('nameservers').get('addresses', [])
+ name_cmd = {'type': 'nameserver'}
+ if len(search) > 0:
+ name_cmd.update({'search': search})
+ if len(dns) > 0:
+ name_cmd.update({'address': dns})
+ self.handle_nameserver(name_cmd)
+ self._handle_individual_nameserver(name_cmd, iface)
def _handle_bond_bridge(self, command, cmd_type=None):
"""Common handler for bond and bridge types"""
@@ -1052,4 +1056,31 @@ def mask_and_ipv4_to_bcast_addr(mask, ip):
return bcast_str
+def parse_net_config_data(net_config, skip_broken=True) -> NetworkState:
+ """Parses the config, returns NetworkState object
+
+ :param net_config: curtin network config dict
+ """
+ state = None
+ version = net_config.get('version')
+ config = net_config.get('config')
+ if version == 2:
+ # v2 does not have explicit 'config' key so we
+ # pass the whole net-config as-is
+ config = net_config
+
+ if version and config is not None:
+ nsi = NetworkStateInterpreter(version=version, config=config)
+ nsi.parse_config(skip_broken=skip_broken)
+ state = nsi.get_network_state()
+
+ if not state:
+ raise RuntimeError(
+ "No valid network_state object created from network config. "
+ "Did you specify the correct version?"
+ )
+
+ return state
+
+
# vi: ts=4 expandtab
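Since parse_net_config_data now raises instead of returning None, a
sketch of the new failure mode for a config that lacks a version key
(the dict below is illustrative):

    from cloudinit.net.network_state import parse_net_config_data

    try:
        parse_net_config_data({'config': []})  # no 'version' key
    except RuntimeError as e:
        print(e)  # No valid network_state object created ...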
diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py
new file mode 100644
index 00000000..2dffce59
--- /dev/null
+++ b/cloudinit/net/networkd.py
@@ -0,0 +1,259 @@
+#!/usr/bin/env python3
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2021 VMware Inc.
+#
+# Author: Shreenidhi Shedi <yesshedi@gmail.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+
+
+from . import renderer
+from cloudinit import util
+from cloudinit import subp
+from cloudinit import log as logging
+from collections import OrderedDict
+
+LOG = logging.getLogger(__name__)
+
+
+class CfgParser:
+ def __init__(self):
+ self.conf_dict = OrderedDict({
+ 'Match': [],
+ 'Link': [],
+ 'Network': [],
+ 'DHCPv4': [],
+ 'DHCPv6': [],
+ 'Address': [],
+ 'Route': [],
+ })
+
+ def update_section(self, sec, key, val):
+ for k in self.conf_dict.keys():
+ if k == sec:
+ self.conf_dict[k].append(key+'='+str(val))
+ # remove duplicates from list
+ self.conf_dict[k] = list(dict.fromkeys(self.conf_dict[k]))
+ self.conf_dict[k].sort()
+
+ def get_final_conf(self):
+ contents = ''
+ for k, v in self.conf_dict.items():
+ if not v:
+ continue
+ contents += '['+k+']\n'
+ for e in v:
+ contents += e + '\n'
+ contents += '\n'
+
+ return contents
+
+ def dump_data(self, target_fn):
+ if not target_fn:
+ LOG.warning('Target file not given')
+ return
+
+ contents = self.get_final_conf()
+ LOG.debug('Final content: %s', contents)
+ util.write_file(target_fn, contents)
+
+
+class Renderer(renderer.Renderer):
+ """
+ Renders network information in /etc/systemd/network
+
+ This Renderer is currently experimental and doesn't support all the
+ use cases supported by the other renderers yet.
+ """
+
+ def __init__(self, config=None):
+ if not config:
+ config = {}
+ self.resolved_conf = config.get('resolved_conf_fn',
+ '/etc/systemd/resolved.conf')
+ self.network_conf_dir = config.get('network_conf_dir',
+ '/etc/systemd/network/')
+
+ def generate_match_section(self, iface, cfg):
+ sec = 'Match'
+ match_dict = {
+ 'name': 'Name',
+ 'driver': 'Driver',
+ 'mac_address': 'MACAddress'
+ }
+
+ if not iface:
+ return
+
+ for k, v in match_dict.items():
+ if k in iface and iface[k]:
+ cfg.update_section(sec, v, iface[k])
+
+ return iface['name']
+
+ def generate_link_section(self, iface, cfg):
+ sec = 'Link'
+
+ if not iface:
+ return
+
+ if 'mtu' in iface and iface['mtu']:
+ cfg.update_section(sec, 'MTUBytes', iface['mtu'])
+
+ def parse_routes(self, conf, cfg):
+ sec = 'Route'
+ route_cfg_map = {
+ 'gateway': 'Gateway',
+ 'network': 'Destination',
+ 'metric': 'Metric',
+ }
+
+ # prefix is derived using netmask by network_state
+ prefix = ''
+ if 'prefix' in conf:
+ prefix = '/' + str(conf['prefix'])
+
+ for k, v in conf.items():
+ if k not in route_cfg_map:
+ continue
+ if k == 'network':
+ v += prefix
+ cfg.update_section(sec, route_cfg_map[k], v)
+
+ def parse_subnets(self, iface, cfg):
+ dhcp = 'no'
+ sec = 'Network'
+ for e in iface.get('subnets', []):
+ t = e['type']
+ if t == 'dhcp4' or t == 'dhcp':
+ if dhcp == 'no':
+ dhcp = 'ipv4'
+ elif dhcp == 'ipv6':
+ dhcp = 'yes'
+ elif t == 'dhcp6':
+ if dhcp == 'no':
+ dhcp = 'ipv6'
+ elif dhcp == 'ipv4':
+ dhcp = 'yes'
+ if 'routes' in e and e['routes']:
+ for i in e['routes']:
+ self.parse_routes(i, cfg)
+ if 'address' in e:
+ subnet_cfg_map = {
+ 'address': 'Address',
+ 'gateway': 'Gateway',
+ 'dns_nameservers': 'DNS',
+ 'dns_search': 'Domains',
+ }
+ for k, v in e.items():
+ if k == 'address':
+ if 'prefix' in e:
+ v += '/' + str(e['prefix'])
+ cfg.update_section('Address', subnet_cfg_map[k], v)
+ elif k == 'gateway':
+ cfg.update_section('Route', subnet_cfg_map[k], v)
+ elif k == 'dns_nameservers' or k == 'dns_search':
+ cfg.update_section(sec, subnet_cfg_map[k], ' '.join(v))
+
+ cfg.update_section(sec, 'DHCP', dhcp)
+
+ # This is to accommodate extra keys present in VMware config
+ def dhcp_domain(self, d, cfg):
+ for item in ['dhcp4domain', 'dhcp6domain']:
+ if item not in d:
+ continue
+ ret = str(d[item]).casefold()
+ try:
+ ret = util.translate_bool(ret)
+ ret = 'yes' if ret else 'no'
+ except ValueError:
+ if ret != 'route':
+ LOG.warning('Invalid %s value - %s', item, ret)
+ ret = 'no'
+ if item == 'dhcp4domain':
+ section = 'DHCPv4'
+ else:
+ section = 'DHCPv6'
+ cfg.update_section(section, 'UseDomains', ret)
+
+ def parse_dns(self, iface, cfg, ns):
+ sec = 'Network'
+
+ dns_cfg_map = {
+ 'search': 'Domains',
+ 'nameservers': 'DNS',
+ 'addresses': 'DNS',
+ }
+
+ dns = iface.get('dns')
+ if not dns and ns.version == 1:
+ dns = {
+ 'search': ns.dns_searchdomains,
+ 'nameservers': ns.dns_nameservers,
+ }
+ elif not dns and ns.version == 2:
+ return
+
+ for k, v in dns_cfg_map.items():
+ if k in dns and dns[k]:
+ cfg.update_section(sec, v, ' '.join(dns[k]))
+
+ def create_network_file(self, link, conf, nwk_dir):
+ net_fn_owner = 'systemd-network'
+
+ LOG.debug('Setting Networking Config for %s', link)
+
+ net_fn = nwk_dir + '10-cloud-init-' + link + '.network'
+ util.write_file(net_fn, conf)
+ util.chownbyname(net_fn, net_fn_owner, net_fn_owner)
+
+ def render_network_state(self, network_state, templates=None, target=None):
+ fp_nwkd = self.network_conf_dir
+ if target:
+ fp_nwkd = subp.target_path(target) + fp_nwkd
+
+ util.ensure_dir(os.path.dirname(fp_nwkd))
+
+ ret_dict = self._render_content(network_state)
+ for k, v in ret_dict.items():
+ self.create_network_file(k, v, fp_nwkd)
+
+ def _render_content(self, ns):
+ ret_dict = {}
+ for iface in ns.iter_interfaces():
+ cfg = CfgParser()
+
+ link = self.generate_match_section(iface, cfg)
+ self.generate_link_section(iface, cfg)
+ self.parse_subnets(iface, cfg)
+ self.parse_dns(iface, cfg, ns)
+
+ for route in ns.iter_routes():
+ self.parse_routes(route, cfg)
+
+ if ns.version == 2:
+ name = iface['name']
+ # network state doesn't give dhcp domain info
+ # using ns.config as a workaround here
+ self.dhcp_domain(ns.config['ethernets'][name], cfg)
+
+ ret_dict.update({link: cfg.get_final_conf()})
+
+ return ret_dict
+
+
+def available(target=None):
+ expected = ['systemctl']
+ search = ['/usr/bin', '/bin']
+ for p in expected:
+ if not subp.which(p, search=search, target=target):
+ return False
+ return True
+
+
+def network_state_to_networkd(ns):
+ renderer = Renderer({})
+ return renderer._render_content(ns)
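CfgParser accumulates deduplicated, sorted key=value lines per section
and emits only non-empty sections. A small sketch of the resulting
.network content (values illustrative):

    from cloudinit.net.networkd import CfgParser

    cfg = CfgParser()
    cfg.update_section('Match', 'Name', 'eth0')
    cfg.update_section('Network', 'DHCP', 'ipv4')
    cfg.update_section('Network', 'DHCP', 'ipv4')  # duplicate, dropped
    print(cfg.get_final_conf())
    # [Match]
    # Name=eth0
    #
    # [Network]
    # DHCP=ipv4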
diff --git a/cloudinit/net/openbsd.py b/cloudinit/net/openbsd.py
index 166d77e6..d87d8a4f 100644
--- a/cloudinit/net/openbsd.py
+++ b/cloudinit/net/openbsd.py
@@ -18,7 +18,7 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
content = 'dhcp\n'
elif isinstance(v, dict):
try:
- content = "inet {address} {netmask}\n".format(
+ content = "inet {address} {netmask}".format(
address=v['address'],
netmask=v['netmask']
)
@@ -26,12 +26,19 @@ class Renderer(cloudinit.net.bsd.BSDRenderer):
LOG.error(
"Invalid static configuration for %s",
device_name)
+ mtu = v.get("mtu")
+ if mtu:
+ content += (' mtu %d' % mtu)
+ content += "\n"
util.write_file(fn, content)
def start_services(self, run=False):
if not self._postcmds:
LOG.debug("openbsd generate postcmd disabled")
return
+ subp.subp(['pkill', 'dhclient'], capture=True, rcs=[0, 1])
+ subp.subp(['route', 'del', 'default'], capture=True, rcs=[0, 1])
+ subp.subp(['route', 'flush', 'default'], capture=True, rcs=[0, 1])
subp.subp(['sh', '/etc/netstart'], capture=True)
def set_route(self, network, netmask, gateway):
diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
index 2a61a7a8..27447bc2 100644
--- a/cloudinit/net/renderer.py
+++ b/cloudinit/net/renderer.py
@@ -28,6 +28,8 @@ filter_by_physical = filter_by_type('physical')
class Renderer(object):
+ def __init__(self, config=None):
+ pass
@staticmethod
def _render_persistent_net(network_state):
diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py
index e2de4d55..822b45de 100644
--- a/cloudinit/net/renderers.py
+++ b/cloudinit/net/renderers.py
@@ -1,9 +1,13 @@
# This file is part of cloud-init. See LICENSE file for license information.
+from typing import List, Tuple, Type
+
from . import eni
from . import freebsd
from . import netbsd
from . import netplan
+from . import networkd
+from . import renderer
from . import RendererNotFoundError
from . import openbsd
from . import sysconfig
@@ -13,15 +17,18 @@ NAME_TO_RENDERER = {
"freebsd": freebsd,
"netbsd": netbsd,
"netplan": netplan,
+ "networkd": networkd,
"openbsd": openbsd,
"sysconfig": sysconfig,
}
DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd",
- "netbsd", "openbsd"]
+ "netbsd", "openbsd", "networkd"]
-def search(priority=None, target=None, first=False):
+def search(
+ priority=None, target=None, first=False
+) -> List[Tuple[str, Type[renderer.Renderer]]]:
if priority is None:
priority = DEFAULT_PRIORITY
@@ -38,13 +45,13 @@ def search(priority=None, target=None, first=False):
if render_mod.available(target):
cur = (name, render_mod.Renderer)
if first:
- return cur
+ return [cur]
found.append(cur)
return found
-def select(priority=None, target=None):
+def select(priority=None, target=None) -> Tuple[str, Type[renderer.Renderer]]:
found = search(priority, target=target, first=True)
if not found:
if priority is None:
@@ -55,6 +62,6 @@ def select(priority=None, target=None):
raise RendererNotFoundError(
"No available network renderers found%s. Searched "
"through list: %s" % (tmsg, priority))
- return found
+ return found[0]
# vi: ts=4 expandtab
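After this change select() unambiguously returns a single
(name, Renderer) tuple while search() always returns a list, even with
first=True. A sketch of the calling convention, assuming at least one
renderer is available (select() raises RendererNotFoundError otherwise):

    from cloudinit.net import renderers

    name, renderer_cls = renderers.select(priority=['netplan', 'eni'])
    matches = renderers.search(first=True)  # list of at most one tuple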
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 089b44b2..8031cd3a 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -18,8 +18,8 @@ from .network_state import (
is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6, IPV6_DYNAMIC_TYPES)
LOG = logging.getLogger(__name__)
+KNOWN_DISTROS = ['almalinux', 'centos', 'fedora', 'rhel', 'rocky', 'suse']
NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf"
-KNOWN_DISTROS = ['almalinux', 'centos', 'fedora', 'rhel', 'suse']
def _make_header(sep='#'):
@@ -931,7 +931,9 @@ class Renderer(renderer.Renderer):
netrules_path = subp.target_path(target, self.netrules_path)
util.write_file(netrules_path, netrules_content, file_mode)
if available_nm(target=target):
- enable_ifcfg_rh(subp.target_path(target, path=NM_CFG_FILE))
+ enable_ifcfg_rh(subp.target_path(
+ target, path=NM_CFG_FILE
+ ))
sysconfig_path = subp.target_path(target, templates.get('control'))
# Distros configuring /etc/sysconfig/network as a file e.g. Centos
@@ -978,7 +980,10 @@ def available_sysconfig(target=None):
def available_nm(target=None):
- if not os.path.isfile(subp.target_path(target, path=NM_CFG_FILE)):
+ if not os.path.isfile(subp.target_path(
+ target,
+ path=NM_CFG_FILE
+ )):
return False
return True
diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py
index 6f9a02de..5ae048e2 100644
--- a/cloudinit/net/tests/test_dhcp.py
+++ b/cloudinit/net/tests/test_dhcp.py
@@ -42,6 +42,7 @@ class TestParseDHCPLeasesFile(CiTestCase):
lease {
interface "wlp3s0";
fixed-address 192.168.2.74;
+ filename "http://192.168.2.50/boot.php?mac=${netX}";
option subnet-mask 255.255.255.0;
option routers 192.168.2.1;
renew 4 2017/07/27 18:02:30;
@@ -50,6 +51,7 @@ class TestParseDHCPLeasesFile(CiTestCase):
lease {
interface "wlp3s0";
fixed-address 192.168.2.74;
+ filename "http://192.168.2.50/boot.php?mac=${netX}";
option subnet-mask 255.255.255.0;
option routers 192.168.2.1;
}
@@ -58,8 +60,10 @@ class TestParseDHCPLeasesFile(CiTestCase):
{'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1',
'renew': '4 2017/07/27 18:02:30',
- 'expire': '5 2017/07/28 07:08:15'},
+ 'expire': '5 2017/07/28 07:08:15',
+ 'filename': 'http://192.168.2.50/boot.php?mac=${netX}'},
{'interface': 'wlp3s0', 'fixed-address': '192.168.2.74',
+ 'filename': 'http://192.168.2.50/boot.php?mac=${netX}',
'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}]
write_file(lease_file, content)
self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file))
diff --git a/cloudinit/net/tests/test_network_state.py b/cloudinit/net/tests/test_network_state.py
index 07d726e2..84e8308a 100644
--- a/cloudinit/net/tests/test_network_state.py
+++ b/cloudinit/net/tests/test_network_state.py
@@ -2,12 +2,62 @@
from unittest import mock
+import pytest
+
+from cloudinit import safeyaml
from cloudinit.net import network_state
from cloudinit.tests.helpers import CiTestCase
netstate_path = 'cloudinit.net.network_state'
+_V1_CONFIG_NAMESERVERS = """\
+network:
+ version: 1
+ config:
+ - type: nameserver
+ interface: {iface}
+ address:
+ - 192.168.1.1
+ - 8.8.8.8
+ search:
+ - spam.local
+ - type: nameserver
+ address:
+ - 192.168.1.0
+ - 4.4.4.4
+ search:
+ - eggs.local
+ - type: physical
+ name: eth0
+ mac_address: '00:11:22:33:44:55'
+ - type: physical
+ name: eth1
+ mac_address: '66:77:88:99:00:11'
+"""
+
+V1_CONFIG_NAMESERVERS_VALID = _V1_CONFIG_NAMESERVERS.format(iface='eth1')
+V1_CONFIG_NAMESERVERS_INVALID = _V1_CONFIG_NAMESERVERS.format(iface='eth90')
+
+V2_CONFIG_NAMESERVERS = """\
+network:
+ version: 2
+ ethernets:
+ eth0:
+ match:
+ macaddress: '00:11:22:33:44:55'
+ nameservers:
+ search: [spam.local, eggs.local]
+ addresses: [8.8.8.8]
+ eth1:
+ match:
+ macaddress: '66:77:88:99:00:11'
+ nameservers:
+ search: [foo.local, bar.local]
+ addresses: [4.4.4.4]
+"""
+
+
class TestNetworkStateParseConfig(CiTestCase):
def setUp(self):
@@ -17,11 +67,13 @@ class TestNetworkStateParseConfig(CiTestCase):
def test_missing_version_returns_none(self):
ncfg = {}
- self.assertEqual(None, network_state.parse_net_config_data(ncfg))
+ with self.assertRaises(RuntimeError):
+ network_state.parse_net_config_data(ncfg)
def test_unknown_versions_returns_none(self):
ncfg = {'version': 13.2}
- self.assertEqual(None, network_state.parse_net_config_data(ncfg))
+ with self.assertRaises(RuntimeError):
+ network_state.parse_net_config_data(ncfg)
def test_version_2_passes_self_as_config(self):
ncfg = {'version': 2, 'otherconfig': {}, 'somemore': [1, 2, 3]}
@@ -55,4 +107,57 @@ class TestNetworkStateParseConfigV2(CiTestCase):
self.assertEqual(ncfg, nsi.as_dict()['config'])
+class TestNetworkStateParseNameservers:
+ def _parse_network_state_from_config(self, config):
+ yaml = safeyaml.load(config)
+ return network_state.parse_net_config_data(yaml['network'])
+
+ def test_v1_nameservers_valid(self):
+ config = self._parse_network_state_from_config(
+ V1_CONFIG_NAMESERVERS_VALID)
+
+ # If an interface was specified, DNS shouldn't be in the global list
+ assert ['192.168.1.0', '4.4.4.4'] == sorted(
+ config.dns_nameservers)
+ assert ['eggs.local'] == config.dns_searchdomains
+
+ # If an interface was specified, DNS should be part of the interface
+ for iface in config.iter_interfaces():
+ if iface['name'] == 'eth1':
+ assert iface['dns']['addresses'] == ['192.168.1.1', '8.8.8.8']
+ assert iface['dns']['search'] == ['spam.local']
+ else:
+ assert 'dns' not in iface
+
+ def test_v1_nameservers_invalid(self):
+ with pytest.raises(ValueError):
+ self._parse_network_state_from_config(
+ V1_CONFIG_NAMESERVERS_INVALID)
+
+ def test_v2_nameservers(self):
+ config = self._parse_network_state_from_config(V2_CONFIG_NAMESERVERS)
+
+ # Ensure DNS defined on interface exists on interface
+ for iface in config.iter_interfaces():
+ if iface['name'] == 'eth0':
+ assert iface['dns'] == {
+ 'nameservers': ['8.8.8.8'],
+ 'search': ['spam.local', 'eggs.local'],
+ }
+ else:
+ assert iface['dns'] == {
+ 'nameservers': ['4.4.4.4'],
+ 'search': ['foo.local', 'bar.local']
+ }
+
+ # Ensure DNS defined on interface also exists globally (since there
+ # is no global DNS definitions in v2)
+ assert ['4.4.4.4', '8.8.8.8'] == sorted(config.dns_nameservers)
+ assert [
+ 'bar.local',
+ 'eggs.local',
+ 'foo.local',
+ 'spam.local',
+ ] == sorted(config.dns_searchdomains)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
index b8677c8b..9afad747 100644
--- a/cloudinit/reporting/events.py
+++ b/cloudinit/reporting/events.py
@@ -165,7 +165,15 @@ class ReportEventStack(object):
:param result_on_exception:
The result value to set if an exception is caught. default
value is FAIL.
+
+ :param post_files:
+ Can hold filepaths of files that are to be posted/created
+ for a given event, e.g. success or failure information in a
+ given log file. Each filepath that is a valid regular file
+ will be read and base64-encoded when the event closes.
+ Default value, if None, is an empty list.
"""
+
def __init__(self, name, description, message=None, parent=None,
reporting_enabled=None, result_on_exception=status.FAIL,
post_files=None):
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 2f3390c3..dcdf9f8f 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -22,7 +22,7 @@ import requests
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import net
-from cloudinit.event import EventType
+from cloudinit.event import EventScope, EventType
from cloudinit.net import device_driver
from cloudinit.net.dhcp import EphemeralDHCPv4
from cloudinit import sources
@@ -338,6 +338,13 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
class DataSourceAzure(sources.DataSource):
dsname = 'Azure'
+ # Regenerate network config on new-instance boot and on every boot
+ default_update_events = {EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY
+ }}
+
_negotiated = False
_metadata_imds = sources.UNSET
_ci_pkl_version = 1
@@ -352,8 +359,6 @@ class DataSourceAzure(sources.DataSource):
BUILTIN_DS_CONFIG])
self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file')
self._network_config = None
- # Regenerate network config new_instance boot and every boot
- self.update_events['network'].add(EventType.BOOT)
self._ephemeral_dhcp_ctx = None
self.failed_desired_api_version = False
self.iso_dev = None
@@ -2309,8 +2314,8 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
LOG.info(
'Removing Ubuntu extended network scripts because'
' cloud-init updates Azure network configuration on the'
- ' following event: %s.',
- EventType.BOOT)
+ ' following events: %s.',
+ [EventType.BOOT.value, EventType.BOOT_LEGACY.value])
logged = True
if os.path.isdir(path):
util.del_dir(path)
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 62756cf7..19c8d126 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -12,9 +12,8 @@ from cloudinit import log as logging
from cloudinit import sources
from cloudinit import subp
from cloudinit import util
-
+from cloudinit.event import EventScope, EventType
from cloudinit.net import eni
-
from cloudinit.sources.DataSourceIBMCloud import get_ibm_platform
from cloudinit.sources.helpers import openstack
@@ -37,6 +36,13 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
dsname = 'ConfigDrive'
+ supported_update_events = {EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }}
+
def __init__(self, sys_cfg, distro, paths):
super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)
self.source = None
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 5040ce5b..08805d99 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -54,7 +54,7 @@ class DataSourceDigitalOcean(sources.DataSource):
if not is_do:
return False
- LOG.info("Running on digital ocean. droplet_id=%s", droplet_id)
+ LOG.info("Running on DigitalOcean. droplet_id=%s", droplet_id)
ipv4LL_nic = None
if self.use_ip4LL:
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index a2105dc7..700437b0 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -8,6 +8,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+import copy
import os
import time
@@ -20,7 +21,7 @@ from cloudinit import sources
from cloudinit import url_helper as uhelp
from cloudinit import util
from cloudinit import warnings
-from cloudinit.event import EventType
+from cloudinit.event import EventScope, EventType
LOG = logging.getLogger(__name__)
@@ -75,6 +76,13 @@ class DataSourceEc2(sources.DataSource):
# Whether we want to get network configuration from the metadata service.
perform_dhcp_setup = False
+ supported_update_events = {EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }}
+
def __init__(self, sys_cfg, distro, paths):
super(DataSourceEc2, self).__init__(sys_cfg, distro, paths)
self.metadata_address = None
@@ -426,7 +434,12 @@ class DataSourceEc2(sources.DataSource):
# Non-VPC (aka Classic) Ec2 instances need to rewrite the
# network config file every boot due to MAC address change.
if self.is_classic_instance():
- self.update_events['network'].add(EventType.BOOT)
+ self.default_update_events = copy.deepcopy(
+ self.default_update_events)
+ self.default_update_events[EventScope.NETWORK].add(
+ EventType.BOOT)
+ self.default_update_events[EventScope.NETWORK].add(
+ EventType.BOOT_LEGACY)
else:
LOG.warning("Metadata 'network' key not valid: %s.", net_md)
self._network_config = result
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index bbeada0b..9e83dccc 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -98,10 +98,21 @@ class DataSourceOVF(sources.DataSource):
found.append(seed)
elif system_type and 'vmware' in system_type.lower():
LOG.debug("VMware Virtualization Platform found")
+ allow_vmware_cust = False
+ allow_raw_data = False
if not self.vmware_customization_supported:
LOG.debug("Skipping the check for "
"VMware Customization support")
else:
+ allow_vmware_cust = not util.get_cfg_option_bool(
+ self.sys_cfg, "disable_vmware_customization", True)
+ allow_raw_data = util.get_cfg_option_bool(
+ self.ds_cfg, "allow_raw_data", True)
+
+ if not (allow_vmware_cust or allow_raw_data):
+ LOG.debug(
+ "Customization for VMware platform is disabled.")
+ else:
search_paths = (
"/usr/lib/vmware-tools", "/usr/lib64/vmware-tools",
"/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools")
@@ -148,19 +159,21 @@ class DataSourceOVF(sources.DataSource):
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
self._vmware_cust_conf)
- else:
- LOG.debug("Did not find VMware Customization Config File")
-
- # Honor disable_vmware_customization setting on metadata absent
- if not md_path:
- if util.get_cfg_option_bool(self.sys_cfg,
- "disable_vmware_customization",
- True):
+ # Don't handle customization in the following two cases:
+ # 1. metadata is found but allow_raw_data is False.
+ # 2. no metadata is found and allow_vmware_cust is False.
+ if md_path and not allow_raw_data:
LOG.debug(
- "Customization for VMware platform is disabled.")
+ "Customization using raw data is disabled.")
# reset vmwareImcConfigFilePath to None to avoid
# customization for VMware platform
vmwareImcConfigFilePath = None
+ if md_path is None and not allow_vmware_cust:
+ LOG.debug(
+ "Customization using VMware config is disabled.")
+ vmwareImcConfigFilePath = None
+ else:
+ LOG.debug("Did not find VMware Customization Config File")
use_raw_data = bool(vmwareImcConfigFilePath and md_path)
if use_raw_data:
@@ -429,7 +442,7 @@ def get_max_wait_from_cfg(cfg):
LOG.warning("Failed to get '%s', using %s",
max_wait_cfg_option, default_max_wait)
- if max_wait <= 0:
+ if max_wait < 0:
LOG.warning("Invalid value '%s' for '%s', using '%s' instead",
max_wait, max_wait_cfg_option, default_max_wait)
max_wait = default_max_wait
@@ -440,6 +453,8 @@ def get_max_wait_from_cfg(cfg):
def wait_for_imc_cfg_file(filename, maxwait=180, naplen=5,
dirpath="/var/run/vmware-imc"):
waited = 0
+ if maxwait <= naplen:
+ naplen = 1
while waited < maxwait:
fileFullPath = os.path.join(dirpath, filename)
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 619a171e..a85b71d7 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -8,11 +8,11 @@ import time
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
-
+from cloudinit.event import EventScope, EventType
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
from cloudinit.sources.helpers import openstack
from cloudinit.sources import DataSourceOracle as oracle
@@ -46,6 +46,13 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
# Whether we want to get network configuration from the metadata service.
perform_dhcp_setup = False
+ supported_update_events = {EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG
+ }}
+
def __init__(self, sys_cfg, distro, paths):
super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
self.metadata_address = None
diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py
index 0b8994bf..bb69e998 100644
--- a/cloudinit/sources/DataSourceRbxCloud.py
+++ b/cloudinit/sources/DataSourceRbxCloud.py
@@ -17,7 +17,7 @@ from cloudinit import log as logging
from cloudinit import sources
from cloudinit import subp
from cloudinit import util
-from cloudinit.event import EventType
+from cloudinit.event import EventScope, EventType
LOG = logging.getLogger(__name__)
ETC_HOSTS = '/etc/hosts'
@@ -206,10 +206,11 @@ def read_user_data_callback(mount_dir):
class DataSourceRbxCloud(sources.DataSource):
dsname = "RbxCloud"
- update_events = {'network': [
+ default_update_events = {EventScope.NETWORK: {
EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT
- ]}
+ EventType.BOOT,
+ EventType.BOOT_LEGACY
+ }}
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index 41be7665..7b8974a2 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -31,8 +31,8 @@ from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
from cloudinit import net
+from cloudinit.event import EventScope, EventType
from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-from cloudinit.event import EventType
LOG = logging.getLogger(__name__)
@@ -172,7 +172,13 @@ def query_data_api(api_type, api_address, retries, timeout):
class DataSourceScaleway(sources.DataSource):
dsname = "Scaleway"
- update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]}
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index fd292baa..9b16bf8d 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -36,7 +36,7 @@ from cloudinit import serial
from cloudinit import sources
from cloudinit import subp
from cloudinit import util
-from cloudinit.event import EventType
+from cloudinit.event import EventScope, EventType
LOG = logging.getLogger(__name__)
@@ -170,6 +170,11 @@ class DataSourceSmartOS(sources.DataSource):
smartos_type = sources.UNSET
md_client = sources.UNSET
+ default_update_events = {EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY
+ }}
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -181,7 +186,6 @@ class DataSourceSmartOS(sources.DataSource):
self.metadata = {}
self.network_data = None
self._network_config = None
- self.update_events['network'].add(EventType.BOOT)
self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 7d74f8d9..bf6bf139 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -13,6 +13,7 @@ import copy
import json
import os
from collections import namedtuple
+from typing import Dict, List
from cloudinit import dmi
from cloudinit import importer
@@ -22,7 +23,8 @@ from cloudinit import type_utils
from cloudinit import user_data as ud
from cloudinit import util
from cloudinit.atomic_helper import write_json
-from cloudinit.event import EventType
+from cloudinit.distros import Distro
+from cloudinit.event import EventScope, EventType
from cloudinit.filters import launch_index
from cloudinit.persistence import CloudInitPickleMixin
from cloudinit.reporting import events
@@ -74,6 +76,10 @@ NetworkConfigSource = namedtuple('NetworkConfigSource',
_NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES)
+class DatasourceUnpickleUserDataError(Exception):
+ """Raised when userdata is unable to be unpickled due to python upgrades"""
+
+
class DataSourceNotFoundException(Exception):
pass
@@ -175,12 +181,23 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
# The datasource defines a set of supported EventTypes during which
# the datasource can react to changes in metadata and regenerate
- # network configuration on metadata changes.
- # A datasource which supports writing network config on each system boot
- # would call update_events['network'].add(EventType.BOOT).
+ # network configuration on metadata changes. These are defined in
+ # `supported_update_events`.
+ # The datasource also defines a set of default EventTypes that the
+ # datasource can react to. These are the event types that will be used
+ # if not overridden by the user.
+ # A datasource that needs to write network config on each system boot
+ # would call default_update_events[EventScope.NETWORK].add(EventType.BOOT).
# Default: generate network config on new instance id (first boot).
- update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])}
+ supported_update_events = {EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ }}
+ default_update_events = {EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ }}
# N-tuple listing default values for any metadata-related class
# attributes cached on an instance by a process_data runs. These attribute
@@ -199,7 +216,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
_ci_pkl_version = 1
- def __init__(self, sys_cfg, distro, paths, ud_proc=None):
+ def __init__(self, sys_cfg, distro: Distro, paths, ud_proc=None):
self.sys_cfg = sys_cfg
self.distro = distro
self.paths = paths
@@ -227,6 +244,20 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
self.vendordata2 = None
if not hasattr(self, 'vendordata2_raw'):
self.vendordata2_raw = None
+ if hasattr(self, 'userdata') and self.userdata is not None:
+ # If userdata stores MIME data, on < python3.6 it will be
+ # missing the 'policy' attribute that exists on >=python3.6.
+ # Calling str() on the userdata will attempt to access this
+ # policy attribute. This will raise an exception, causing
+ # the pickle load to fail, so cloud-init will discard the cache
+ try:
+ str(self.userdata)
+ except AttributeError as e:
+ LOG.debug(
+ "Unable to unpickle datasource: %s."
+ " Ignoring current cache.", e
+ )
+ raise DatasourceUnpickleUserDataError() from e
def __str__(self):
return type_utils.obj_name(self)
@@ -648,10 +679,12 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
def get_package_mirror_info(self):
return self.distro.get_package_mirror_info(data_source=self)
- def update_metadata(self, source_event_types):
+ def update_metadata_if_supported(
+ self, source_event_types: List[EventType]
+ ) -> bool:
"""Refresh cached metadata if the datasource supports this event.
- The datasource has a list of update_events which
+ The datasource has a list of supported_update_events which
trigger refreshing all cached metadata as well as refreshing the
network configuration.
@@ -661,9 +694,9 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
@return True if the datasource did successfully update cached metadata
due to source_event_type.
"""
- supported_events = {}
+ supported_events = {} # type: Dict[EventScope, set]
for event in source_event_types:
- for update_scope, update_events in self.update_events.items():
+ for update_scope, update_events in self.supported_update_events.items(): # noqa: E501
if event in update_events:
if not supported_events.get(update_scope):
supported_events[update_scope] = set()
@@ -671,7 +704,8 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
for scope, matched_events in supported_events.items():
LOG.debug(
"Update datasource metadata and %s config due to events: %s",
- scope, ', '.join(matched_events))
+ scope.value,
+ ', '.join([event.value for event in matched_events]))
# Each datasource has a cached config property which needs clearing
# Once cleared that config property will be regenerated from
# current metadata.
@@ -682,7 +716,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
if result:
return True
LOG.debug("Datasource %s not updated for events: %s", self,
- ', '.join(source_event_types))
+ ', '.join([event.value for event in source_event_types]))
return False
def check_instance_id(self, sys_cfg):
@@ -789,7 +823,9 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
with myrep:
LOG.debug("Seeing if we can get any data from %s", cls)
s = cls(sys_cfg, distro, paths)
- if s.update_metadata([EventType.BOOT_NEW_INSTANCE]):
+ if s.update_metadata_if_supported(
+ [EventType.BOOT_NEW_INSTANCE]
+ ):
myrep.message = "found %s data from %s" % (mode, name)
return (s, type_utils.obj_name(cls))
except Exception:
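To summarize the split introduced above: supported_update_events gates
what update_metadata_if_supported will act on at all, while
default_update_events is what applies when the user does not override
it. A sketch, assuming ds is an initialized datasource instance:

    from cloudinit.event import EventType

    # Only refreshes metadata if the datasource lists HOTPLUG under
    # EventScope.NETWORK in supported_update_events.
    if ds.update_metadata_if_supported([EventType.HOTPLUG]):
        net_cfg = ds.network_config  # regenerated from fresh metadata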
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index 1420a988..a2b052a6 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -5,7 +5,7 @@ import inspect
import os
import stat
-from cloudinit.event import EventType
+from cloudinit.event import EventScope, EventType
from cloudinit.helpers import Paths
from cloudinit import importer
from cloudinit.sources import (
@@ -618,24 +618,29 @@ class TestDataSource(CiTestCase):
self.assertEqual('himom', getattr(self.datasource, cached_attr_name))
self.assertEqual('updated', self.datasource.myattr)
+ @mock.patch.dict(DataSource.default_update_events, {
+ EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}})
+ @mock.patch.dict(DataSource.supported_update_events, {
+ EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}})
def test_update_metadata_only_acts_on_supported_update_events(self):
- """update_metadata won't get_data on unsupported update events."""
- self.datasource.update_events['network'].discard(EventType.BOOT)
+ """update_metadata_if_supported wont get_data on unsupported events."""
self.assertEqual(
- {'network': set([EventType.BOOT_NEW_INSTANCE])},
- self.datasource.update_events)
+ {EventScope.NETWORK: set([EventType.BOOT_NEW_INSTANCE])},
+ self.datasource.default_update_events
+ )
def fake_get_data():
raise Exception('get_data should not be called')
self.datasource.get_data = fake_get_data
self.assertFalse(
- self.datasource.update_metadata(
+ self.datasource.update_metadata_if_supported(
source_event_types=[EventType.BOOT]))
+ @mock.patch.dict(DataSource.supported_update_events, {
+ EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}})
def test_update_metadata_returns_true_on_supported_update_event(self):
- """update_metadata returns get_data response on supported events."""
-
+ """update_metadata_if_supported returns get_data on supported events"""
def fake_get_data():
return True
@@ -643,14 +648,16 @@ class TestDataSource(CiTestCase):
self.datasource._network_config = 'something'
self.datasource._dirty_cache = True
self.assertTrue(
- self.datasource.update_metadata(
+ self.datasource.update_metadata_if_supported(
source_event_types=[
EventType.BOOT, EventType.BOOT_NEW_INSTANCE]))
self.assertEqual(UNSET, self.datasource._network_config)
+
self.assertIn(
"DEBUG: Update datasource metadata and network config due to"
- " events: New instance first boot",
- self.logs.getvalue())
+ " events: boot-new-instance",
+ self.logs.getvalue()
+ )
class TestRedactSensitiveData(CiTestCase):
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index c08042d6..89057262 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -252,13 +252,15 @@ def render_authorizedkeysfile_paths(value, homedir, username):
def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG):
(ssh_dir, pw_ent) = users_ssh_info(username)
default_authorizedkeys_file = os.path.join(ssh_dir, 'authorized_keys')
+ user_authorizedkeys_file = default_authorizedkeys_file
auth_key_fns = []
with util.SeLinuxGuard(ssh_dir, recursive=True):
try:
ssh_cfg = parse_ssh_config_map(sshd_cfg_file)
+ key_paths = ssh_cfg.get("authorizedkeysfile",
+ "%h/.ssh/authorized_keys")
auth_key_fns = render_authorizedkeysfile_paths(
- ssh_cfg.get("authorizedkeysfile", "%h/.ssh/authorized_keys"),
- pw_ent.pw_dir, username)
+ key_paths, pw_ent.pw_dir, username)
except (IOError, OSError):
# Give up and use a default key filename
@@ -267,8 +269,22 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG):
"config from %r, using 'AuthorizedKeysFile' file "
"%r instead", DEF_SSHD_CFG, auth_key_fns[0])
+ # check whether one of the rendered key files is the user's own
+ for key_path, auth_key_fn in zip(key_paths.split(), auth_key_fns):
+ if any([
+ '%u' in key_path,
+ '%h' in key_path,
+ auth_key_fn.startswith('{}/'.format(pw_ent.pw_dir))
+ ]):
+ user_authorizedkeys_file = auth_key_fn
+
+ if user_authorizedkeys_file != default_authorizedkeys_file:
+ LOG.debug(
+ "AuthorizedKeysFile has an user-specific authorized_keys, "
+ "using %s", user_authorizedkeys_file)
+
# always store all the keys in the user's private file
- return (default_authorizedkeys_file, parse_authorized_keys(auth_key_fns))
+ return (user_authorizedkeys_file, parse_authorized_keys(auth_key_fns))
def setup_user_keys(keys, username, options=None):
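(The `extract_authorized_keys` change above keeps writing keys to the user's own file whenever sshd's `AuthorizedKeysFile` points at one. A hedged sketch of the selection rule, with made-up paths:)

```python
def pick_user_keyfile(key_paths, auth_key_fns, home, default):
    """Prefer a rendered path tied to the user via %u/%h or $HOME."""
    user_file = default
    for key_path, auth_key_fn in zip(key_paths.split(), auth_key_fns):
        if ('%u' in key_path or '%h' in key_path
                or auth_key_fn.startswith(home + '/')):
            user_file = auth_key_fn
    return user_file


print(pick_user_keyfile(
    '/etc/ssh/keys %h/.ssh/authorized_keys2',
    ['/etc/ssh/keys', '/home/bob/.ssh/authorized_keys2'],
    '/home/bob',
    '/home/bob/.ssh/authorized_keys'))
# -> /home/bob/.ssh/authorized_keys2
```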
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 5bacc85d..bc164fa0 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -8,9 +8,11 @@ import copy
import os
import pickle
import sys
+from collections import namedtuple
+from typing import Dict, Set
from cloudinit.settings import (
- FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, RUN_CLOUD_CONFIG)
+ FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, PER_ONCE, RUN_CLOUD_CONFIG)
from cloudinit import handlers
@@ -21,7 +23,11 @@ from cloudinit.handlers.jinja_template import JinjaTemplatePartHandler
from cloudinit.handlers.shell_script import ShellScriptPartHandler
from cloudinit.handlers.upstart_job import UpstartJobPartHandler
-from cloudinit.event import EventType
+from cloudinit.event import (
+ EventScope,
+ EventType,
+ userdata_to_events,
+)
from cloudinit.sources import NetworkConfigSource
from cloudinit import cloud
@@ -118,6 +124,7 @@ class Init(object):
def _initial_subdirs(self):
c_dir = self.paths.cloud_dir
+ run_dir = self.paths.run_dir
initial_dirs = [
c_dir,
os.path.join(c_dir, 'scripts'),
@@ -130,6 +137,7 @@ class Init(object):
os.path.join(c_dir, 'handlers'),
os.path.join(c_dir, 'sem'),
os.path.join(c_dir, 'data'),
+ os.path.join(run_dir, 'sem'),
]
return initial_dirs
@@ -148,7 +156,7 @@ class Init(object):
util.ensure_dirs(self._initial_subdirs())
log_file = util.get_cfg_option_str(self.cfg, 'def_log_file')
if log_file:
- util.ensure_file(log_file, preserve_mode=True)
+ util.ensure_file(log_file, mode=0o640, preserve_mode=True)
perms = self.cfg.get('syslog_fix_perms')
if not perms:
perms = {}
@@ -233,7 +241,7 @@ class Init(object):
else:
return (None, "cache invalid in datasource: %s" % ds)
- def _get_data_source(self, existing):
+ def _get_data_source(self, existing) -> sources.DataSource:
if self.datasource is not NULL_DATA_SOURCE:
return self.datasource
@@ -259,7 +267,7 @@ class Init(object):
cfg_list,
pkg_list, self.reporter)
LOG.info("Loaded datasource %s - %s", dsname, ds)
- self.datasource = ds
+ self.datasource = ds # type: sources.DataSource
# Ensure we adjust our path members datasource
# now that we have one (thus allowing ipath to be used)
self._reset()
@@ -341,6 +349,11 @@ class Init(object):
return self._previous_iid
def is_new_instance(self):
+ """Return true if this is a new instance.
+
+ If datasource has already been initialized, this will return False,
+ even on first boot.
+ """
previous = self.previous_iid()
ret = (previous == NO_PREVIOUS_INSTANCE_ID or
previous != self.datasource.get_instance_id())
@@ -702,6 +715,46 @@ class Init(object):
return (self.distro.generate_fallback_config(),
NetworkConfigSource.fallback)
+ def update_event_enabled(
+ self, event_source_type: EventType, scope: EventScope = None
+ ) -> bool:
+ """Determine if a particular EventType is enabled.
+
+ For the `event_source_type` passed in, check whether this EventType
+ is enabled in the `updates` section of the userdata. If `updates`
+ is not enabled in userdata, check if it is defined as one of the
+ `default_update_events` on the datasource. `scope` may be used to
+ narrow the check to a particular `EventScope`.
+
+ Note that on first boot, userdata may NOT be available yet. In this
+ case, we only have the data source's `default_update_events`,
+ so an event that should be enabled in userdata may be denied.
+ """
+ default_events = self.datasource.default_update_events # type: Dict[EventScope, Set[EventType]] # noqa: E501
+ user_events = userdata_to_events(self.cfg.get('updates', {})) # type: Dict[EventScope, Set[EventType]] # noqa: E501
+ # A value in the first will override a value in the second
+ allowed = util.mergemanydict([
+ copy.deepcopy(user_events),
+ copy.deepcopy(default_events),
+ ])
+ LOG.debug('Allowed events: %s', allowed)
+
+ if not scope:
+ scopes = allowed.keys()
+ else:
+ scopes = [scope]
+ scope_values = [s.value for s in scopes]
+
+ for evt_scope in scopes:
+ if event_source_type in allowed.get(evt_scope, []):
+ LOG.debug('Event Allowed: scope=%s EventType=%s',
+ evt_scope.value, event_source_type)
+ return True
+
+ LOG.debug('Event Denied: scopes=%s EventType=%s',
+ scope_values, event_source_type)
+ return False
+
def _apply_netcfg_names(self, netcfg):
try:
LOG.debug("applying net config names for %s", netcfg)
@@ -709,27 +762,51 @@ class Init(object):
except Exception as e:
LOG.warning("Failed to rename devices: %s", e)
+ def _get_per_boot_network_semaphore(self):
+ return namedtuple('Semaphore', 'semaphore args')(
+ helpers.FileSemaphores(self.paths.get_runpath('sem')),
+ ('apply_network_config', PER_ONCE)
+ )
+
+ def _network_already_configured(self) -> bool:
+ sem = self._get_per_boot_network_semaphore()
+ return sem.semaphore.has_run(*sem.args)
+
def apply_network_config(self, bring_up):
- # get a network config
+ """Apply the network config.
+
+ Find the config, determine whether to apply it, apply it via
+ the distro, and optionally bring it up
+ """
netcfg, src = self._find_networking_config()
if netcfg is None:
LOG.info("network config is disabled by %s", src)
return
- # request an update if needed/available
- if self.datasource is not NULL_DATA_SOURCE:
- if not self.is_new_instance():
- if not self.datasource.update_metadata([EventType.BOOT]):
- LOG.debug(
- "No network config applied. Neither a new instance"
- " nor datasource network update on '%s' event",
- EventType.BOOT)
- # nothing new, but ensure proper names
- self._apply_netcfg_names(netcfg)
- return
- else:
- # refresh netcfg after update
- netcfg, src = self._find_networking_config()
+ def event_enabled_and_metadata_updated(event_type):
+ return self.update_event_enabled(
+ event_type, scope=EventScope.NETWORK
+ ) and self.datasource.update_metadata_if_supported([event_type])
+
+ def should_run_on_boot_event():
+ return (not self._network_already_configured() and
+ event_enabled_and_metadata_updated(EventType.BOOT))
+
+ if (
+ self.datasource is not NULL_DATA_SOURCE and
+ not self.is_new_instance() and
+ not should_run_on_boot_event() and
+ not event_enabled_and_metadata_updated(EventType.BOOT_LEGACY)
+ ):
+ LOG.debug(
+ "No network config applied. Neither a new instance"
+ " nor datasource network update allowed")
+ # nothing new, but ensure proper names
+ self._apply_netcfg_names(netcfg)
+ return
+
+ # refresh netcfg after update
+ netcfg, src = self._find_networking_config()
# ensure all physical devices in config are present
self.distro.networking.wait_for_physdevs(netcfg)
@@ -740,8 +817,12 @@ class Init(object):
# rendering config
LOG.info("Applying network configuration from %s bringup=%s: %s",
src, bring_up, netcfg)
+
+ sem = self._get_per_boot_network_semaphore()
try:
- return self.distro.apply_network_config(netcfg, bring_up=bring_up)
+ with sem.semaphore.lock(*sem.args):
+ return self.distro.apply_network_config(
+ netcfg, bring_up=bring_up)
except net.RendererNotFoundError as e:
LOG.error("Unable to render networking. Network config is "
"likely broken: %s", e)
@@ -989,6 +1070,8 @@ def _pkl_load(fname):
return None
try:
return pickle.loads(pickle_contents)
+ except sources.DatasourceUnpickleUserDataError:
+ return None
except Exception:
util.logexc(LOG, "Failed loading pickled blob from %s", fname)
return None
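(In `update_event_enabled` above, user-supplied `updates` config is merged over the datasource's `default_update_events`, with the user value winning per scope. A rough model of that precedence, using plain `dict.update` as a stand-in for `util.mergemanydict`:)

```python
import copy


def allowed_events(user_events, default_events):
    """User 'updates' config wins over datasource defaults, per scope."""
    allowed = copy.deepcopy(default_events)
    allowed.update(copy.deepcopy(user_events))
    return allowed


defaults = {'network': {'boot-new-instance'}}
user = {'network': {'boot-new-instance', 'boot'}}
print(allowed_events(user, defaults))
# {'network': {'boot-new-instance', 'boot'}} -> BOOT is now allowed
```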
diff --git a/cloudinit/tests/.test_util.py.swp b/cloudinit/tests/.test_util.py.swp
new file mode 100644
index 00000000..78ef5865
--- /dev/null
+++ b/cloudinit/tests/.test_util.py.swp
Binary files differ
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index 58f63b69..ccd56793 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -171,7 +171,7 @@ class CiTestCase(TestCase):
if self.with_logs:
# Remove the handler we setup
logging.getLogger().handlers = self.old_handlers
- logging.getLogger().level = None
+ logging.getLogger().setLevel(logging.NOTSET)
subp.subp = _real_subp
super(CiTestCase, self).tearDown()
@@ -360,6 +360,9 @@ class HttprettyTestCase(CiTestCase):
httpretty.HTTPretty.allow_net_connect = False
httpretty.reset()
httpretty.enable()
+ # Stop the logging from HttpPretty so our logs don't get mixed
+ # up with its logs
+ logging.getLogger('httpretty.core').setLevel(logging.CRITICAL)
def tearDown(self):
httpretty.disable()
diff --git a/cloudinit/tests/test_event.py b/cloudinit/tests/test_event.py
new file mode 100644
index 00000000..3da4c70c
--- /dev/null
+++ b/cloudinit/tests/test_event.py
@@ -0,0 +1,26 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Tests related to cloudinit.event module."""
+from cloudinit.event import EventType, EventScope, userdata_to_events
+
+
+class TestEvent:
+ def test_userdata_to_events(self):
+ userdata = {'network': {'when': ['boot']}}
+ expected = {EventScope.NETWORK: {EventType.BOOT}}
+ assert expected == userdata_to_events(userdata)
+
+ def test_invalid_scope(self, caplog):
+ userdata = {'networkasdfasdf': {'when': ['boot']}}
+ userdata_to_events(userdata)
+ assert (
+ "'networkasdfasdf' is not a valid EventScope! Update data "
+ "will be ignored for 'networkasdfasdf' scope"
+ ) in caplog.text
+
+ def test_invalid_event(self, caplog):
+ userdata = {'network': {'when': ['bootasdfasdf']}}
+ userdata_to_events(userdata)
+ assert (
+ "'bootasdfasdf' is not a valid EventType! Update data "
+ "will be ignored for 'network' scope"
+ ) in caplog.text
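(For reference, a toy stand-in for the `userdata_to_events` translation these tests cover; the real function maps names onto the enums and logs the warnings asserted above, whereas this sketch just filters against hard-coded name sets:)

```python
def to_events(userdata):
    """Translate an 'updates' mapping into {scope: {event, ...}}."""
    valid_scopes = {'network'}
    valid_events = {'boot', 'boot-new-instance', 'boot-legacy', 'hotplug'}
    return {
        scope: {e for e in cfg.get('when', []) if e in valid_events}
        for scope, cfg in userdata.items()
        if scope in valid_scopes
    }


print(to_events({'network': {'when': ['boot', 'hotplug']}}))
# {'network': {'boot', 'hotplug'}}  (set order may vary)
```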
diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py
index d2d1b37f..a50836a4 100644
--- a/cloudinit/tests/test_stages.py
+++ b/cloudinit/tests/test_stages.py
@@ -1,7 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
"""Tests related to cloudinit.stages module."""
-
import os
import stat
@@ -11,7 +10,7 @@ from cloudinit import stages
from cloudinit import sources
from cloudinit.sources import NetworkConfigSource
-from cloudinit.event import EventType
+from cloudinit.event import EventScope, EventType
from cloudinit.util import write_file
from cloudinit.tests.helpers import CiTestCase, mock
@@ -52,6 +51,8 @@ class TestInit(CiTestCase):
'distro': 'ubuntu', 'paths': {'cloud_dir': self.tmpdir,
'run_dir': self.tmpdir}}}
self.init.datasource = FakeDataSource(paths=self.init.paths)
+ self._real_is_new_instance = self.init.is_new_instance
+ self.init.is_new_instance = mock.Mock(return_value=True)
def test_wb__find_networking_config_disabled(self):
"""find_networking_config returns no config when disabled."""
@@ -291,6 +292,7 @@ class TestInit(CiTestCase):
m_macs.return_value = {'42:42:42:42:42:42': 'eth9'}
self.init._find_networking_config = fake_network_config
+
self.init.apply_network_config(True)
self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
self.init.distro.apply_network_config.assert_called_with(
@@ -299,6 +301,7 @@ class TestInit(CiTestCase):
@mock.patch('cloudinit.distros.ubuntu.Distro')
def test_apply_network_on_same_instance_id(self, m_ubuntu):
"""Only call distro.apply_network_config_names on same instance id."""
+ self.init.is_new_instance = self._real_is_new_instance
old_instance_id = os.path.join(
self.init.paths.get_cpath('data'), 'instance-id')
write_file(old_instance_id, TEST_INSTANCE_ID)
@@ -311,18 +314,19 @@ class TestInit(CiTestCase):
return net_cfg, NetworkConfigSource.fallback
self.init._find_networking_config = fake_network_config
+
self.init.apply_network_config(True)
self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
self.init.distro.apply_network_config.assert_not_called()
- self.assertIn(
- 'No network config applied. Neither a new instance'
- " nor datasource network update on '%s' event" % EventType.BOOT,
- self.logs.getvalue())
-
- @mock.patch('cloudinit.net.get_interfaces_by_mac')
- @mock.patch('cloudinit.distros.ubuntu.Distro')
- def test_apply_network_on_datasource_allowed_event(self, m_ubuntu, m_macs):
- """Apply network if datasource.update_metadata permits BOOT event."""
+ assert (
+ "No network config applied. Neither a new instance nor datasource "
+ "network update allowed"
+ ) in self.logs.getvalue()
+
+ # CiTestCase doesn't work with pytest.mark.parametrize, and moving this
+ # functionality to a separate class is more cumbersome than it'd be worth
+ # at the moment, so use this as a simple setup
+ def _apply_network_setup(self, m_macs):
old_instance_id = os.path.join(
self.init.paths.get_cpath('data'), 'instance-id')
write_file(old_instance_id, TEST_INSTANCE_ID)
@@ -338,12 +342,80 @@ class TestInit(CiTestCase):
self.init._find_networking_config = fake_network_config
self.init.datasource = FakeDataSource(paths=self.init.paths)
- self.init.datasource.update_events = {'network': [EventType.BOOT]}
+ self.init.is_new_instance = mock.Mock(return_value=False)
+ return net_cfg
+
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch('cloudinit.distros.ubuntu.Distro')
+ @mock.patch.dict(sources.DataSource.default_update_events, {
+ EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}})
+ def test_apply_network_allowed_when_default_boot(
+ self, m_ubuntu, m_macs
+ ):
+ """Apply network if datasource permits BOOT event."""
+ net_cfg = self._apply_network_setup(m_macs)
+
self.init.apply_network_config(True)
- self.init.distro.apply_network_config_names.assert_called_with(net_cfg)
+ assert mock.call(
+ net_cfg
+ ) == self.init.distro.apply_network_config_names.call_args_list[-1]
+ assert mock.call(
+ net_cfg, bring_up=True
+ ) == self.init.distro.apply_network_config.call_args_list[-1]
+
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch('cloudinit.distros.ubuntu.Distro')
+ @mock.patch.dict(sources.DataSource.default_update_events, {
+ EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}})
+ def test_apply_network_disabled_when_no_default_boot(
+ self, m_ubuntu, m_macs
+ ):
+ """Don't apply network if datasource has no BOOT event."""
+ self._apply_network_setup(m_macs)
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config.assert_not_called()
+ assert (
+ "No network config applied. Neither a new instance nor datasource "
+ "network update allowed"
+ ) in self.logs.getvalue()
+
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch('cloudinit.distros.ubuntu.Distro')
+ @mock.patch.dict(sources.DataSource.default_update_events, {
+ EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}})
+ def test_apply_network_allowed_with_userdata_overrides(
+ self, m_ubuntu, m_macs
+ ):
+ """Apply network if userdata overrides default config"""
+ net_cfg = self._apply_network_setup(m_macs)
+ self.init._cfg = {'updates': {'network': {'when': ['boot']}}}
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config_names.assert_called_with(
+ net_cfg)
self.init.distro.apply_network_config.assert_called_with(
net_cfg, bring_up=True)
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ @mock.patch('cloudinit.distros.ubuntu.Distro')
+ @mock.patch.dict(sources.DataSource.supported_update_events, {
+ EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}})
+ def test_apply_network_disabled_when_unsupported(
+ self, m_ubuntu, m_macs
+ ):
+ """Don't apply network config if unsupported.
+
+ Shouldn't work even when specified as userdata
+ """
+ self._apply_network_setup(m_macs)
+
+ self.init._cfg = {'updates': {'network': {'when': ['boot']}}}
+ self.init.apply_network_config(True)
+ self.init.distro.apply_network_config.assert_not_called()
+ assert (
+ "No network config applied. Neither a new instance nor datasource "
+ "network update allowed"
+ ) in self.logs.getvalue()
+
class TestInit_InitializeFilesystem:
"""Tests for cloudinit.stages.Init._initialize_filesystem.
@@ -356,22 +428,20 @@ class TestInit_InitializeFilesystem:
"""A fixture which yields a stages.Init instance with paths and cfg set
As it is replaced with a mock, consumers of this fixture can set
- `init.cfg` if the default empty dict configuration is not appropriate.
+ `init._cfg` if the default empty dict configuration is not appropriate.
"""
- with mock.patch(
- "cloudinit.stages.Init.cfg", mock.PropertyMock(return_value={})
- ):
- with mock.patch("cloudinit.stages.util.ensure_dirs"):
- init = stages.Init()
- init._paths = paths
- yield init
+ with mock.patch("cloudinit.stages.util.ensure_dirs"):
+ init = stages.Init()
+ init._cfg = {}
+ init._paths = paths
+ yield init
@mock.patch("cloudinit.stages.util.ensure_file")
def test_ensure_file_not_called_if_no_log_file_configured(
self, m_ensure_file, init
):
"""If no log file is configured, we should not ensure its existence."""
- init.cfg = {}
+ init._cfg = {}
init._initialize_filesystem()
@@ -380,11 +450,13 @@ class TestInit_InitializeFilesystem:
def test_log_files_existence_is_ensured_if_configured(self, init, tmpdir):
"""If a log file is configured, we should ensure its existence."""
log_file = tmpdir.join("cloud-init.log")
- init.cfg = {"def_log_file": str(log_file)}
+ init._cfg = {"def_log_file": str(log_file)}
init._initialize_filesystem()
- assert log_file.exists
+ assert log_file.exists()
+ # Assert we create it 0o640 by default if it doesn't already exist
+ assert 0o640 == stat.S_IMODE(log_file.stat().mode)
def test_existing_file_permissions_are_not_modified(self, init, tmpdir):
"""If the log file already exists, we should not modify its permissions
@@ -397,7 +469,7 @@ class TestInit_InitializeFilesystem:
log_file = tmpdir.join("cloud-init.log")
log_file.ensure()
log_file.chmod(mode)
- init.cfg = {"def_log_file": str(log_file)}
+ init._cfg = {"def_log_file": str(log_file)}
init._initialize_filesystem()
diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py
index 364ec822..c3918f80 100644
--- a/cloudinit/tests/test_url_helper.py
+++ b/cloudinit/tests/test_url_helper.py
@@ -8,6 +8,7 @@ from cloudinit import util
from cloudinit import version
import httpretty
+import logging
import requests
@@ -81,6 +82,9 @@ class TestReadFileOrUrl(CiTestCase):
url = 'http://hostname/path'
headers = {'sensitive': 'sekret', 'server': 'blah'}
httpretty.register_uri(httpretty.GET, url)
+ # By default, httpretty will log our request along with the header,
+ # so if we don't change this the secret will show up in the logs
+ logging.getLogger('httpretty.core').setLevel(logging.CRITICAL)
read_file_or_url(url, headers=headers, headers_redact=['sensitive'])
logs = self.logs.getvalue()
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index a4c02877..a1ccb1dc 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -124,6 +124,22 @@ OS_RELEASE_ALMALINUX_8 = dedent("""\
ALMALINUX_MANTISBT_PROJECT_VERSION="8.3"
""")
+OS_RELEASE_ROCKY_8 = dedent("""\
+ NAME="Rocky Linux"
+ VERSION="8.3 (Green Obsidian)"
+ ID="rocky"
+ ID_LIKE="rhel fedora"
+ VERSION_ID="8.3"
+ PLATFORM_ID="platform:el8"
+ PRETTY_NAME="Rocky Linux 8.3 (Green Obsidian)"
+ ANSI_COLOR="0;31"
+ CPE_NAME="cpe:/o:rocky:rocky:8"
+ HOME_URL="https://rockylinux.org/"
+ BUG_REPORT_URL="https://bugs.rockylinux.org/"
+ ROCKY_SUPPORT_PRODUCT="Rocky Linux"
+ ROCKY_SUPPORT_PRODUCT_VERSION="8"
+""")
+
REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)"
REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)"
REDHAT_RELEASE_REDHAT_6 = (
@@ -132,7 +148,8 @@ REDHAT_RELEASE_REDHAT_7 = (
"Red Hat Enterprise Linux Server release 7.5 (Maipo)")
REDHAT_RELEASE_ALMALINUX_8 = (
"AlmaLinux release 8.3 (Purple Manul)")
-
+REDHAT_RELEASE_ROCKY_8 = (
+ "Rocky Linux release 8.3 (Green Obsidian)")
OS_RELEASE_DEBIAN = dedent("""\
PRETTY_NAME="Debian GNU/Linux 9 (stretch)"
@@ -160,6 +177,17 @@ OS_RELEASE_UBUNTU = dedent("""\
UBUNTU_CODENAME=xenial\n
""")
+OS_RELEASE_PHOTON = ("""\
+ NAME="VMware Photon OS"
+ VERSION="4.0"
+ ID=photon
+ VERSION_ID=4.0
+ PRETTY_NAME="VMware Photon OS/Linux"
+ ANSI_COLOR="1;34"
+ HOME_URL="https://vmware.github.io/photon/"
+ BUG_REPORT_URL="https://github.com/vmware/photon/issues"
+""")
+
class FakeCloud(object):
@@ -538,6 +566,22 @@ class TestGetLinuxDistro(CiTestCase):
self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist)
@mock.patch('cloudinit.util.load_file')
+ def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists):
+ """Verify rocky linux 8 read from redhat-release."""
+ m_os_release.return_value = REDHAT_RELEASE_ROCKY_8
+ m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_rocky8_osrelease(self, m_os_release, m_path_exists):
+ """Verify rocky linux 8 read from os-release."""
+ m_os_release.return_value = OS_RELEASE_ROCKY_8
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist)
+
+ @mock.patch('cloudinit.util.load_file')
def test_get_linux_debian(self, m_os_release, m_path_exists):
"""Verify we get the correct name and release name on Debian."""
m_os_release.return_value = OS_RELEASE_DEBIAN
@@ -576,6 +620,15 @@ class TestGetLinuxDistro(CiTestCase):
self.assertEqual(
('opensuse-tumbleweed', '20180920', platform.machine()), dist)
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_photon_os_release(self, m_os_release, m_path_exists):
+ """Verify we get the correct name and machine arch on PhotonOS"""
+ m_os_release.return_value = OS_RELEASE_PHOTON
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(
+ ('photon', '4.0', 'VMware Photon OS/Linux'), dist)
+
@mock.patch('platform.system')
@mock.patch('platform.dist', create=True)
def test_get_linux_distro_no_data(self, m_platform_dist,
diff --git a/cloudinit/util.py b/cloudinit/util.py
index fdea1181..7995c6c8 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -392,7 +392,11 @@ def is_Linux():
@lru_cache()
def is_BSD():
- return 'BSD' in platform.system()
+ if 'BSD' in platform.system():
+ return True
+ if platform.system() == 'DragonFly':
+ return True
+ return False
@lru_cache()
@@ -401,6 +405,11 @@ def is_FreeBSD():
@lru_cache()
+def is_DragonFlyBSD():
+ return system_info()['variant'] == "dragonfly"
+
+
+@lru_cache()
def is_NetBSD():
return system_info()['variant'] == "netbsd"
@@ -474,6 +483,8 @@ def get_linux_distro():
# which will include both version codename and architecture
# on all distributions.
flavor = platform.machine()
+ elif distro_name == 'photon':
+ flavor = os_release.get('PRETTY_NAME', '')
else:
flavor = os_release.get('VERSION_CODENAME', '')
if not flavor:
@@ -522,7 +533,7 @@ def system_info():
linux_dist = info['dist'][0].lower()
if linux_dist in (
'almalinux', 'alpine', 'arch', 'centos', 'debian', 'fedora',
- 'rhel', 'suse'):
+ 'photon', 'rhel', 'rocky', 'suse'):
var = linux_dist
elif linux_dist in ('ubuntu', 'linuxmint', 'mint'):
var = 'ubuntu'
@@ -534,7 +545,9 @@ def system_info():
var = 'suse'
else:
var = 'linux'
- elif system in ('windows', 'darwin', "freebsd", "netbsd", "openbsd"):
+ elif system in (
+ 'windows', 'darwin', "freebsd", "netbsd",
+ "openbsd", "dragonfly"):
var = system
info['variant'] = var
@@ -1195,6 +1208,23 @@ def find_devs_with_openbsd(criteria=None, oformat='device',
return ['/dev/' + i for i in devlist]
+def find_devs_with_dragonflybsd(criteria=None, oformat='device',
+ tag=None, no_cache=False, path=None):
+ out, _err = subp.subp(['sysctl', '-n', 'kern.disks'], rcs=[0])
+ devlist = [i for i in sorted(out.split(), reverse=True)
+ if not i.startswith("md") and not i.startswith("vn")]
+
+ if criteria == "TYPE=iso9660":
+ devlist = [i for i in devlist
+ if i.startswith('cd') or i.startswith('acd')]
+ elif criteria in ["LABEL=CONFIG-2", "TYPE=vfat"]:
+ devlist = [i for i in devlist
+ if not (i.startswith('cd') or i.startswith('acd'))]
+ elif criteria:
+ LOG.debug("Unexpected criteria: %s", criteria)
+ return ['/dev/' + i for i in devlist]
+
+
def find_devs_with(criteria=None, oformat='device',
tag=None, no_cache=False, path=None):
"""
@@ -1213,6 +1243,9 @@ def find_devs_with(criteria=None, oformat='device',
elif is_OpenBSD():
return find_devs_with_openbsd(criteria, oformat,
tag, no_cache, path)
+ elif is_DragonFlyBSD():
+ return find_devs_with_dragonflybsd(criteria, oformat,
+ tag, no_cache, path)
blk_id_cmd = ['blkid']
options = []
@@ -2211,6 +2244,14 @@ def find_freebsd_part(fs):
LOG.warning("Unexpected input in find_freebsd_part: %s", fs)
+def find_dragonflybsd_part(fs):
+ splitted = fs.split('/')
+ if len(splitted) == 3 and splitted[1] == 'dev':
+ return splitted[2]
+ else:
+ LOG.warning("Unexpected input in find_dragonflybsd_part: %s", fs)
+
+
def get_path_dev_freebsd(path, mnt_list):
path_found = None
for line in mnt_list.split("\n"):
@@ -2264,6 +2305,9 @@ def parse_mount(path):
# https://regex101.com/r/T2en7a/1
regex = (r'^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) '
r'(?=(?:type)[\s]+([\S]+)|\(([^,]*))')
+ if is_DragonFlyBSD():
+ regex = (r'^(/dev/[\S]+|\S*?) on (/[\S]*) '
+ r'(?=(?:type)[\s]+([\S]+)|\(([^,]*))')
for line in mount_locs:
m = re.search(regex, line)
if not m:
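(The DragonFlyBSD branch of `parse_mount` relaxes the device field so it need not start with `/dev/`. A quick standalone check of that regex against a made-up mount line in the BSD `dev on mountpoint (fstype, options)` shape:)

```python
import re

# DragonFly variant of the pattern added above; group 1 (the device)
# may be any non-space token, not only a /dev/ path.
regex = (r'^(/dev/[\S]+|\S*?) on (/[\S]*) '
         r'(?=(?:type)[\s]+([\S]+)|\(([^,]*))')
line = 'serno/QM00002.s1a on / (hammer2, local)'  # illustrative only
m = re.search(regex, line)
print(m.group(1), m.group(2), m.group(4))
# serno/QM00002.s1a / hammer2
```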
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index 8656daa7..cb2a625b 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -1,8 +1,8 @@
## template:jinja
# The top level settings are used as module
# and system configuration.
-
-{% if variant.endswith("bsd") %}
+{% set is_bsd = variant in ["dragonfly", "freebsd", "netbsd", "openbsd"] %}
+{% if is_bsd %}
syslog_fix_perms: root:wheel
{% elif variant in ["suse"] %}
syslog_fix_perms: root:root
@@ -11,17 +11,28 @@ syslog_fix_perms: root:root
# when a 'default' entry is found it will reference the 'default_user'
# from the distro configuration specified below
users:
+{% if variant in ["photon"] %}
+ - name: root
+ lock_passwd: false
+{% else %}
- default
+{% endif %}
+
+# VMware guest customization.
+{% if variant in ["photon"] %}
+disable_vmware_customization: true
+{% endif %}
# If this is set, 'root' will not be able to ssh in and they
# will get a message to login instead as the default $user
-{% if variant in ["freebsd"] %}
+{% if variant in ["freebsd", "photon"] %}
disable_root: false
{% else %}
disable_root: true
{% endif %}
-{% if variant in ["almalinux", "alpine", "amazon", "centos", "fedora", "rhel"] %}
+{% if variant in ["almalinux", "alpine", "amazon", "centos", "fedora",
+ "rhel", "rocky"] %}
mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
{% if variant == "amazon" %}
resize_rootfs: noblock
@@ -33,6 +44,8 @@ ssh_pwauth: 0
# This will cause the set+update hostname module to not operate (if true)
preserve_hostname: false
+# If you use the datasource_list array, keep its items on a single line.
+# If the array spans multiple lines, the ds-identify script won't read them.
{% if variant.endswith("bsd") %}
# This should not be required, but leave it in place until the real cause of
# not finding -any- datasources is resolved.
@@ -60,22 +73,24 @@ cloud_init_modules:
{% endif %}
- bootcmd
- write-files
-{% if variant not in ["netbsd"] %}
+{% if variant not in ["netbsd", "openbsd"] %}
- growpart
- resizefs
{% endif %}
-{% if variant not in ["freebsd", "netbsd"] %}
+{% if not is_bsd %}
- disk_setup
- mounts
{% endif %}
- set_hostname
- update_hostname
- update_etc_hosts
-{% if variant in ["alpine"] %}
+{% if variant in ["alpine", "photon"] %}
- resolv_conf
{% endif %}
{% if not variant.endswith("bsd") %}
+{% if variant not in ["photon"] %}
- ca-certs
+{% endif %}
- rsyslog
{% endif %}
- users-groups
@@ -89,11 +104,15 @@ cloud_config_modules:
- emit_upstart
- snap
{% endif %}
+{% if variant not in ["photon"] %}
- ssh-import-id
- locale
+{% endif %}
- set-passwords
-{% if variant in ["rhel", "fedora"] %}
+{% if variant in ["rhel", "fedora", "photon"] %}
+{% if variant not in ["photon"] %}
- spacewalk
+{% endif %}
- yum-add-repo
{% endif %}
{% if variant in ["ubuntu", "unknown", "debian"] %}
@@ -154,9 +173,11 @@ cloud_final_modules:
system_info:
# This will affect which distro class gets used
{% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "debian",
- "fedora", "freebsd", "netbsd", "openbsd", "rhel",
- "suse", "ubuntu"] %}
+ "fedora", "freebsd", "netbsd", "openbsd", "photon", "rhel",
+ "rocky", "suse", "ubuntu"] %}
distro: {{ variant }}
+{% elif variant in ["dragonfly"] %}
+ distro: dragonflybsd
{% else %}
# Unknown/fallback distro.
distro: ubuntu
@@ -207,7 +228,7 @@ system_info:
security: http://ports.ubuntu.com/ubuntu-ports
ssh_svcname: ssh
{% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "fedora",
- "rhel", "suse"] %}
+ "rhel", "rocky", "suse"] %}
# Default user name + that default users groups (if added/used)
default_user:
{% if variant == "amazon" %}
@@ -248,6 +269,15 @@ system_info:
groups: [wheel]
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
shell: /bin/tcsh
+{% elif variant in ["dragonfly"] %}
+ # Default user name + that default users groups (if added/used)
+ default_user:
+ name: dragonfly
+ lock_passwd: True
+ gecos: DragonFly
+ groups: [wheel]
+ sudo: ["ALL=(ALL) NOPASSWD:ALL"]
+ shell: /bin/sh
{% elif variant in ["netbsd"] %}
default_user:
name: netbsd
@@ -264,8 +294,27 @@ system_info:
groups: [wheel]
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
shell: /bin/ksh
+{% elif variant == "photon" %}
+ default_user:
+ name: photon
+ lock_passwd: True
+ gecos: PhotonOS
+ groups: [wheel]
+ sudo: ["ALL=(ALL) NOPASSWD:ALL"]
+ shell: /bin/bash
+ # Other config here will be given to the distro class and/or path classes
+ paths:
+ cloud_dir: /var/lib/cloud/
+ templates_dir: /etc/cloud/templates/
+
+ ssh_svcname: sshd
+
+#manage_etc_hosts: true
{% endif %}
{% if variant in ["freebsd", "netbsd", "openbsd"] %}
network:
renderers: ['{{ variant }}']
+{% elif variant in ["dragonfly"] %}
+ network:
+ renderers: ['freebsd']
{% endif %}
diff --git a/debian/changelog b/debian/changelog
index e513d19c..a7ef2618 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,59 @@
+cloud-init (21.2-43-g184c836a-0ubuntu1) impish; urgency=medium
+
+ * New upstream snapshot.
+ - Initial hotplug support (#936)
+ - Fix MIME policy failure on python version upgrade (#934)
+ - run-container: fixup the centos repos baseurls when using http_proxy
+ (#944) [Paride Legovini]
+ - tools: add support for building rpms on rocky linux (#940)
+ - ssh-util: allow cloudinit to merge all ssh keys into a custom user
+ file, defined in AuthorizedKeysFile (#937) [Emanuele Giuseppe Esposito]
+ (LP: #1911680)
+ - VMware: new "allow_raw_data" switch (#939) [xiaofengw-vmware]
+ - bump pycloudlib version (#935)
+ - add renanrodrigo as a contributor (#938) [Renan Rodrigo]
+ - testing: simplify test_upgrade.py (#932)
+ - freebsd/net_v1 format: read MTU from root (#930) [Gonéri Le Bouder]
+ - Add new network activators to bring up interfaces (#919)
+ - Detect a Python version change and clear the cache (#857)
+ [Robert Schweikert]
+ - cloud_tests: fix the Impish release name (#931) [Paride Legovini]
+ - Removed distro specific network code from Photon (#929) [sshedi]
+ - Add support for VMware PhotonOS (#909) [sshedi]
+ - cloud_tests: add impish release definition (#927) [Paride Legovini]
+ - docs: fix stale links rename master branch to main (#926)
+ - Fix DNS in NetworkState (SC-133) (#923)
+ - tests: Add 'adhoc' mark for integration tests (#925)
+ - Fix the spelling of "DigitalOcean" (#924) [Mark Mercado]
+ - Small Doc Update for ReportEventStack and Test (#920) [Mike Russell]
+ - Replace deprecated collections.Iterable with abc replacement (#922)
+ (LP: #1932048)
+ - testing: OCI availability domain is now required (SC-59) (#910)
+ - add DragonFlyBSD support (#904) [Gonéri Le Bouder]
+ - Use instance-data-sensitive.json in jinja templates (SC-117) (#917)
+ (LP: #1931392)
+ - doc: Update NoCloud docs stating required files (#918) (LP: #1931577)
+ - build-on-netbsd: don't pin a specific py3 version (#913)
+ [Gonéri Le Bouder]
+ - Create the log file with 640 permissions (#858) [Robert Schweikert]
+ - Allow braces to appear in dhclient output (#911) [eb3095]
+ - Docs: Replace all freenode references with libera (#912)
+ - openbsd/net: flush the route table on net restart (#908)
+ [Gonéri Le Bouder]
+ - Add Rocky Linux support to cloud-init (#906) [Louis Abel]
+ - Add "esposem" as contributor (#907) [Emanuele Giuseppe Esposito]
+ - Add integration test for #868 (#901)
+ - Added support for importing keys via primary/security mirror clauses
+ (#882) [Paul Goins] (LP: #1925395)
+ - [examples] config-user-groups expire in the future (#902)
+ [Geert Stappers]
+ - BSD: static network, set the mtu (#894) [Gonéri Le Bouder]
+ - Add integration test for lp-1920939 (#891)
+ - Fix unit tests breaking from new httpretty version (#903)
+ - Allow user control over update events (#834)
+
+ -- James Falcon <james.falcon@canonical.com> Mon, 19 Jul 2021 14:23:12 -0500
+
cloud-init (21.2-3-g899bfaa9-0ubuntu2) impish; urgency=medium
* d/po/templates.pot: refresh with debconf-updatepo
diff --git a/doc/examples/cloud-config-apt.txt b/doc/examples/cloud-config-apt.txt
index 004894b7..f4392326 100644
--- a/doc/examples/cloud-config-apt.txt
+++ b/doc/examples/cloud-config-apt.txt
@@ -138,6 +138,12 @@ apt:
# the first defining a valid mirror wins (in the order as defined here,
# not the order as listed in the config).
#
+ # Additionally, if the repository requires a custom signing key, it can be
+ # specified via the same fields as for custom sources:
+ # 'keyid': providing a key to import via shortid or fingerprint
+ # 'key': providing a raw PGP key
+ # 'keyserver': specify an alternate keyserver from which to pull the keys
+ # that were specified by keyid
- arches: [s390x, arm64]
# as above, allowing to have one config for different per arch mirrors
# security is optional, if not defined it is set to the same value as primary
diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt
index 4a5a7e20..1faecf75 100644
--- a/doc/examples/cloud-config-user-groups.txt
+++ b/doc/examples/cloud-config-user-groups.txt
@@ -19,7 +19,7 @@ users:
primary_group: foobar
groups: users
selinux_user: staff_u
- expiredate: '2012-09-01'
+ expiredate: '2032-09-01'
ssh_import_id: foobar
lock_passwd: false
passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index 10e8228f..67d6a9e3 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -27,7 +27,7 @@ Getting help
Having trouble? We would like to help!
- Try the :ref:`FAQ` – it's got answers to some common questions
-- Ask a question in the ``#cloud-init`` IRC channel on Freenode
+- Ask a question in the ``#cloud-init`` IRC channel on Libera
- Join and ask questions on the `cloud-init mailing list <https://launchpad.net/~cloud-init>`_
- Find a bug? `Report bugs on Launchpad <https://bugs.launchpad.net/cloud-init/+filebug>`_
@@ -49,6 +49,7 @@ Having trouble? We would like to help!
topics/format.rst
topics/examples.rst
+ topics/events.rst
topics/modules.rst
topics/merging.rst
diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst
index f3e13edc..a45a49d6 100644
--- a/doc/rtd/topics/availability.rst
+++ b/doc/rtd/topics/availability.rst
@@ -14,12 +14,13 @@ distributions and clouds, both public and private.
Distributions
=============
-Cloud-init has support across all major Linux distributions, FreeBSD, NetBSD
-and OpenBSD:
+Cloud-init has support across all major Linux distributions, FreeBSD, NetBSD,
+OpenBSD and DragonFlyBSD:
- Alpine Linux
- ArchLinux
- Debian
+- DragonFlyBSD
- Fedora
- FreeBSD
- Gentoo Linux
@@ -42,7 +43,7 @@ environments in the public cloud:
- Softlayer
- Rackspace Public Cloud
- IBM Cloud
-- Digital Ocean
+- DigitalOcean
- Bigstep
- Hetzner
- Joyent
diff --git a/doc/rtd/topics/cli.rst b/doc/rtd/topics/cli.rst
index 0ff230b5..b6115ed6 100644
--- a/doc/rtd/topics/cli.rst
+++ b/doc/rtd/topics/cli.rst
@@ -119,6 +119,10 @@ Current subcommands:
schema errors locally without the need for deployment. Schema
validation is work in progress and supports a subset of cloud-config
modules.
+ * ``hotplug-hook``: respond to newly added system devices by retrieving
+ updated system metadata and bringing up/down the corresponding device.
+ This command is intended to be called via a systemd service and is
+ not considered user-accessible except for debugging purposes.
.. _cli_features:
diff --git a/doc/rtd/topics/code_review.rst b/doc/rtd/topics/code_review.rst
index 68c10405..33aad789 100644
--- a/doc/rtd/topics/code_review.rst
+++ b/doc/rtd/topics/code_review.rst
@@ -22,7 +22,7 @@ questions about the code review process, or at any point during the
code review process, these are the available avenues:
* if you have an open Pull Request, comment on that pull request
-* join the ``#cloud-init`` channel on the Freenode IRC network and ask
+* join the ``#cloud-init`` channel on the Libera IRC network and ask
away
* send an email to the cloud-init mailing list,
cloud-init@lists.launchpad.net
@@ -58,12 +58,12 @@ Reviewer
Committer
A cloud-init core developer (i.e. a person who has permission to
- merge PRs into master).
+ merge PRs into **main**).
Prerequisites For Landing Pull Requests
=======================================
-Before a PR can be landed into master, the following conditions *must*
+Before a PR can be landed into **main**, the following conditions *must*
be met:
* the CLA has been signed by the **Proposer** (or is covered by an
@@ -148,7 +148,7 @@ temporarily closed. (The first two are covered in this section; see
(In the below, when the verbs "merge" or "squash merge" are used, they
should be understood to mean "squash merged using the GitHub UI", which
-is the only way that changes can land in cloud-init's master branch.)
+is the only way that changes can land in cloud-init's **main** branch.)
These are the steps that comprise the review phase:
diff --git a/doc/rtd/topics/datasources/digitalocean.rst b/doc/rtd/topics/datasources/digitalocean.rst
index 88f1e5f5..a4910408 100644
--- a/doc/rtd/topics/datasources/digitalocean.rst
+++ b/doc/rtd/topics/datasources/digitalocean.rst
@@ -1,7 +1,7 @@
.. _datasource_digital_ocean:
-Digital Ocean
-=============
+DigitalOcean
+============
The `DigitalOcean`_ datasource consumes the content served from DigitalOcean's
`metadata service`_. This metadata service serves information about the
diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
index edb41e2a..dbe21834 100644
--- a/doc/rtd/topics/datasources/nocloud.rst
+++ b/doc/rtd/topics/datasources/nocloud.rst
@@ -50,6 +50,8 @@ These user-data and meta-data files are expected to be in the following format.
/user-data
/meta-data
+Both files are required to be present for it to be considered a valid seed ISO.
+
Basically, user-data is simply user-data and meta-data is a yaml formatted file
representing what you'd find in the EC2 metadata service.
diff --git a/doc/rtd/topics/datasources/ovf.rst b/doc/rtd/topics/datasources/ovf.rst
index 85b0c377..bd5df860 100644
--- a/doc/rtd/topics/datasources/ovf.rst
+++ b/doc/rtd/topics/datasources/ovf.rst
@@ -18,6 +18,10 @@ configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
The settings that may be configured are:
+ * disable_vmware_customization: disable or enable the vmware customization
+ based on vmware customization files. (default: True)
+ * allow_raw_data: enable or disable the vmware customization based on raw
+ cloud-init data including metadata and userdata. (default: True)
* vmware_cust_file_max_wait: the maximum amount of clock time in seconds that
should be spent waiting for vmware customization files. (default: 15)
@@ -35,7 +39,7 @@ The following VMTools configuration options affect cloud-init's behavior on a bo
change this default behavior (for example: enabled by default) via
customization specification settings.
-VMWare admin can refer to (https://github.com/canonical/cloud-init/blob/master/cloudinit/sources/helpers/vmware/imc/config.py) and set the customization specification settings.
+VMWare admin can refer to (https://github.com/canonical/cloud-init/blob/main/cloudinit/sources/helpers/vmware/imc/config.py) and set the customization specification settings.
For more information, see [VMware vSphere Product Documentation](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-9A5093A5-C54F-4502-941B-3F9C0F573A39.html) and specific VMTools parameters consumed.
diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst
index fb3006fe..b897a318 100644
--- a/doc/rtd/topics/debugging.rst
+++ b/doc/rtd/topics/debugging.rst
@@ -258,9 +258,9 @@ from **-proposed**
* Create a `new cloud-init bug`_ reporting the version of cloud-init
affected
- * Ping upstream cloud-init on Freenode's `#cloud-init IRC channel`_
+ * Ping upstream cloud-init on Libera's `#cloud-init IRC channel`_
.. _SRU: https://wiki.ubuntu.com/StableReleaseUpdates
.. _CloudinitUpdates: https://wiki.ubuntu.com/CloudinitUpdates
.. _new cloud-init bug: https://bugs.launchpad.net/cloud-init/+filebug
-.. _#cloud-init IRC channel: https://webchat.freenode.net/?channel=#cloud-init
+.. _#cloud-init IRC channel: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init
diff --git a/doc/rtd/topics/events.rst b/doc/rtd/topics/events.rst
new file mode 100644
index 00000000..984e7577
--- /dev/null
+++ b/doc/rtd/topics/events.rst
@@ -0,0 +1,89 @@
+.. _events:
+
+******************
+Events and Updates
+******************
+
+Events
+======
+
+`Cloud-init`_ will fetch and apply cloud and user data configuration
+upon several event types. The two most common events for cloud-init
+are when an instance first boots and any subsequent boot thereafter (reboot).
+In addition to boot events, cloud-init users and vendors are interested
+in when devices are added. cloud-init currently supports the following
+event types:
+
+- **BOOT_NEW_INSTANCE**: New instance first boot
+- **BOOT**: Any system boot other than 'BOOT_NEW_INSTANCE'
+- **BOOT_LEGACY**: Similar to 'BOOT', but applies networking config twice each
+ boot: once during the Local stage, then again in the Network stage. This was
+ previously the default behavior, so this option exists to avoid regressions.
+- **HOTPLUG**: Dynamic add of a system device
+
+Future work will likely include infrastructure and support for the following
+events:
+
+- **METADATA_CHANGE**: An instance's metadata has changed
+- **USER_REQUEST**: Directed request to update
+
+Datasource Event Support
+========================
+
+All :ref:`datasources` by default support the ``BOOT_NEW_INSTANCE`` event.
+Each Datasource will declare a set of these events that it is capable of
+handling. Datasources may not support all event types. In some cases a system
+may be configured to allow a particular event but may be running on
+a platform whose datasource cannot support the event.
+
+Configuring Event Updates
+=========================
+
+Update configuration may be specified via user data,
+which can be used to enable or disable handling of specific events.
+This configuration will be honored as long as the events are supported by
+the datasource. However, configuration will always be applied at first
+boot, regardless of the user data specified.
+
+Updates
+~~~~~~~
+Update policy configuration defines which
+events are allowed to be handled. This is separate from whether a
+particular platform or datasource has the capability for such events.
+
+**scope**: *<name of the scope for event policy>*
+
+The ``scope`` value is a string which defines the domain in which the event
+occurs. Currently the only known scope is ``network``, though more scopes may
+be added in the future. Scopes are defined by convention, but arbitrary values
+can be used.
+
+**when**: *<list of events to handle for a particular scope>*
+
+Each ``scope`` requires a ``when`` element to specify which events
+are allowed to be handled.
+
+Hotplug
+=======
+When the hotplug event is supported by the data source and configured in
+user data, cloud-init will respond to the addition or removal of network
+interfaces to the system. In addition to fetching and updating the system
+metadata, cloud-init will also bring up/down the newly added interface.
+
+Examples
+========
+
+apply network config every boot
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+On every boot, apply network configuration found in the datasource.
+
+.. code-block:: yaml
+
+ # apply network config on every boot
+ updates:
+ network:
+ when: ['boot', 'hotplug']
+
+.. _Cloud-init: https://launchpad.net/cloud-init
+.. vi: textwidth=78
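(Putting the two halves of this doc together: an event fires only when it is both allowed — user data merged over datasource defaults — and supported by the datasource. A small sketch with plain strings standing in for the enums:)

```python
def event_enabled(event, scope, allowed, supported):
    """Allowed by (merged) config AND supported by the datasource."""
    return (event in allowed.get(scope, set())
            and event in supported.get(scope, set()))


allowed = {'network': {'boot', 'hotplug'}}        # from user data 'updates'
supported = {'network': {'hotplug', 'boot-new-instance'}}
print(event_enabled('hotplug', 'network', allowed, supported))  # True
print(event_enabled('boot', 'network', allowed, supported))     # False
```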
diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst
index 27fabf15..efc532de 100644
--- a/doc/rtd/topics/faq.rst
+++ b/doc/rtd/topics/faq.rst
@@ -10,7 +10,7 @@ Having trouble? We would like to help!
- First go through this page with answers to common questions
- Use the search bar at the upper left to search these docs
-- Ask a question in the ``#cloud-init`` IRC channel on Freenode
+- Ask a question in the ``#cloud-init`` IRC channel on Libera
- Join and ask questions on the `cloud-init mailing list <https://launchpad.net/~cloud-init>`_
- Find a bug? Check out the :ref:`reporting_bugs` topic for
how to report one
@@ -139,7 +139,7 @@ cloud-config is:
To verify your YAML, we do have a short script called `validate-yaml.py`_
that can validate your user data offline.
-.. _validate-yaml.py: https://github.com/canonical/cloud-init/blob/master/tools/validate-yaml.py
+.. _validate-yaml.py: https://github.com/canonical/cloud-init/blob/main/tools/validate-yaml.py
Another option is to run the following on an instance to debug userdata
provided to the system:
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst
index fa8aa925..05580804 100644
--- a/doc/rtd/topics/format.rst
+++ b/doc/rtd/topics/format.rst
@@ -55,7 +55,7 @@ to store the multipart message in ``user-data``:
$ cloud-init devel make-mime -a config.yaml:cloud-config -a script.sh:x-shellscript > user-data
-.. _make-mime: https://github.com/canonical/cloud-init/blob/master/cloudinit/cmd/devel/make_mime.py
+.. _make-mime: https://github.com/canonical/cloud-init/blob/main/cloudinit/cmd/devel/make_mime.py
User-Data Script
diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst
index 1850982c..6c17139f 100644
--- a/doc/rtd/topics/instancedata.rst
+++ b/doc/rtd/topics/instancedata.rst
@@ -509,14 +509,19 @@ EC2 instance:
Using instance-data
===================
-As of cloud-init v. 18.4, any variables present in
-``/run/cloud-init/instance-data.json`` can be used in:
+As of cloud-init v. 18.4, any instance-data can be used in:
* User-data scripts
* Cloud config data
* Command line interface via **cloud-init query** or
**cloud-init devel render**
+This means that any variable present in
+``/run/cloud-init/instance-data-sensitive.json`` can be used,
+unless a non-root user is using the command line interface.
+In the non-root user case,
+``/run/cloud-init/instance-data.json`` will be used instead.
+
Many clouds allow users to provide user-data to an instance at
the time the instance is launched. Cloud-init supports a number of
:ref:`user_data_formats`.
@@ -559,9 +564,39 @@ Below are some examples of providing these types of user-data:
{%- endif %}
...
+One way to easily explore what Jinja variables are available on your machine
+is to use the ``cloud-init query --format`` (-f) commandline option which will
+render any Jinja syntax you use. Warnings or exceptions will be raised on
+invalid instance-data keys, paths, or syntax.
+
+.. code-block:: shell-session
+
+ # List all instance-data keys and values as root user
+ % sudo cloud-init query --all
+ {...}
+
+ # Introspect nested keys on an object
+ % cloud-init query -f "{{ds.keys()}}"
+ dict_keys(['meta_data', '_doc'])
+
+ # Test your Jinja rendering syntax on the command-line directly
+
+ # Failure to reference valid top-level instance-data key
+ % cloud-init query -f "{{invalid.instance-data.key}}"
+ WARNING: Ignoring jinja template for query commandline: 'invalid' is undefined
+
+ # Failure to reference valid dot-delimited key path on a known top-level key
+ % cloud-init query -f "{{v1.not_here}}"
+ WARNING: Could not render jinja template variables in file 'query commandline': 'not_here'
+ CI_MISSING_JINJA_VAR/not_here
+
+ # Test expected value using valid instance-data key path
+ % cloud-init query -f "My AMI: {{ds.meta_data.ami_id}}"
+ My AMI: ami-0fecc35d3c8ba8d60
+
.. note::
Trying to reference jinja variables that don't exist in
- instance-data.json will result in warnings in ``/var/log/cloud-init.log``
+ instance-data will result in warnings in ``/var/log/cloud-init.log``
and the following string in your rendered user-data:
``CI_MISSING_JINJA_VAR/<your_varname>``.
diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst
index 17732c2a..3202163b 100644
--- a/doc/rtd/topics/network-config-format-v1.rst
+++ b/doc/rtd/topics/network-config-format-v1.rst
@@ -335,6 +335,10 @@ the following keys:
- ``address``: List of IPv4 or IPv6 addresses of nameservers.
- ``search``: List of hostnames to include in the resolv.conf search path.
+- ``interface``: Optional. Ties the nameserver definition to the specified
+ interface. The value specified here must match the `name` of an interface
+ defined in this config. If unspecified, this nameserver will be considered
+ a global nameserver.
**Nameserver Example**::
@@ -349,6 +353,7 @@ the following keys:
address: 192.168.23.14/27
gateway: 192.168.23.1
- type: nameserver
+ interface: interface0 # Ties nameserver to interface0 only
address:
- 192.168.23.2
- 8.8.8.8
diff --git a/doc/rtd/topics/testing.rst b/doc/rtd/topics/testing.rst
index 5b702bd2..d882e036 100644
--- a/doc/rtd/topics/testing.rst
+++ b/doc/rtd/topics/testing.rst
@@ -166,8 +166,8 @@ Test Argument Ordering
.. _pytest: https://docs.pytest.org/
.. _pytest fixtures: https://docs.pytest.org/en/latest/fixture.html
.. _TestGetPackageMirrorInfo: https://github.com/canonical/cloud-init/blob/42f69f410ab8850c02b1f53dd67c132aa8ef64f5/cloudinit/distros/tests/test_init.py\#L15
-.. _TestPrependBaseCommands: https://github.com/canonical/cloud-init/blob/master/cloudinit/tests/test_subp.py#L9
+.. _TestPrependBaseCommands: https://github.com/canonical/cloud-init/blob/fbcb224bc12495ba200ab107246349d802c5d8e6/cloudinit/tests/test_subp.py#L20
.. _assertion introspection: https://docs.pytest.org/en/latest/assert.html
.. _pytest 3.0: https://docs.pytest.org/en/latest/changelog.html#id1093
-.. _pytest.param: https://docs.pytest.org/en/latest/reference.html#pytest-param
+.. _pytest.param: https://docs.pytest.org/en/6.2.x/reference.html#pytest-param
.. _autospecced: https://docs.python.org/3.8/library/unittest.mock.html#autospeccing
diff --git a/integration-requirements.txt b/integration-requirements.txt
index 95891356..20940328 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -1,5 +1,5 @@
# PyPI requirements for cloud-init integration testing
# https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html
#
-pycloudlib @ git+https://github.com/canonical/pycloudlib.git@96b146ee1beb99b8e44e36525e18a9a20e00c3f2
+pycloudlib @ git+https://github.com/canonical/pycloudlib.git@245ca0b97e71926fdb651147e42d6256b17f6778
pytest
diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in
index 16138012..b930709b 100644
--- a/packages/redhat/cloud-init.spec.in
+++ b/packages/redhat/cloud-init.spec.in
@@ -119,6 +119,12 @@ version_pys=$(cd "$RPM_BUILD_ROOT" && find . -name version.py -type f)
( cd "$RPM_BUILD_ROOT" &&
sed -i "s,@@PACKAGED_VERSION@@,%{version}-%{release}," $version_pys )
+# patch hotplug /usr/libexec script path
+hotplug_file=$(cd "$RPM_BUILD_ROOT" && find . -name 10-cloud-init-hook-hotplug.rules -type f)
+
+( cd "$RPM_BUILD_ROOT" &&
+ sed -i "s,/usr/lib,%{_libexecdir}," $hotplug_file )
+
%clean
rm -rf $RPM_BUILD_ROOT
@@ -172,6 +178,7 @@ fi
%files
/lib/udev/rules.d/66-azure-ephemeral.rules
+/lib/udev/rules.d/10-cloud-init-hook-hotplug.rules
%if "%{init_system}" == "systemd"
/usr/lib/systemd/system-generators/cloud-init-generator
diff --git a/setup.py b/setup.py
index cbacf48e..7fa03e63 100755
--- a/setup.py
+++ b/setup.py
@@ -128,6 +128,7 @@ INITSYS_FILES = {
'systemd': [render_tmpl(f)
for f in (glob('systemd/*.tmpl') +
glob('systemd/*.service') +
+ glob('systemd/*.socket') +
glob('systemd/*.target'))
if (is_f(f) and not is_generator(f))],
'systemd.generators': [
@@ -156,7 +157,7 @@ USR = "usr"
ETC = "etc"
USR_LIB_EXEC = "usr/lib"
LIB = "lib"
-if os.uname()[0] == 'FreeBSD':
+if os.uname()[0] in ['FreeBSD', 'DragonFly']:
USR = "usr/local"
USR_LIB_EXEC = "usr/local/lib"
elif os.path.isfile('/etc/redhat-release'):
@@ -249,6 +250,7 @@ data_files = [
(ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')),
(ETC + '/cloud/templates', glob('templates/*')),
(USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify',
+ 'tools/hook-hotplug',
'tools/uncloud-init',
'tools/write-ssh-key-fingerprints']),
(USR + '/share/bash-completion/completions',
diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl
index 9b103ef9..0713db16 100755..100644
--- a/systemd/cloud-init-generator.tmpl
+++ b/systemd/cloud-init-generator.tmpl
@@ -83,7 +83,7 @@ default() {
check_for_datasource() {
local ds_rc=""
-{% if variant in ["almalinux", "rhel", "fedora", "centos"] %}
+{% if variant in ["almalinux", "rhel", "fedora", "centos", "rocky"] %}
local dsidentify="/usr/libexec/cloud-init/ds-identify"
{% else %}
local dsidentify="/usr/lib/cloud-init/ds-identify"
diff --git a/systemd/cloud-init-hotplugd.service b/systemd/cloud-init-hotplugd.service
new file mode 100644
index 00000000..b64632ef
--- /dev/null
+++ b/systemd/cloud-init-hotplugd.service
@@ -0,0 +1,22 @@
+# Paired with cloud-init-hotplugd.socket to read from the FIFO
+# /run/cloud-init/hook-hotplug-cmd which is created during a udev network
+# add or remove event as processed by 10-cloud-init-hook-hotplug.rules.
+
+# On start, read args from the FIFO, process and provide structured arguments
+# to `cloud-init devel hotplug-hook` which will setup or teardown network
+# devices as configured by user-data.
+
+# Known bug with an enforcing SELinux policy: LP: #1936229
+# cloud-init-hotplugd.service will read args from file descriptor 3
+
+[Unit]
+Description=cloud-init hotplug hook daemon
+After=cloud-init-hotplugd.socket
+
+[Service]
+Type=simple
+ExecStart=/bin/bash -c 'read args <&3; echo "args=$args"; \
+ exec /usr/bin/cloud-init devel hotplug-hook $args; \
+ exit 0'
+SyslogIdentifier=cloud-init-hotplugd
+TimeoutStopSec=5
diff --git a/systemd/cloud-init-hotplugd.socket b/systemd/cloud-init-hotplugd.socket
new file mode 100644
index 00000000..aa093016
--- /dev/null
+++ b/systemd/cloud-init-hotplugd.socket
@@ -0,0 +1,13 @@
+# cloud-init-hotplugd.socket listens on the FIFO file
+# /run/cloud-init/hook-hotplug-cmd which is created during a udev network
+# add or remove event as processed by 10-cloud-init-hook-hotplug.rules.
+
+# Known bug with an enforcing SELinux policy: LP: #1936229
+[Unit]
+Description=cloud-init hotplug hook socket
+
+[Socket]
+ListenFIFO=/run/cloud-init/hook-hotplug-cmd
+
+[Install]
+WantedBy=cloud-init.target
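(A hypothetical illustration of the producer side of this handshake: the udev hook writes one line of arguments into the FIFO, socket activation starts the paired service, and the service reads that line from fd 3 before exec'ing `cloud-init devel hotplug-hook`. The path comes from ListenFIFO= above; the argument string is invented for illustration:)

```python
FIFO = '/run/cloud-init/hook-hotplug-cmd'


def notify_hotplug(args: str) -> None:
    """Write one line of args; blocks until a reader opens the FIFO."""
    with open(FIFO, 'w') as f:
        f.write(args + '\n')


# notify_hotplug('--hypothetical-args net add /devices/.../eth1')
```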
diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl
index a5c51277..c773e411 100644
--- a/systemd/cloud-init.service.tmpl
+++ b/systemd/cloud-init.service.tmpl
@@ -1,7 +1,9 @@
## template:jinja
[Unit]
Description=Initial cloud-init job (metadata service crawler)
+{% if variant not in ["photon"] %}
DefaultDependencies=no
+{% endif %}
Wants=cloud-init-local.service
Wants=sshd-keygen.service
Wants=sshd.service
@@ -10,7 +12,7 @@ After=systemd-networkd-wait-online.service
{% if variant in ["ubuntu", "unknown", "debian"] %}
After=networking.service
{% endif %}
-{% if variant in ["almalinux", "centos", "fedora", "rhel"] %}
+{% if variant in ["almalinux", "centos", "fedora", "rhel", "rocky"] %}
After=network.service
After=NetworkManager.service
{% endif %}
diff --git a/sysvinit/freebsd/cloudinit b/sysvinit/freebsd/cloudinit
index aa5bd118..d26f3d0f 100755
--- a/sysvinit/freebsd/cloudinit
+++ b/sysvinit/freebsd/cloudinit
@@ -2,7 +2,7 @@
# PROVIDE: cloudinit
# REQUIRE: FILESYSTEMS NETWORKING cloudinitlocal ldconfig devd
-# BEFORE: cloudconfig cloudfinal
+# BEFORE: LOGIN cloudconfig cloudfinal
. /etc/rc.subr
diff --git a/templates/chrony.conf.photon.tmpl b/templates/chrony.conf.photon.tmpl
new file mode 100644
index 00000000..8551f793
--- /dev/null
+++ b/templates/chrony.conf.photon.tmpl
@@ -0,0 +1,48 @@
+## template:jinja
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Record the rate at which the system clock gains/loses time.
+driftfile /var/lib/chrony/drift
+
+# Allow the system clock to be stepped in the first three updates
+# if its offset is larger than 1 second.
+makestep 1.0 3
+
+# Enable kernel synchronization of the real-time clock (RTC).
+rtcsync
+
+# Enable hardware timestamping on all interfaces that support it.
+#hwtimestamp *
+
+# Increase the minimum number of selectable sources required to adjust
+# the system clock.
+#minsources 2
+
+# Allow NTP client access from local network.
+#allow 192.168.0.0/16
+
+# Serve time even if not synchronized to a time source.
+#local stratum 10
+
+# Specify file containing keys for NTP authentication.
+#keyfile /etc/chrony.keys
+
+# Get TAI-UTC offset and leap seconds from the system tz database.
+leapsectz right/UTC
+
+# Specify directory for log files.
+logdir /var/log/chrony
+
+# Select which information is logged.
+#log measurements statistics tracking
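The pool/server blocks above are ordinary Jinja loops (cloud-init strips the
`## template:jinja` header before rendering). As a rough illustration of what
the fragment expands to, with made-up pool names:

    from jinja2 import Template

    # Same loop as in the template; '-%}' trims the whitespace after the tag
    # so each entry renders on its own line.
    fragment = (
        "{% if pools %}# pools\n{% endif %}"
        "{% for pool in pools -%}\n"
        "pool {{pool}} iburst\n"
        "{% endfor %}"
    )
    print(Template(fragment).render(pools=['0.pool.ntp.org', '1.pool.ntp.org']))
    # # pools
    # pool 0.pool.ntp.org iburst
    # pool 1.pool.ntp.org iburst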
diff --git a/templates/hosts.photon.tmpl b/templates/hosts.photon.tmpl
new file mode 100644
index 00000000..0fd6f722
--- /dev/null
+++ b/templates/hosts.photon.tmpl
@@ -0,0 +1,22 @@
+## template:jinja
+{#
+This file /etc/cloud/templates/hosts.photon.tmpl is only utilized
+if enabled in cloud-config. Specifically, in order to enable it
+you need to add the following to config:
+ manage_etc_hosts: True
+-#}
+# Your system has configured 'manage_etc_hosts' as True.
+# As a result, if you wish for changes to this file to persist
+# then you will need to either
+# a.) make changes to the master file in /etc/cloud/templates/hosts.photon.tmpl
+# b.) change or remove the value of 'manage_etc_hosts' in
+# /etc/cloud/cloud.cfg or cloud-config from user-data
+#
+# The following lines are desirable for IPv4 capable hosts
+127.0.0.1 {{fqdn}} {{hostname}}
+127.0.0.1 localhost.localdomain localhost
+127.0.0.1 localhost4.localdomain4 localhost4
+
+# The following lines are desirable for IPv6 capable hosts
+::1 {{fqdn}} {{hostname}}
+::1 localhost6.localdomain6 localhost6
diff --git a/templates/ntp.conf.photon.tmpl b/templates/ntp.conf.photon.tmpl
new file mode 100644
index 00000000..4d4910d1
--- /dev/null
+++ b/templates/ntp.conf.photon.tmpl
@@ -0,0 +1,61 @@
+## template:jinja
+
+# For more information about this file, see the man pages
+# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5).
+
+driftfile /var/lib/ntp/drift
+
+# Permit time synchronization with our time source, but do not
+# permit the source to query or modify the service on this system.
+restrict default kod nomodify notrap nopeer noquery
+restrict -6 default kod nomodify notrap nopeer noquery
+
+# Permit all access over the loopback interface. This could
+# be tightened as well, but to do so would affect some of
+# the administrative functions.
+restrict 127.0.0.1
+restrict -6 ::1
+
+# Hosts on local network are less restricted.
+#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
+
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+#broadcast 192.168.1.255 autokey # broadcast server
+#broadcastclient # broadcast client
+#broadcast 224.0.1.1 autokey # multicast server
+#multicastclient 224.0.1.1 # multicast client
+#manycastserver 239.255.254.254 # manycast server
+#manycastclient 239.255.254.254 autokey # manycast client
+
+# Enable public key cryptography.
+#crypto
+
+includefile /etc/ntp/crypto/pw
+
+# Key file containing the keys and key identifiers used when operating
+# with symmetric key cryptography.
+keys /etc/ntp/keys
+
+# Specify the key identifiers which are trusted.
+#trustedkey 4 8 42
+
+# Specify the key identifier to use with the ntpdc utility.
+#requestkey 8
+
+# Specify the key identifier to use with the ntpq utility.
+#controlkey 8
+
+# Enable writing of statistics records.
+#statistics clockstats cryptostats loopstats peerstats
diff --git a/templates/resolv.conf.tmpl b/templates/resolv.conf.tmpl
index f870be67..72a37bf7 100644
--- a/templates/resolv.conf.tmpl
+++ b/templates/resolv.conf.tmpl
@@ -22,7 +22,7 @@ domain {{domain}}
sortlist {% for sort in sortlist %}{{sort}} {% endfor %}
{% endif %}
{#
- Flags and options are required to be on the
+ Flags and options are required to be on the
same line preceded by "options" keyword
#}
{% if options or flags %}
diff --git a/templates/systemd.resolved.conf.tmpl b/templates/systemd.resolved.conf.tmpl
new file mode 100644
index 00000000..fca50d37
--- /dev/null
+++ b/templates/systemd.resolved.conf.tmpl
@@ -0,0 +1,15 @@
+## template:jinja
+# Your system has been configured with 'manage-resolv-conf' set to true.
+# As a result, cloud-init has written this file with configuration data
+# that it has been provided. Cloud-init, by default, will write this file
+# a single time (PER_ONCE).
+#
+[Resolve]
+LLMNR=false
+{% if nameservers is defined %}
+DNS={% for server in nameservers %}{{server}} {% endfor %}
+{% endif %}
+
+{% if searchdomains is defined %}
+Domains={% for search in searchdomains %}{{search}} {% endfor %}
+{% endif %}
diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml
index 6249efc5..c52b78f9 100644
--- a/tests/cloud_tests/releases.yaml
+++ b/tests/cloud_tests/releases.yaml
@@ -133,6 +133,23 @@ features:
releases:
# UBUNTU =================================================================
+ impish:
+ # EOL: July 2022
+ default:
+ enabled: true
+ release: impish
+ version: "21.10"
+ os: ubuntu
+ feature_groups:
+ - base
+ - debian_base
+ - ubuntu_specific
+ lxd:
+ sstreams_server: https://cloud-images.ubuntu.com/daily
+ alias: impish
+ setup_overrides: null
+ override_templates: false
+
hirsute:
# EOL: Jan 2022
default:
diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py
index 7dcccbdd..49baadb0 100644
--- a/tests/cloud_tests/util.py
+++ b/tests/cloud_tests/util.py
@@ -23,7 +23,7 @@ from tests.cloud_tests import LOG
OS_FAMILY_MAPPING = {
'debian': ['debian', 'ubuntu'],
- 'redhat': ['centos', 'rhel', 'fedora'],
+ 'redhat': ['centos', 'photon', 'rhel', 'fedora'],
'gentoo': ['gentoo'],
'freebsd': ['freebsd'],
'suse': ['sles'],
diff --git a/tests/integration_tests/assets/keys/id_rsa.test1 b/tests/integration_tests/assets/keys/id_rsa.test1
new file mode 100644
index 00000000..bd4c822e
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test1
@@ -0,0 +1,38 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAYEAtRlG96aJ23URvAgO/bBsuLl+lquc350aSwV98/i8vlvOn5GVcHye
+t/rXQg4lZ4s0owG3kWyQFY8nvTk+G+UNU8fN0anAzBDi+4MzsejkF9scjTMFmXVrIpICqV
+3bYQNjPv6r+ubQdkD01du3eB9t5/zl84gtshp0hBdofyz8u1/A25s7fVU67GyI7PdKvaS+
+yvJSInZnb2e9VQzfJC+qAnN7gUZatBKjdgUtJeiUUeDaVnaS17b0aoT9iBO0sIcQtOTBlY
+lCjFt1TAMLZ64Hj3SfGZB7Yj0Z+LzFB2IWX1zzsjI68YkYPKOSL/NYhQU9e55kJQ7WnngN
+HY/2n/A7dNKSFDmgM5c9IWgeZ7fjpsfIYAoJ/CAxFIND+PEHd1gCS6xoEhaUVyh5WH/Xkw
+Kv1nx4AiZ2BFCE+75kySRLZUJ+5y0r3DU5ktMXeURzVIP7pu0R8DCul+GU+M/+THyWtAEO
+geaNJ6fYpo2ipDhbmTYt3kk2lMIapRxGBFs+37sdAAAFgGGJssNhibLDAAAAB3NzaC1yc2
+EAAAGBALUZRvemidt1EbwIDv2wbLi5fparnN+dGksFffP4vL5bzp+RlXB8nrf610IOJWeL
+NKMBt5FskBWPJ705PhvlDVPHzdGpwMwQ4vuDM7Ho5BfbHI0zBZl1ayKSAqld22EDYz7+q/
+rm0HZA9NXbt3gfbef85fOILbIadIQXaH8s/LtfwNubO31VOuxsiOz3Sr2kvsryUiJ2Z29n
+vVUM3yQvqgJze4FGWrQSo3YFLSXolFHg2lZ2kte29GqE/YgTtLCHELTkwZWJQoxbdUwDC2
+euB490nxmQe2I9Gfi8xQdiFl9c87IyOvGJGDyjki/zWIUFPXueZCUO1p54DR2P9p/wO3TS
+khQ5oDOXPSFoHme346bHyGAKCfwgMRSDQ/jxB3dYAkusaBIWlFcoeVh/15MCr9Z8eAImdg
+RQhPu+ZMkkS2VCfuctK9w1OZLTF3lEc1SD+6btEfAwrpfhlPjP/kx8lrQBDoHmjSen2KaN
+oqQ4W5k2Ld5JNpTCGqUcRgRbPt+7HQAAAAMBAAEAAAGBAJJCTOd70AC2ptEGbR0EHHqADT
+Wgefy7A94tHFEqxTy0JscGq/uCGimaY7kMdbcPXT59B4VieWeAC2cuUPP0ZHQSfS5ke7oT
+tU3N47U+0uBVbNS4rUAH7bOo2o9wptnOA5x/z+O+AARRZ6tEXQOd1oSy4gByLf2Wkh2QTi
+vP6Hln1vlFgKEzcXg6G8fN3MYWxKRhWmZM3DLERMvorlqqSBLcs5VvfZfLKcsKWTExioAq
+KgwEjYm8T9+rcpsw1xBus3j9k7wCI1Sus6PCDjq0pcYKLMYM7p8ygnU2tRYrOztdIxgWRA
+w/1oenm1Mqq2tV5xJcBCwCLOGe6SFwkIRywOYc57j5McH98Xhhg9cViyyBdXy/baF0mro+
+qPhOsWDxqwD4VKZ9UmQ6O8kPNKcc7QcIpFJhcO0g9zbp/MT0KueaWYrTKs8y4lUkTT7Xz6
++MzlR122/JwlAbBo6Y2kWtB+y+XwBZ0BfyJsm2czDhKm7OI5KfuBNhq0tFfKwOlYBq4QAA
+AMAyvUof1R8LLISkdO3EFTKn5RGNkPPoBJmGs6LwvU7NSjjLj/wPQe4jsIBc585tvbrddp
+60h72HgkZ5tqOfdeBYOKqX0qQQBHUEvI6M+NeQTQRev8bCHMLXQ21vzpClnrwNzlja359E
+uTRfiPRwIlyPLhOUiClBDSAnBI9h82Hkk3zzsQ/xGfsPB7iOjRbW69bMRSVCRpeweCVmWC
+77DTsEOq69V2TdljhQNIXE5OcOWonIlfgPiI74cdd+dLhzc/AAAADBAO1/JXd2kYiRyNkZ
+aXTLcwiSgBQIYbobqVP3OEtTclr0P1JAvby3Y4cCaEhkenx+fBqgXAku5lKM+U1Q9AEsMk
+cjIhaDpb43rU7GPjMn4zHwgGsEKd5pC1yIQ2PlK+cHanAdsDjIg+6RR+fuvid/mBeBOYXb
+Py0sa3HyekLJmCdx4UEyNASoiNaGFLQVAqo+RACsXy6VMxFH5dqDYlvwrfUQLwxJmse9Vb
+GEuuPAsklNugZqssC2XOIujFVUpslduQAAAMEAwzVHQVtsc3icCSzEAARpDTUdTbI29OhB
+/FMBnjzS9/3SWfLuBOSm9heNCHs2jdGNb8cPdKZuY7S9Fx6KuVUPyTbSSYkjj0F4fTeC9g
+0ym4p4UWYdF67WSWwLORkaG8K0d+G/CXkz8hvKUg6gcZWKBHAE1ROrHu1nsc8v7mkiKq4I
+bnTw5Q9TgjbWcQWtgPq0wXyyl/K8S1SFdkMCTOHDD0RQ+jTV2WNGVwFTodIRHenX+Rw2g4
+CHbTWbsFrHR1qFAAAACmphbWVzQG5ld3Q=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/integration_tests/assets/keys/id_rsa.test1.pub b/tests/integration_tests/assets/keys/id_rsa.test1.pub
new file mode 100644
index 00000000..3d2e26e1
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test1.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC1GUb3ponbdRG8CA79sGy4uX6Wq5zfnRpLBX3z+Ly+W86fkZVwfJ63+tdCDiVnizSjAbeRbJAVjye9OT4b5Q1Tx83RqcDMEOL7gzOx6OQX2xyNMwWZdWsikgKpXdthA2M+/qv65tB2QPTV27d4H23n/OXziC2yGnSEF2h/LPy7X8Dbmzt9VTrsbIjs90q9pL7K8lIidmdvZ71VDN8kL6oCc3uBRlq0EqN2BS0l6JRR4NpWdpLXtvRqhP2IE7SwhxC05MGViUKMW3VMAwtnrgePdJ8ZkHtiPRn4vMUHYhZfXPOyMjrxiRg8o5Iv81iFBT17nmQlDtaeeA0dj/af8Dt00pIUOaAzlz0haB5nt+Omx8hgCgn8IDEUg0P48Qd3WAJLrGgSFpRXKHlYf9eTAq/WfHgCJnYEUIT7vmTJJEtlQn7nLSvcNTmS0xd5RHNUg/um7RHwMK6X4ZT4z/5MfJa0AQ6B5o0np9imjaKkOFuZNi3eSTaUwhqlHEYEWz7fux0= test1@host
diff --git a/tests/integration_tests/assets/keys/id_rsa.test2 b/tests/integration_tests/assets/keys/id_rsa.test2
new file mode 100644
index 00000000..5854d901
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test2
@@ -0,0 +1,38 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAYEAvK50D2PWOc4ikyHVRJS6tDhqzjL5cKiivID4p1X8BYCVw83XAEGO
+LnItUyVXHNADlh6fpVq1NY6A2JVtygoPF6ZFx8ph7IWMmnhDdnxLLyGsbhd1M1tiXJD/R+
+3WnGHRJ4PKrQavMLgqHRrieV3QVVfjFSeo6jX/4TruP6ZmvITMZWJrXaGphxJ/pPykEdkO
+i8AmKU9FNviojyPS2nNtj9B/635IdgWvrd7Vf5Ycsw9MR55LWSidwa856RH62Yl6LpEGTH
+m1lJiMk1u88JPSqvohhaUkLKkFpcQwcB0m76W1KOyllJsmX8bNXrlZsI+WiiYI7Xl5vQm2
+17DEuNeavtPAtDMxu8HmTg2UJ55Naxehbfe2lx2k5kYGGw3i1O1OVN2pZ2/OB71LucYd/5
+qxPaz03wswcGOJYGPkNc40vdES/Scc7Yt8HsnZuzqkyOgzn0HiUCzoYUYLYTpLf+yGmwxS
+yAEY056aOfkCsboKHOKiOmlJxNaZZFQkX1evep4DAAAFgC7HMbUuxzG1AAAAB3NzaC1yc2
+EAAAGBALyudA9j1jnOIpMh1USUurQ4as4y+XCooryA+KdV/AWAlcPN1wBBji5yLVMlVxzQ
+A5Yen6VatTWOgNiVbcoKDxemRcfKYeyFjJp4Q3Z8Sy8hrG4XdTNbYlyQ/0ft1pxh0SeDyq
+0GrzC4Kh0a4nld0FVX4xUnqOo1/+E67j+mZryEzGVia12hqYcSf6T8pBHZDovAJilPRTb4
+qI8j0tpzbY/Qf+t+SHYFr63e1X+WHLMPTEeeS1koncGvOekR+tmJei6RBkx5tZSYjJNbvP
+CT0qr6IYWlJCypBaXEMHAdJu+ltSjspZSbJl/GzV65WbCPloomCO15eb0JttewxLjXmr7T
+wLQzMbvB5k4NlCeeTWsXoW33tpcdpOZGBhsN4tTtTlTdqWdvzge9S7nGHf+asT2s9N8LMH
+BjiWBj5DXONL3REv0nHO2LfB7J2bs6pMjoM59B4lAs6GFGC2E6S3/shpsMUsgBGNOemjn5
+ArG6ChziojppScTWmWRUJF9Xr3qeAwAAAAMBAAEAAAGASj/kkEHbhbfmxzujL2/P4Sfqb+
+aDXqAeGkwujbs6h/fH99vC5ejmSMTJrVSeaUo6fxLiBDIj6UWA0rpLEBzRP59BCpRL4MXV
+RNxav/+9nniD4Hb+ug0WMhMlQmsH71ZW9lPYqCpfOq7ec8GmqdgPKeaCCEspH7HMVhfYtd
+eHylwAC02lrpz1l5/h900sS5G9NaWR3uPA+xbzThDs4uZVkSidjlCNt1QZhDSSk7jA5n34
+qJ5UTGu9WQDZqyxWKND+RIyQuFAPGQyoyCC1FayHO2sEhT5qHuumL14Mn81XpzoXFoKyql
+rhBDe+pHhKArBYt92Evch0k1ABKblFxtxLXcvk4Fs7pHi+8k4+Cnazej2kcsu1kURlMZJB
+w2QT/8BV4uImbH05LtyscQuwGzpIoxqrnHrvg5VbohStmhoOjYybzqqW3/M0qhkn5JgTiy
+dJcHRJisRnAcmbmEchYtLDi6RW1e022H4I9AFXQqyr5HylBq6ugtWcFCsrcX8ibZ8xAAAA
+wQCAOPgwae6yZLkrYzRfbxZtGKNmhpI0EtNSDCHYuQQapFZJe7EFENs/VAaIiiut0yajGj
+c3aoKcwGIoT8TUM8E3GSNW6+WidUOC7H6W+/6N2OYZHRBACGz820xO+UBCl2oSk+dLBlfr
+IQzBGUWn5uVYCs0/2nxfCdFyHtMK8dMF/ypbdG+o1rXz5y9b7PVG6Mn+o1Rjsdkq7VERmy
+Pukd8hwATOIJqoKl3TuFyBeYFLqe+0e7uTeswQFw17PF31VjAAAADBAOpJRQb8c6qWqsvv
+vkve0uMuL0DfWW0G6+SxjPLcV6aTWL5xu0Grd8uBxDkkHU/CDrAwpchXyuLsvbw21Eje/u
+U5k9nLEscWZwcX7odxlK+EfAY2Bf5+Hd9bH5HMzTRJH8KkWK1EppOLPyiDxz4LZGzPLVyv
+/1PgSuvXkSWk1KIE4SvSemyxGX2tPVI6uO+URqevfnPOS1tMB7BMQlgkR6eh4bugx9UYx9
+mwlXonNa4dN0iQxZ7N4rKFBbT/uyB2bQAAAMEAzisnkD8k9Tn8uyhxpWLHwb03X4ZUUHDV
+zu15e4a8dZ+mM8nHO986913Xz5JujlJKkGwFTvgWkIiR2zqTEauZHARH7gANpaweTm6lPd
+E4p2S0M3ulY7xtp9lCFIrDhMPPkGq8SFZB6qhgucHcZSRLq6ZDou3S2IdNOzDTpBtkhRCS
+0zFcdTLh3zZweoy8HGbW36bwB6s1CIL76Pd4F64i0Ms9CCCU6b+E5ArFhYQIsXiDbgHWbD
+tZRSm2GEgnDGAvAAAACmphbWVzQG5ld3Q=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/integration_tests/assets/keys/id_rsa.test2.pub b/tests/integration_tests/assets/keys/id_rsa.test2.pub
new file mode 100644
index 00000000..f3831a57
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test2.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC8rnQPY9Y5ziKTIdVElLq0OGrOMvlwqKK8gPinVfwFgJXDzdcAQY4uci1TJVcc0AOWHp+lWrU1joDYlW3KCg8XpkXHymHshYyaeEN2fEsvIaxuF3UzW2JckP9H7dacYdEng8qtBq8wuCodGuJ5XdBVV+MVJ6jqNf/hOu4/pma8hMxlYmtdoamHEn+k/KQR2Q6LwCYpT0U2+KiPI9Lac22P0H/rfkh2Ba+t3tV/lhyzD0xHnktZKJ3BrznpEfrZiXoukQZMebWUmIyTW7zwk9Kq+iGFpSQsqQWlxDBwHSbvpbUo7KWUmyZfxs1euVmwj5aKJgjteXm9CbbXsMS415q+08C0MzG7weZODZQnnk1rF6Ft97aXHaTmRgYbDeLU7U5U3alnb84HvUu5xh3/mrE9rPTfCzBwY4lgY+Q1zjS90RL9Jxzti3weydm7OqTI6DOfQeJQLOhhRgthOkt/7IabDFLIARjTnpo5+QKxugoc4qI6aUnE1plkVCRfV696ngM= test2@host
diff --git a/tests/integration_tests/assets/keys/id_rsa.test3 b/tests/integration_tests/assets/keys/id_rsa.test3
new file mode 100644
index 00000000..2596c762
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test3
@@ -0,0 +1,38 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAYEApPG4MdkYQKD57/qreFrh9GRC22y66qZOWZWRjC887rrbvBzO69hV
+yJpTIXleJEvpWiHYcjMR5G6NNFsnNtZ4fxDqmSc4vcFj53JsE/XNqLKq6psXadCb5vkNpG
+bxA+Z5bJlzJ969PgJIIEbgc86sei4kgR2MuPWqtZbY5GkpNCTqWuLYeFK+14oFruA2nyWH
+9MOIRDHK/d597psHy+LTMtymO7ZPhO571abKw6jvvwiSeDxVE9kV7KAQIuM9/S3gftvgQQ
+ron3GL34pgmIabdSGdbfHqGDooryJhlbquJZELBN236KgRNTCAjVvUzjjQr1eRP3xssGwV
+O6ECBGCQLl/aYogAgtwnwj9iXqtfiLK3EwlgjquU4+JQ0CVtLhG3gIZB+qoMThco0pmHTr
+jtfQCwrztsBBFunSa2/CstuV1mQ5O5ZrZ6ACo9yPRBNkns6+CiKdtMtCtzi3k2RDz9jpYm
+Pcak03Lr7IkdC1Tp6+jA+//yPHSO1o4CqW89IQzNAAAFgEUd7lZFHe5WAAAAB3NzaC1yc2
+EAAAGBAKTxuDHZGECg+e/6q3ha4fRkQttsuuqmTlmVkYwvPO6627wczuvYVciaUyF5XiRL
+6Voh2HIzEeRujTRbJzbWeH8Q6pknOL3BY+dybBP1zaiyquqbF2nQm+b5DaRm8QPmeWyZcy
+fevT4CSCBG4HPOrHouJIEdjLj1qrWW2ORpKTQk6lri2HhSvteKBa7gNp8lh/TDiEQxyv3e
+fe6bB8vi0zLcpju2T4Tue9WmysOo778Ikng8VRPZFeygECLjPf0t4H7b4EEK6J9xi9+KYJ
+iGm3UhnW3x6hg6KK8iYZW6riWRCwTdt+ioETUwgI1b1M440K9XkT98bLBsFTuhAgRgkC5f
+2mKIAILcJ8I/Yl6rX4iytxMJYI6rlOPiUNAlbS4Rt4CGQfqqDE4XKNKZh0647X0AsK87bA
+QRbp0mtvwrLbldZkOTuWa2egAqPcj0QTZJ7OvgoinbTLQrc4t5NkQ8/Y6WJj3GpNNy6+yJ
+HQtU6evowPv/8jx0jtaOAqlvPSEMzQAAAAMBAAEAAAGAGaqbdPZJNdVWzyb8g6/wtSzc0n
+Qq6dSTIJGLonq/So69HpqFAGIbhymsger24UMGvsXBfpO/1wH06w68HWZmPa+OMeLOi4iK
+WTuO4dQ/+l5DBlq32/lgKSLcIpb6LhcxEdsW9j9Mx1dnjc45owun/yMq/wRwH1/q/nLIsV
+JD3R9ZcGcYNDD8DWIm3D17gmw+qbG7hJES+0oh4n0xS2KyZpm7LFOEMDVEA8z+hE/HbryQ
+vjD1NC91n+qQWD1wKfN3WZDRwip3z1I5VHMpvXrA/spHpa9gzHK5qXNmZSz3/dfA1zHjCR
+2dHjJnrIUH8nyPfw8t+COC+sQBL3Nr0KUWEFPRM08cOcQm4ctzg17aDIZBONjlZGKlReR8
+1zfAw84Q70q2spLWLBLXSFblHkaOfijEbejIbaz2UUEQT27WD7RHAORdQlkx7eitk66T9d
+DzIq/cpYhm5Fs8KZsh3PLldp9nsHbD2Oa9J9LJyI4ryuIW0mVwRdvPSiiYi3K+mDCpAAAA
+wBe+ugEEJ+V7orb1f4Zez0Bd4FNkEc52WZL4CWbaCtM+ZBg5KnQ6xW14JdC8IS9cNi/I5P
+yLsBvG4bWPLGgQruuKY6oLueD6BFnKjqF6ACUCiSQldh4BAW1nYc2U48+FFvo3ZQyudFSy
+QEFlhHmcaNMDo0AIJY5Xnq2BG3nEX7AqdtZ8hhenHwLCRQJatDwSYBHDpSDdh9vpTnGp/2
+0jBz25Ko4UANzvSAc3sA4yN3jfpoM366TgdNf8x3g1v7yljQAAAMEA0HSQjzH5nhEwB58k
+mYYxnBYp1wb86zIuVhAyjZaeinvBQSTmLow8sXIHcCVuD3CgBezlU2SX5d9YuvRU9rcthi
+uzn4wWnbnzYy4SwzkMJXchUAkumFVD8Hq5TNPh2Z+033rLLE08EhYypSeVpuzdpFoStaS9
+3DUZA2bR/zLZI9MOVZRUcYImNegqIjOYHY8Sbj3/0QPV6+WpUJFMPvvedWhfaOsRMTA6nr
+VLG4pxkrieVl0UtuRGbzD/exXhXVi7AAAAwQDKkJj4ez/+KZFYlZQKiV0BrfUFcgS6ElFM
+2CZIEagCtu8eedrwkNqx2FUX33uxdvUTr4c9I3NvWeEEGTB9pgD4lh1x/nxfuhyGXtimFM
+GnznGV9oyz0DmKlKiKSEGwWf5G+/NiiCwwVJ7wsQQm7TqNtkQ9b8MhWWXC7xlXKUs7dmTa
+e8AqAndCCMEnbS1UQFO/R5PNcZXkFWDggLQ/eWRYKlrXgdnUgH6h0saOcViKpNJBUXb3+x
+eauhOY52PS/BcAAAAKamFtZXNAbmV3dAE=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/tests/integration_tests/assets/keys/id_rsa.test3.pub b/tests/integration_tests/assets/keys/id_rsa.test3.pub
new file mode 100644
index 00000000..057db632
--- /dev/null
+++ b/tests/integration_tests/assets/keys/id_rsa.test3.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCk8bgx2RhAoPnv+qt4WuH0ZELbbLrqpk5ZlZGMLzzuutu8HM7r2FXImlMheV4kS+laIdhyMxHkbo00Wyc21nh/EOqZJzi9wWPncmwT9c2osqrqmxdp0Jvm+Q2kZvED5nlsmXMn3r0+AkggRuBzzqx6LiSBHYy49aq1ltjkaSk0JOpa4th4Ur7XigWu4DafJYf0w4hEMcr93n3umwfL4tMy3KY7tk+E7nvVpsrDqO+/CJJ4PFUT2RXsoBAi4z39LeB+2+BBCuifcYvfimCYhpt1IZ1t8eoYOiivImGVuq4lkQsE3bfoqBE1MICNW9TOONCvV5E/fGywbBU7oQIEYJAuX9piiACC3CfCP2Jeq1+IsrcTCWCOq5Tj4lDQJW0uEbeAhkH6qgxOFyjSmYdOuO19ALCvO2wEEW6dJrb8Ky25XWZDk7lmtnoAKj3I9EE2Sezr4KIp20y0K3OLeTZEPP2OliY9xqTTcuvsiR0LVOnr6MD7//I8dI7WjgKpbz0hDM0= test3@host
diff --git a/tests/integration_tests/assets/test_version_change.pkl b/tests/integration_tests/assets/test_version_change.pkl
new file mode 100644
index 00000000..65ae93e5
--- /dev/null
+++ b/tests/integration_tests/assets/test_version_change.pkl
Binary files differ
diff --git a/tests/integration_tests/assets/trusty_with_mime.pkl b/tests/integration_tests/assets/trusty_with_mime.pkl
new file mode 100644
index 00000000..a4089ecf
--- /dev/null
+++ b/tests/integration_tests/assets/trusty_with_mime.pkl
@@ -0,0 +1,572 @@
+ccopy_reg
+_reconstructor
+p1
+(ccloudinit.sources.DataSourceNoCloud
+DataSourceNoCloudNet
+p2
+c__builtin__
+object
+p3
+NtRp4
+(dp5
+S'paths'
+p6
+g1
+(ccloudinit.helpers
+Paths
+p7
+g3
+NtRp8
+(dp9
+S'lookups'
+p10
+(dp11
+S'cloud_config'
+p12
+S'cloud-config.txt'
+p13
+sS'userdata'
+p14
+S'user-data.txt.i'
+p15
+sS'vendordata'
+p16
+S'vendor-data.txt.i'
+p17
+sS'userdata_raw'
+p18
+S'user-data.txt'
+p19
+sS'boothooks'
+p20
+g20
+sS'scripts'
+p21
+g21
+sS'sem'
+p22
+g22
+sS'data'
+p23
+g23
+sS'vendor_scripts'
+p24
+S'scripts/vendor'
+p25
+sS'handlers'
+p26
+g26
+sS'obj_pkl'
+p27
+S'obj.pkl'
+p28
+sS'vendordata_raw'
+p29
+S'vendor-data.txt'
+p30
+sS'vendor_cloud_config'
+p31
+S'vendor-cloud-config.txt'
+p32
+ssS'template_tpl'
+p33
+S'/etc/cloud/templates/%s.tmpl'
+p34
+sS'cfgs'
+p35
+(dp36
+S'cloud_dir'
+p37
+S'/var/lib/cloud/'
+p38
+sS'templates_dir'
+p39
+S'/etc/cloud/templates/'
+p40
+sS'upstart_dir'
+p41
+S'/etc/init/'
+p42
+ssS'cloud_dir'
+p43
+g38
+sS'datasource'
+p44
+NsS'upstart_conf_d'
+p45
+g42
+sS'boot_finished'
+p46
+S'/var/lib/cloud/instance/boot-finished'
+p47
+sS'instance_link'
+p48
+S'/var/lib/cloud/instance'
+p49
+sS'seed_dir'
+p50
+S'/var/lib/cloud/seed'
+p51
+sbsS'supported_seed_starts'
+p52
+(S'http://'
+p53
+S'https://'
+p54
+S'ftp://'
+p55
+tp56
+sS'sys_cfg'
+p57
+(dp58
+S'output'
+p59
+(dp60
+S'all'
+p61
+S'| tee -a /var/log/cloud-init-output.log'
+p62
+ssS'users'
+p63
+(lp64
+S'default'
+p65
+asS'def_log_file'
+p66
+S'/var/log/cloud-init.log'
+p67
+sS'cloud_final_modules'
+p68
+(lp69
+S'rightscale_userdata'
+p70
+aS'scripts-vendor'
+p71
+aS'scripts-per-once'
+p72
+aS'scripts-per-boot'
+p73
+aS'scripts-per-instance'
+p74
+aS'scripts-user'
+p75
+aS'ssh-authkey-fingerprints'
+p76
+aS'keys-to-console'
+p77
+aS'phone-home'
+p78
+aS'final-message'
+p79
+aS'power-state-change'
+p80
+asS'disable_root'
+p81
+I01
+sS'syslog_fix_perms'
+p82
+S'syslog:adm'
+p83
+sS'log_cfgs'
+p84
+(lp85
+(lp86
+S'[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n'
+p87
+aS'[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=("/dev/log", handlers.SysLogHandler.LOG_USER)\n'
+p88
+aa(lp89
+g87
+aS"[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n"
+p90
+aasS'cloud_init_modules'
+p91
+(lp92
+S'migrator'
+p93
+aS'seed_random'
+p94
+aS'bootcmd'
+p95
+aS'write-files'
+p96
+aS'growpart'
+p97
+aS'resizefs'
+p98
+aS'set_hostname'
+p99
+aS'update_hostname'
+p100
+aS'update_etc_hosts'
+p101
+aS'ca-certs'
+p102
+aS'rsyslog'
+p103
+aS'users-groups'
+p104
+aS'ssh'
+p105
+asS'preserve_hostname'
+p106
+I00
+sS'_log'
+p107
+(lp108
+g87
+ag90
+ag88
+asS'datasource_list'
+p109
+(lp110
+S'NoCloud'
+p111
+aS'ConfigDrive'
+p112
+aS'OpenNebula'
+p113
+aS'Azure'
+p114
+aS'AltCloud'
+p115
+aS'OVF'
+p116
+aS'MAAS'
+p117
+aS'GCE'
+p118
+aS'OpenStack'
+p119
+aS'CloudSigma'
+p120
+aS'Ec2'
+p121
+aS'CloudStack'
+p122
+aS'SmartOS'
+p123
+aS'None'
+p124
+asS'vendor_data'
+p125
+(dp126
+S'prefix'
+p127
+(lp128
+sS'enabled'
+p129
+I01
+ssS'cloud_config_modules'
+p130
+(lp131
+S'emit_upstart'
+p132
+aS'disk_setup'
+p133
+aS'mounts'
+p134
+aS'ssh-import-id'
+p135
+aS'locale'
+p136
+aS'set-passwords'
+p137
+aS'grub-dpkg'
+p138
+aS'apt-pipelining'
+p139
+aS'apt-configure'
+p140
+aS'package-update-upgrade-install'
+p141
+aS'landscape'
+p142
+aS'timezone'
+p143
+aS'puppet'
+p144
+aS'chef'
+p145
+aS'salt-minion'
+p146
+aS'mcollective'
+p147
+aS'disable-ec2-metadata'
+p148
+aS'runcmd'
+p149
+aS'byobu'
+p150
+assg14
+(iemail.mime.multipart
+MIMEMultipart
+p151
+(dp152
+S'_headers'
+p153
+(lp154
+(S'Content-Type'
+p155
+S'multipart/mixed; boundary="===============4291038100093149247=="'
+tp156
+a(S'MIME-Version'
+p157
+S'1.0'
+p158
+tp159
+a(S'Number-Attachments'
+p160
+S'1'
+tp161
+asS'_payload'
+p162
+(lp163
+(iemail.mime.base
+MIMEBase
+p164
+(dp165
+g153
+(lp166
+(g157
+g158
+tp167
+a(S'Content-Type'
+p168
+S'text/x-not-multipart'
+tp169
+a(S'Content-Disposition'
+p170
+S'attachment; filename="part-001"'
+tp171
+asg162
+S''
+sS'_charset'
+p172
+NsS'_default_type'
+p173
+S'text/plain'
+p174
+sS'preamble'
+p175
+NsS'defects'
+p176
+(lp177
+sS'_unixfrom'
+p178
+NsS'epilogue'
+p179
+Nsbasg172
+Nsg173
+g174
+sg175
+Nsg176
+(lp180
+sg178
+Nsg179
+Nsbsg16
+S'#cloud-config\n{}\n\n'
+p181
+sg18
+S'Content-Type: multipart/mixed; boundary="===============1378281702283945349=="\nMIME-Version: 1.0\n\n--===============1378281702283945349==\nContent-Type: text/x-shellscript; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: base64\nContent-Disposition: attachment; filename="script1.sh"\n\nIyEvYmluL3NoCgplY2hvICdoaScgPiAvdmFyL3RtcC9oaQo=\n\n--===============1378281702283945349==\nContent-Type: text/x-shellscript; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: base64\nContent-Disposition: attachment; filename="script2.sh"\n\nIyEvYmluL2Jhc2gKCmVjaG8gJ2hpMicgPiAvdmFyL3RtcC9oaTIK\n\n--===============1378281702283945349==--\n\n#cloud-config\n# final_message: |\n# This is my final message!\n# $version\n# $timestamp\n# $datasource\n# $uptime\n# updates:\n# network:\n# when: [\'hotplug\']\n'
+p182
+sg29
+NsS'dsmode'
+p183
+S'net'
+p184
+sS'seed'
+p185
+S'/var/lib/cloud/seed/nocloud-net'
+p186
+sS'cmdline_id'
+p187
+S'ds=nocloud-net'
+p188
+sS'ud_proc'
+p189
+g1
+(ccloudinit.user_data
+UserDataProcessor
+p190
+g3
+NtRp191
+(dp192
+g6
+g8
+sS'ssl_details'
+p193
+(dp194
+sbsg50
+g186
+sS'ds_cfg'
+p195
+(dp196
+sS'distro'
+p197
+g1
+(ccloudinit.distros.ubuntu
+Distro
+p198
+g3
+NtRp199
+(dp200
+S'osfamily'
+p201
+S'debian'
+p202
+sS'_paths'
+p203
+g8
+sS'name'
+p204
+S'ubuntu'
+p205
+sS'_runner'
+p206
+g1
+(ccloudinit.helpers
+Runners
+p207
+g3
+NtRp208
+(dp209
+g6
+g8
+sS'sems'
+p210
+(dp211
+sbsS'_cfg'
+p212
+(dp213
+S'paths'
+p214
+(dp215
+g37
+g38
+sg39
+g40
+sg41
+g42
+ssS'default_user'
+p216
+(dp217
+S'shell'
+p218
+S'/bin/bash'
+p219
+sS'name'
+p220
+S'ubuntu'
+p221
+sS'sudo'
+p222
+(lp223
+S'ALL=(ALL) NOPASSWD:ALL'
+p224
+asS'lock_passwd'
+p225
+I01
+sS'gecos'
+p226
+S'Ubuntu'
+p227
+sS'groups'
+p228
+(lp229
+S'adm'
+p230
+aS'audio'
+p231
+aS'cdrom'
+p232
+aS'dialout'
+p233
+aS'dip'
+p234
+aS'floppy'
+p235
+aS'netdev'
+p236
+aS'plugdev'
+p237
+aS'sudo'
+p238
+aS'video'
+p239
+assS'package_mirrors'
+p240
+(lp241
+(dp242
+S'arches'
+p243
+(lp244
+S'i386'
+p245
+aS'amd64'
+p246
+asS'failsafe'
+p247
+(dp248
+S'security'
+p249
+S'http://security.ubuntu.com/ubuntu'
+p250
+sS'primary'
+p251
+S'http://archive.ubuntu.com/ubuntu'
+p252
+ssS'search'
+p253
+(dp254
+S'security'
+p255
+(lp256
+sS'primary'
+p257
+(lp258
+S'http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/'
+p259
+aS'http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/'
+p260
+aS'http://%(region)s.clouds.archive.ubuntu.com/ubuntu/'
+p261
+assa(dp262
+S'arches'
+p263
+(lp264
+S'armhf'
+p265
+aS'armel'
+p266
+aS'default'
+p267
+asS'failsafe'
+p268
+(dp269
+S'security'
+p270
+S'http://ports.ubuntu.com/ubuntu-ports'
+p271
+sS'primary'
+p272
+S'http://ports.ubuntu.com/ubuntu-ports'
+p273
+ssasS'ssh_svcname'
+p274
+S'ssh'
+p275
+ssbsS'metadata'
+p276
+(dp277
+g183
+g184
+sS'local-hostname'
+p278
+S'me'
+p279
+sS'instance-id'
+p280
+S'me'
+p281
+ssb. \ No newline at end of file
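The asset above is a protocol-0 pickle of a trusty-era DataSourceNoCloudNet
(note the MIMEMultipart userdata payload), used by test_persistence.py below
to exercise the cache-unpickling path. A pickle like this can be inspected
without importing cloudinit or constructing any objects by disassembling its
opcode stream:

    import pickletools

    # pickletools.dis() decodes opcodes without executing them, so unlike
    # pickle.load() it is safe for stale or untrusted pickles.
    with open('tests/integration_tests/assets/trusty_with_mime.pkl', 'rb') as f:
        pickletools.dis(f.read())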
diff --git a/tests/integration_tests/bugs/test_gh868.py b/tests/integration_tests/bugs/test_gh868.py
new file mode 100644
index 00000000..838efca6
--- /dev/null
+++ b/tests/integration_tests/bugs/test_gh868.py
@@ -0,0 +1,20 @@
+"""Ensure no Traceback when 'chef_license' is set"""
+import pytest
+from tests.integration_tests.instances import IntegrationInstance
+
+
+USERDATA = """\
+#cloud-config
+chef:
+ install_type: omnibus
+ chef_license: accept
+ server_url: https://chef.yourorg.invalid
+ validation_name: some-validator
+"""
+
+
+@pytest.mark.adhoc # Can't be regularly reaching out to chef install script
+@pytest.mark.user_data(USERDATA)
+def test_chef_license(client: IntegrationInstance):
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'Traceback' not in log
diff --git a/tests/integration_tests/bugs/test_lp1920939.py b/tests/integration_tests/bugs/test_lp1920939.py
new file mode 100644
index 00000000..408792a6
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1920939.py
@@ -0,0 +1,140 @@
+"""
+Test that disk setup can run successfully on a mounted partition when
+partprobe is being used.
+
+lp-1920939
+"""
+import json
+import os
+import pytest
+from uuid import uuid4
+from pycloudlib.lxd.instance import LXDInstance
+
+from cloudinit.subp import subp
+from tests.integration_tests.instances import IntegrationInstance
+
+DISK_PATH = '/tmp/test_disk_setup_{}'.format(uuid4())
+
+
+def setup_and_mount_lxd_disk(instance: LXDInstance):
+ subp('lxc config device add {} test-disk-setup-disk disk source={}'.format(
+ instance.name, DISK_PATH).split())
+
+
+@pytest.yield_fixture
+def create_disk():
+ # 640k should be enough for anybody
+ subp('dd if=/dev/zero of={} bs=1k count=640'.format(DISK_PATH).split())
+ yield
+ os.remove(DISK_PATH)
+
+
+USERDATA = """\
+#cloud-config
+disk_setup:
+ /dev/sdb:
+ table_type: mbr
+ layout: [50, 50]
+ overwrite: True
+fs_setup:
+ - label: test
+ device: /dev/sdb1
+ filesystem: ext4
+ - label: test2
+ device: /dev/sdb2
+ filesystem: ext4
+mounts:
+- ["/dev/sdb1", "/mnt1"]
+- ["/dev/sdb2", "/mnt2"]
+"""
+
+UPDATED_USERDATA = """\
+#cloud-config
+disk_setup:
+ /dev/sdb:
+ table_type: mbr
+ layout: [100]
+ overwrite: True
+fs_setup:
+ - label: test3
+ device: /dev/sdb1
+ filesystem: ext4
+mounts:
+- ["/dev/sdb1", "/mnt3"]
+"""
+
+
+def _verify_first_disk_setup(client, log):
+ assert 'Traceback' not in log
+ assert 'WARN' not in log
+ lsblk = json.loads(client.execute('lsblk --json'))
+ sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0]
+ assert len(sdb['children']) == 2
+ assert sdb['children'][0]['name'] == 'sdb1'
+ assert sdb['children'][0]['mountpoint'] == '/mnt1'
+ assert sdb['children'][1]['name'] == 'sdb2'
+ assert sdb['children'][1]['mountpoint'] == '/mnt2'
+
+
+@pytest.mark.user_data(USERDATA)
+@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk)
+@pytest.mark.ubuntu
+@pytest.mark.lxd_vm
+# Not bionic or xenial because the LXD agent gets in the way of us
+# changing the userdata
+@pytest.mark.not_bionic
+@pytest.mark.not_xenial
+def test_disk_setup_when_mounted(create_disk, client: IntegrationInstance):
+ """Test lp-1920939.
+
+ We insert an extra disk into our VM, format it to have two partitions,
+ modify our cloud config to mount devices before disk setup, and modify
+    our userdata to set up a single partition on the disk.
+
+ This allows cloud-init to attempt disk setup on a mounted partition.
+ When blockdev is in use, it will fail with
+ "blockdev: ioctl error on BLKRRPART: Device or resource busy" along
+ with a warning and a traceback. When partprobe is in use, everything
+ should work successfully.
+ """
+ log = client.read_from_file('/var/log/cloud-init.log')
+ _verify_first_disk_setup(client, log)
+
+ # Update our userdata and cloud.cfg to mount then perform new disk setup
+ client.write_to_file(
+ '/var/lib/cloud/seed/nocloud-net/user-data',
+ UPDATED_USERDATA
+ )
+ client.execute("sed -i 's/write-files/write-files\\n - mounts/' "
+ "/etc/cloud/cloud.cfg")
+
+ client.execute('cloud-init clean --logs')
+ client.restart()
+
+    # Assert new setup works as expected on the fresh boot's log
+    log = client.read_from_file('/var/log/cloud-init.log')
+    assert 'Traceback' not in log and 'WARN' not in log
+
+ lsblk = json.loads(client.execute('lsblk --json'))
+ sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0]
+ assert len(sdb['children']) == 1
+ assert sdb['children'][0]['name'] == 'sdb1'
+ assert sdb['children'][0]['mountpoint'] == '/mnt3'
+
+
+@pytest.mark.user_data(USERDATA)
+@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk)
+@pytest.mark.ubuntu
+@pytest.mark.lxd_vm
+def test_disk_setup_no_partprobe(create_disk, client: IntegrationInstance):
+ """Ensure disk setup still works as expected without partprobe."""
+ # We can't do this part in a bootcmd because the path has already
+ # been found by the time we get to the bootcmd
+ client.execute('rm $(which partprobe)')
+ client.execute('cloud-init clean --logs')
+ client.restart()
+
+ log = client.read_from_file('/var/log/cloud-init.log')
+ _verify_first_disk_setup(client, log)
+
+ assert 'partprobe' not in log
diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
index 3bbccb44..f2362b5d 100644
--- a/tests/integration_tests/clouds.py
+++ b/tests/integration_tests/clouds.py
@@ -142,12 +142,12 @@ class IntegrationCloud(ABC):
except (ValueError, IndexError):
return image.image_id
- def _perform_launch(self, launch_kwargs):
+ def _perform_launch(self, launch_kwargs, **kwargs):
pycloudlib_instance = self.cloud_instance.launch(**launch_kwargs)
return pycloudlib_instance
def launch(self, user_data=None, launch_kwargs=None,
- settings=integration_settings):
+ settings=integration_settings, **kwargs):
if launch_kwargs is None:
launch_kwargs = {}
if self.settings.EXISTING_INSTANCE_ID:
@@ -157,22 +157,22 @@ class IntegrationCloud(ABC):
self.instance = self.cloud_instance.get_instance(
self.settings.EXISTING_INSTANCE_ID
)
- return
- kwargs = {
+ return self.instance
+ default_launch_kwargs = {
'image_id': self.image_id,
'user_data': user_data,
}
- kwargs.update(launch_kwargs)
+ launch_kwargs = {**default_launch_kwargs, **launch_kwargs}
log.info(
"Launching instance with launch_kwargs:\n%s",
- "\n".join("{}={}".format(*item) for item in kwargs.items())
+ "\n".join("{}={}".format(*item) for item in launch_kwargs.items())
)
with emit_dots_on_travis():
- pycloudlib_instance = self._perform_launch(kwargs)
+ pycloudlib_instance = self._perform_launch(launch_kwargs, **kwargs)
log.info('Launched instance: %s', pycloudlib_instance)
instance = self.get_instance(pycloudlib_instance, settings)
- if kwargs.get('wait', True):
+ if launch_kwargs.get('wait', True):
# If we aren't waiting, we can't rely on command execution here
log.info(
'cloud-init version: %s',
@@ -247,8 +247,15 @@ class OciCloud(IntegrationCloud):
integration_instance_cls = IntegrationOciInstance
def _get_cloud_instance(self):
+ if not integration_settings.ORACLE_AVAILABILITY_DOMAIN:
+ raise Exception(
+ 'ORACLE_AVAILABILITY_DOMAIN must be set to a valid '
+ 'availability domain. If using the oracle CLI, '
+ 'try `oci iam availability-domain list`'
+ )
return OCI(
- tag='oci-integration-test'
+ tag='oci-integration-test',
+ availability_domain=integration_settings.ORACLE_AVAILABILITY_DOMAIN
)
@@ -292,7 +299,7 @@ class _LxdIntegrationCloud(IntegrationCloud):
).format(**format_variables)
subp(command.split())
- def _perform_launch(self, launch_kwargs):
+ def _perform_launch(self, launch_kwargs, **kwargs):
launch_kwargs['inst_type'] = launch_kwargs.pop('instance_type', None)
wait = launch_kwargs.pop('wait', True)
release = launch_kwargs.pop('image_id')
@@ -310,6 +317,9 @@ class _LxdIntegrationCloud(IntegrationCloud):
)
if self.settings.CLOUD_INIT_SOURCE == 'IN_PLACE':
self._mount_source(pycloudlib_instance)
+ if 'lxd_setup' in kwargs:
+ log.info("Running callback specified by 'lxd_setup' mark")
+ kwargs['lxd_setup'](pycloudlib_instance)
pycloudlib_instance.start(wait=wait)
return pycloudlib_instance
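The launch() rework earlier in this file's diff merges defaults with
caller-supplied kwargs via `{**default_launch_kwargs, **launch_kwargs}`.
Because later entries in a dict literal win, caller values override the
defaults while unspecified keys fall back. A tiny illustration with
placeholder values:

    # Later unpacking wins: 'user_data' is overridden, 'image_id' falls back.
    defaults = {'image_id': 'img-123', 'user_data': None}
    overrides = {'user_data': '#cloud-config\n'}
    merged = {**defaults, **overrides}
    assert merged == {'image_id': 'img-123', 'user_data': '#cloud-config\n'}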
diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py
index 6f4ce8d3..5a543e39 100644
--- a/tests/integration_tests/conftest.py
+++ b/tests/integration_tests/conftest.py
@@ -213,6 +213,7 @@ def _client(request, fixture_utils, session_cloud: IntegrationCloud):
user_data = getter('user_data')
name = getter('instance_name')
lxd_config_dict = getter('lxd_config_dict')
+ lxd_setup = getter('lxd_setup')
lxd_use_exec = fixture_utils.closest_marker_args_or(
request, 'lxd_use_exec', None
)
@@ -238,9 +239,14 @@ def _client(request, fixture_utils, session_cloud: IntegrationCloud):
# run anywhere else. A failure flags up this discrepancy.
pytest.fail(XENIAL_LXD_VM_EXEC_MSG)
launch_kwargs["execute_via_ssh"] = False
+ local_launch_kwargs = {}
+ if lxd_setup is not None:
+ if not isinstance(session_cloud, _LxdIntegrationCloud):
+            pytest.skip('lxd_setup requires LXD')
+ local_launch_kwargs['lxd_setup'] = lxd_setup
with session_cloud.launch(
- user_data=user_data, launch_kwargs=launch_kwargs
+ user_data=user_data, launch_kwargs=launch_kwargs, **local_launch_kwargs
) as instance:
if lxd_use_exec is not None:
# Existing instances are not affected by the launch kwargs, so
diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py
index 0703be58..e4a790c2 100644
--- a/tests/integration_tests/integration_settings.py
+++ b/tests/integration_tests/integration_settings.py
@@ -98,6 +98,13 @@ KEYPAIR_NAME = None
OPENSTACK_NETWORK = None
##################################################################
+# OCI SETTINGS
+##################################################################
+# Availability domain to use for Oracle. Should be one of the names found
+# in `oci iam availability-domain list`
+ORACLE_AVAILABILITY_DOMAIN = None
+
+##################################################################
# USER SETTINGS OVERRIDES
##################################################################
# Bring in any user-file defined settings
diff --git a/tests/integration_tests/modules/test_hotplug.py b/tests/integration_tests/modules/test_hotplug.py
new file mode 100644
index 00000000..b683566f
--- /dev/null
+++ b/tests/integration_tests/modules/test_hotplug.py
@@ -0,0 +1,94 @@
+import pytest
+import time
+import yaml
+from collections import namedtuple
+
+from tests.integration_tests.instances import IntegrationInstance
+
+USER_DATA = """\
+#cloud-config
+updates:
+ network:
+ when: ['hotplug']
+"""
+
+ip_addr = namedtuple('ip_addr', 'interface state ip4 ip6')
+
+
+def _wait_till_hotplug_complete(client, expected_runs=1):
+ for _ in range(60):
+ log = client.read_from_file('/var/log/cloud-init.log')
+ if log.count('Exiting hotplug handler') == expected_runs:
+ return log
+ time.sleep(1)
+ raise Exception('Waiting for hotplug handler failed')
+
+
+def _get_ip_addr(client):
+ ips = []
+ lines = client.execute('ip --brief addr').split('\n')
+ for line in lines:
+ attributes = line.split()
+ interface, state = attributes[0], attributes[1]
+ ip4_cidr = attributes[2] if len(attributes) > 2 else None
+ ip6_cidr = attributes[3] if len(attributes) > 3 else None
+ ip4 = ip4_cidr.split('/')[0] if ip4_cidr else None
+ ip6 = ip6_cidr.split('/')[0] if ip6_cidr else None
+ ip = ip_addr(interface, state, ip4, ip6)
+ ips.append(ip)
+ return ips
+
+
+@pytest.mark.openstack
+@pytest.mark.user_data(USER_DATA)
+def test_hotplug_add_remove(client: IntegrationInstance):
+ ips_before = _get_ip_addr(client)
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'Exiting hotplug handler' not in log
+
+ # Add new NIC
+ added_ip = client.instance.add_network_interface()
+ _wait_till_hotplug_complete(client)
+ ips_after_add = _get_ip_addr(client)
+ new_addition = [ip for ip in ips_after_add if ip.ip4 == added_ip][0]
+
+ assert len(ips_after_add) == len(ips_before) + 1
+ assert added_ip not in [ip.ip4 for ip in ips_before]
+ assert added_ip in [ip.ip4 for ip in ips_after_add]
+ assert new_addition.state == 'UP'
+
+ netplan_cfg = client.read_from_file('/etc/netplan/50-cloud-init.yaml')
+ config = yaml.safe_load(netplan_cfg)
+ assert new_addition.interface in config['network']['ethernets']
+
+ # Remove new NIC
+ client.instance.remove_network_interface(added_ip)
+ _wait_till_hotplug_complete(client, expected_runs=2)
+ ips_after_remove = _get_ip_addr(client)
+ assert len(ips_after_remove) == len(ips_before)
+ assert added_ip not in [ip.ip4 for ip in ips_after_remove]
+
+ netplan_cfg = client.read_from_file('/etc/netplan/50-cloud-init.yaml')
+ config = yaml.safe_load(netplan_cfg)
+ assert new_addition.interface not in config['network']['ethernets']
+
+
+@pytest.mark.openstack
+def test_no_hotplug_in_userdata(client: IntegrationInstance):
+ ips_before = _get_ip_addr(client)
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'Exiting hotplug handler' not in log
+
+ # Add new NIC
+ client.instance.add_network_interface()
+ _wait_till_hotplug_complete(client)
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'hotplug not enabled for event of type network' in log
+
+ ips_after_add = _get_ip_addr(client)
+ if len(ips_after_add) == len(ips_before) + 1:
+ # We can see the device, but it should not have been brought up
+ new_ip = [ip for ip in ips_after_add if ip not in ips_before][0]
+ assert new_ip.state == 'DOWN'
+ else:
+ assert len(ips_after_add) == len(ips_before)
diff --git a/tests/integration_tests/modules/test_persistence.py b/tests/integration_tests/modules/test_persistence.py
new file mode 100644
index 00000000..00fdeaea
--- /dev/null
+++ b/tests/integration_tests/modules/test_persistence.py
@@ -0,0 +1,30 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+"""Test the behavior of loading/discarding pickle data"""
+from pathlib import Path
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import (
+ ASSETS_DIR,
+ verify_ordered_items_in_text,
+)
+
+
+PICKLE_PATH = Path('/var/lib/cloud/instance/obj.pkl')
+TEST_PICKLE = ASSETS_DIR / 'trusty_with_mime.pkl'
+
+
+@pytest.mark.lxd_container
+def test_log_message_on_missing_version_file(client: IntegrationInstance):
+ client.push_file(TEST_PICKLE, PICKLE_PATH)
+ client.restart()
+ assert client.execute('cloud-init status --wait').ok
+ log = client.read_from_file('/var/log/cloud-init.log')
+ verify_ordered_items_in_text([
+ "Unable to unpickle datasource: 'MIMEMultipart' object has no "
+ "attribute 'policy'. Ignoring current cache.",
+ 'no cache found',
+ 'Searching for local data source',
+ 'SUCCESS: found local data from DataSourceNoCloud'
+ ], log)
diff --git a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
index b9b0d85e..e1946cb1 100644
--- a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
+++ b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py
@@ -18,7 +18,7 @@ USER_DATA_SSH_AUTHKEY_DISABLE = """\
no_ssh_fingerprints: true
"""
-USER_DATA_SSH_AUTHKEY_ENABLE="""\
+USER_DATA_SSH_AUTHKEY_ENABLE = """\
#cloud-config
ssh_genkeytypes:
- ecdsa
diff --git a/tests/integration_tests/modules/test_ssh_keysfile.py b/tests/integration_tests/modules/test_ssh_keysfile.py
new file mode 100644
index 00000000..f82d7649
--- /dev/null
+++ b/tests/integration_tests/modules/test_ssh_keysfile.py
@@ -0,0 +1,85 @@
+import paramiko
+import pytest
+from io import StringIO
+from paramiko.ssh_exception import SSHException
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import get_test_rsa_keypair
+
+TEST_USER1_KEYS = get_test_rsa_keypair('test1')
+TEST_USER2_KEYS = get_test_rsa_keypair('test2')
+TEST_DEFAULT_KEYS = get_test_rsa_keypair('test3')
+
+USERDATA = """\
+#cloud-config
+bootcmd:
+ - sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile /etc/ssh/authorized_keys %h/.ssh/authorized_keys2;' /etc/ssh/sshd_config
+ssh_authorized_keys:
+ - {default}
+users:
+- default
+- name: test_user1
+ ssh_authorized_keys:
+ - {user1}
+- name: test_user2
+ ssh_authorized_keys:
+ - {user2}
+""".format( # noqa: E501
+ default=TEST_DEFAULT_KEYS.public_key,
+ user1=TEST_USER1_KEYS.public_key,
+ user2=TEST_USER2_KEYS.public_key,
+)
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(USERDATA)
+def test_authorized_keys(client: IntegrationInstance):
+ expected_keys = [
+ ('test_user1', '/home/test_user1/.ssh/authorized_keys2',
+ TEST_USER1_KEYS),
+ ('test_user2', '/home/test_user2/.ssh/authorized_keys2',
+ TEST_USER2_KEYS),
+ ('ubuntu', '/home/ubuntu/.ssh/authorized_keys2',
+ TEST_DEFAULT_KEYS),
+ ('root', '/root/.ssh/authorized_keys2', TEST_DEFAULT_KEYS),
+ ]
+
+ for user, filename, keys in expected_keys:
+ contents = client.read_from_file(filename)
+ if user in ['ubuntu', 'root']:
+ # Our personal public key gets added by pycloudlib
+ lines = contents.split('\n')
+ assert len(lines) == 2
+ assert keys.public_key.strip() in contents
+ else:
+ assert contents.strip() == keys.public_key.strip()
+
+ # Ensure we can actually connect
+ ssh = paramiko.SSHClient()
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ paramiko_key = paramiko.RSAKey.from_private_key(StringIO(
+ keys.private_key))
+
+ # Will fail with AuthenticationException if
+ # we cannot connect
+ ssh.connect(
+ client.instance.ip,
+ username=user,
+ pkey=paramiko_key,
+ look_for_keys=False,
+ allow_agent=False,
+ )
+
+    # Ensure other users can't connect using our key
+ other_users = [u[0] for u in expected_keys if u[2] != keys]
+ for other_user in other_users:
+ with pytest.raises(SSHException):
+ print('trying to connect as {} with key from {}'.format(
+ other_user, user))
+ ssh.connect(
+ client.instance.ip,
+ username=other_user,
+ pkey=paramiko_key,
+ look_for_keys=False,
+ allow_agent=False,
+ )
diff --git a/tests/integration_tests/modules/test_user_events.py b/tests/integration_tests/modules/test_user_events.py
new file mode 100644
index 00000000..a45cad72
--- /dev/null
+++ b/tests/integration_tests/modules/test_user_events.py
@@ -0,0 +1,95 @@
+"""Test user-overridable events.
+
+This is currently limited to applying network config on BOOT events.
+"""
+
+import pytest
+import re
+import yaml
+
+from tests.integration_tests.instances import IntegrationInstance
+
+
+def _add_dummy_bridge_to_netplan(client: IntegrationInstance):
+ # Update netplan configuration to ensure it doesn't change on reboot
+ netplan = yaml.safe_load(
+ client.execute('cat /etc/netplan/50-cloud-init.yaml')
+ )
+ # Just a dummy bridge to do nothing
+ try:
+ netplan['network']['bridges']['dummy0'] = {'dhcp4': False}
+ except KeyError:
+ netplan['network']['bridges'] = {'dummy0': {'dhcp4': False}}
+
+ dumped_netplan = yaml.dump(netplan)
+ client.write_to_file('/etc/netplan/50-cloud-init.yaml', dumped_netplan)
+
+
+@pytest.mark.lxd_container
+@pytest.mark.lxd_vm
+@pytest.mark.ec2
+@pytest.mark.gce
+@pytest.mark.oci
+@pytest.mark.openstack
+@pytest.mark.not_xenial
+def test_boot_event_disabled_by_default(client: IntegrationInstance):
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'Applying network configuration' in log
+ assert 'dummy0' not in client.execute('ls /sys/class/net')
+
+ _add_dummy_bridge_to_netplan(client)
+ client.execute('rm /var/log/cloud-init.log')
+
+ client.restart()
+ log2 = client.read_from_file('/var/log/cloud-init.log')
+
+ # We attempt to apply network config twice on every boot.
+    # Ensure both attempts are denied.
+ assert 2 == len(
+ re.findall(r"Event Denied: scopes=\['network'\] EventType=boot[^-]",
+ log2)
+ )
+ assert 2 == log2.count(
+ "Event Denied: scopes=['network'] EventType=boot-legacy"
+ )
+ assert 2 == log2.count(
+ "No network config applied. Neither a new instance"
+ " nor datasource network update allowed"
+ )
+
+ assert 'dummy0' in client.execute('ls /sys/class/net')
+
+
+def _test_network_config_applied_on_reboot(client: IntegrationInstance):
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'Applying network configuration' in log
+ assert 'dummy0' not in client.execute('ls /sys/class/net')
+
+ _add_dummy_bridge_to_netplan(client)
+ client.execute('rm /var/log/cloud-init.log')
+ client.restart()
+ log = client.read_from_file('/var/log/cloud-init.log')
+
+ assert 'Event Allowed: scope=network EventType=boot' in log
+ assert 'Applying network configuration' in log
+ assert 'dummy0' not in client.execute('ls /sys/class/net')
+
+
+@pytest.mark.azure
+@pytest.mark.not_xenial
+def test_boot_event_enabled_by_default(client: IntegrationInstance):
+ _test_network_config_applied_on_reboot(client)
+
+
+USER_DATA = """\
+#cloud-config
+updates:
+ network:
+ when: [boot]
+"""
+
+
+@pytest.mark.not_xenial
+@pytest.mark.user_data(USER_DATA)
+def test_boot_event_enabled(client: IntegrationInstance):
+ _test_network_config_applied_on_reboot(client)
diff --git a/tests/integration_tests/modules/test_version_change.py b/tests/integration_tests/modules/test_version_change.py
new file mode 100644
index 00000000..4e9ab63f
--- /dev/null
+++ b/tests/integration_tests/modules/test_version_change.py
@@ -0,0 +1,56 @@
+from pathlib import Path
+
+from tests.integration_tests.instances import IntegrationInstance
+from tests.integration_tests.util import ASSETS_DIR
+
+
+PICKLE_PATH = Path('/var/lib/cloud/instance/obj.pkl')
+TEST_PICKLE = ASSETS_DIR / 'test_version_change.pkl'
+
+
+def _assert_no_pickle_problems(log):
+ assert 'Failed loading pickled blob' not in log
+ assert 'Traceback' not in log
+ assert 'WARN' not in log
+
+
+def test_reboot_without_version_change(client: IntegrationInstance):
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'Python version change detected' not in log
+ assert 'Cache compatibility status is currently unknown.' not in log
+ _assert_no_pickle_problems(log)
+
+ client.restart()
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'Python version change detected' not in log
+ assert 'Could not determine Python version used to write cache' not in log
+ _assert_no_pickle_problems(log)
+
+ # Now ensure that loading a bad pickle gives us problems
+ client.push_file(TEST_PICKLE, PICKLE_PATH)
+ client.restart()
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'Failed loading pickled blob from {}'.format(PICKLE_PATH) in log
+
+
+def test_cache_purged_on_version_change(client: IntegrationInstance):
+ # Start by pushing the invalid pickle so we'll hit an error if the
+ # cache didn't actually get purged
+ client.push_file(TEST_PICKLE, PICKLE_PATH)
+ client.execute("echo '1.0' > /var/lib/cloud/data/python-version")
+ client.restart()
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert 'Python version change detected. Purging cache' in log
+ _assert_no_pickle_problems(log)
+
+
+def test_log_message_on_missing_version_file(client: IntegrationInstance):
+ # Start by pushing a pickle so we can see the log message
+ client.push_file(TEST_PICKLE, PICKLE_PATH)
+ client.execute("rm /var/lib/cloud/data/python-version")
+ client.restart()
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert (
+ 'Writing python-version file. '
+ 'Cache compatibility status is currently unknown.'
+ ) in log
diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py
index 7478a1b9..5c7a2e1e 100644
--- a/tests/integration_tests/test_upgrade.py
+++ b/tests/integration_tests/test_upgrade.py
@@ -1,17 +1,35 @@
+import json
import logging
import os
import pytest
-import time
-from pathlib import Path
-from tests.integration_tests.clouds import ImageSpecification, IntegrationCloud
-from tests.integration_tests.conftest import (
- get_validated_source,
- session_start_time,
-)
+from tests.integration_tests.clouds import IntegrationCloud
+from tests.integration_tests.conftest import get_validated_source
+
+
+LOG = logging.getLogger('integration_testing.test_upgrade')
+
+LOG_TEMPLATE = """\n\
+=== `systemd-analyze` before:
+{pre_systemd_analyze}
+=== `systemd-analyze` after:
+{post_systemd_analyze}
+=== `systemd-analyze blame` before (first 10 lines):
+{pre_systemd_blame}
+=== `systemd-analyze blame` after (first 10 lines):
+{post_systemd_blame}
-log = logging.getLogger('integration_testing')
+=== `cloud-init analyze show` before:
+{pre_analyze_totals}
+=== `cloud-init analyze show` after:
+{post_analyze_totals}
+
+=== `cloud-init analyze blame` before (first 10 lines):
+{pre_cloud_blame}
+=== `cloud-init analyze blame` after (first 10 lines):
+{post_cloud_blame}
+"""
UNSUPPORTED_INSTALL_METHOD_MSG = (
"Install method '{}' not supported for this test"
@@ -22,43 +40,6 @@ hostname: SRU-worked
"""
-def _output_to_compare(instance, file_path, netcfg_path):
- commands = [
- 'hostname',
- 'dpkg-query --show cloud-init',
- 'cat /run/cloud-init/result.json',
- # 'cloud-init init' helps us understand if our pickling upgrade paths
- # have broken across re-constitution of a cached datasource. Some
- # platforms invalidate their datasource cache on reboot, so we run
- # it here to ensure we get a dirty run.
- 'cloud-init init',
- 'grep Trace /var/log/cloud-init.log',
- 'cloud-id',
- 'cat {}'.format(netcfg_path),
- 'systemd-analyze',
- 'systemd-analyze blame',
- 'cloud-init analyze show',
- 'cloud-init analyze blame',
- ]
- with file_path.open('w') as f:
- for command in commands:
- f.write('===== {} ====='.format(command) + '\n')
- f.write(instance.execute(command) + '\n')
-
-
-def _restart(instance):
- # work around pad.lv/1908287
- instance.restart()
- if not instance.execute('cloud-init status --wait --long').ok:
- for _ in range(10):
- time.sleep(5)
- result = instance.execute('cloud-init status --wait --long')
- if result.ok:
- return
- raise Exception("Cloud-init didn't finish starting up")
-
-
-@pytest.mark.sru_2020_11
def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud):
source = get_validated_source(session_cloud)
if not source.installs_new_version():
@@ -69,37 +50,82 @@ def test_clean_boot_of_upgraded_package(session_cloud: IntegrationCloud):
'image_id': session_cloud.released_image_id,
}
- image = ImageSpecification.from_os_image()
-
- # Get the paths to write test logs
- output_dir = Path(session_cloud.settings.LOCAL_LOG_PATH)
- output_dir.mkdir(parents=True, exist_ok=True)
- base_filename = 'test_upgrade_{platform}_{os}_{{stage}}_{time}.log'.format(
- platform=session_cloud.settings.PLATFORM,
- os=image.release,
- time=session_start_time,
- )
- before_path = output_dir / base_filename.format(stage='before')
- after_path = output_dir / base_filename.format(stage='after')
-
- # Get the network cfg file
- netcfg_path = '/dev/null'
- if image.os == 'ubuntu':
- netcfg_path = '/etc/netplan/50-cloud-init.yaml'
- if image.release == 'xenial':
- netcfg_path = '/etc/network/interfaces.d/50-cloud-init.cfg'
-
with session_cloud.launch(
launch_kwargs=launch_kwargs, user_data=USER_DATA,
) as instance:
- _output_to_compare(instance, before_path, netcfg_path)
+ # get pre values
+ pre_hostname = instance.execute('hostname')
+ pre_cloud_id = instance.execute('cloud-id')
+ pre_result = instance.execute('cat /run/cloud-init/result.json')
+ pre_network = instance.execute('cat /etc/netplan/50-cloud-init.yaml')
+ pre_systemd_analyze = instance.execute('systemd-analyze')
+ pre_systemd_blame = instance.execute('systemd-analyze blame')
+ pre_cloud_analyze = instance.execute('cloud-init analyze show')
+ pre_cloud_blame = instance.execute('cloud-init analyze blame')
+
+ # Ensure no issues pre-upgrade
+ assert not json.loads(pre_result)['v1']['errors']
+
+ log = instance.read_from_file('/var/log/cloud-init.log')
+ assert 'Traceback' not in log
+ assert 'WARN' not in log
+
+ # Upgrade and reboot
instance.install_new_cloud_init(source, take_snapshot=False)
instance.execute('hostname something-else')
- _restart(instance)
+ instance.restart()
assert instance.execute('cloud-init status --wait --long').ok
- _output_to_compare(instance, after_path, netcfg_path)
- log.info('Wrote upgrade test logs to %s and %s', before_path, after_path)
+ # 'cloud-init init' helps us understand if our pickling upgrade paths
+ # have broken across re-constitution of a cached datasource. Some
+ # platforms invalidate their datasource cache on reboot, so we run
+ # it here to ensure we get a dirty run.
+ assert instance.execute('cloud-init init').ok
+
+ # get post values
+ post_hostname = instance.execute('hostname')
+ post_cloud_id = instance.execute('cloud-id')
+ post_result = instance.execute('cat /run/cloud-init/result.json')
+ post_network = instance.execute('cat /etc/netplan/50-cloud-init.yaml')
+ post_systemd_analyze = instance.execute('systemd-analyze')
+ post_systemd_blame = instance.execute('systemd-analyze blame')
+ post_cloud_analyze = instance.execute('cloud-init analyze show')
+ post_cloud_blame = instance.execute('cloud-init analyze blame')
+
+ # Ensure no issues post-upgrade
+    assert not json.loads(post_result)['v1']['errors']
+
+ log = instance.read_from_file('/var/log/cloud-init.log')
+ assert 'Traceback' not in log
+ assert 'WARN' not in log
+
+ # Ensure important things stayed the same
+ assert pre_hostname == post_hostname
+ assert pre_cloud_id == post_cloud_id
+ assert pre_result == post_result
+ assert pre_network == post_network
+
+    # Calculate and log the boot timing totals
+ pre_analyze_totals = [
+ x for x in pre_cloud_analyze.splitlines()
+ if x.startswith('Finished stage') or x.startswith('Total Time')
+ ]
+ post_analyze_totals = [
+ x for x in post_cloud_analyze.splitlines()
+ if x.startswith('Finished stage') or x.startswith('Total Time')
+ ]
+
+ # pylint: disable=logging-format-interpolation
+ LOG.info(LOG_TEMPLATE.format(
+ pre_systemd_analyze=pre_systemd_analyze,
+ post_systemd_analyze=post_systemd_analyze,
+ pre_systemd_blame='\n'.join(pre_systemd_blame.splitlines()[:10]),
+ post_systemd_blame='\n'.join(post_systemd_blame.splitlines()[:10]),
+ pre_analyze_totals='\n'.join(pre_analyze_totals),
+ post_analyze_totals='\n'.join(post_analyze_totals),
+ pre_cloud_blame='\n'.join(pre_cloud_blame.splitlines()[:10]),
+ post_cloud_blame='\n'.join(post_cloud_blame.splitlines()[:10]),
+ ))
@pytest.mark.ci
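The pre/post comparison above leans on cloud-init's boot report at /run/cloud-init/result.json, whose 'v1' object carries an 'errors' list that is empty on a clean boot. A minimal sketch of that check, with a hypothetical inline payload standing in for the real file:

    import json

    # Hypothetical result.json contents after a clean boot; a non-empty
    # v1.errors list would mean a boot stage failed.
    raw = '{"v1": {"datasource": "DataSourceEc2", "errors": []}}'
    assert not json.loads(raw)['v1']['errors']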
diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py
index 3ef12358..ce62ffc8 100644
--- a/tests/integration_tests/util.py
+++ b/tests/integration_tests/util.py
@@ -3,8 +3,15 @@ import multiprocessing
import os
import time
from contextlib import contextmanager
+from collections import namedtuple
+from pathlib import Path
+
log = logging.getLogger('integration_testing')
+key_pair = namedtuple('key_pair', 'public_key private_key')
+
+ASSETS_DIR = Path('tests/integration_tests/assets')
+KEY_PATH = ASSETS_DIR / 'keys'
def verify_ordered_items_in_text(to_verify: list, text: str):
@@ -47,3 +54,13 @@ def emit_dots_on_travis():
yield
finally:
dot_process.terminate()
+
+
+def get_test_rsa_keypair(key_name: str = 'test1') -> key_pair:
+ private_key_path = KEY_PATH / 'id_rsa.{}'.format(key_name)
+ public_key_path = KEY_PATH / 'id_rsa.{}.pub'.format(key_name)
+ with public_key_path.open() as public_file:
+ public_key = public_file.read()
+ with private_key_path.open() as private_file:
+ private_key = private_file.read()
+ return key_pair(public_key, private_key)
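A usage sketch for the new helper, assuming the tests package is importable and that the id_rsa.test1 fixture pair under tests/integration_tests/assets/keys holds OpenSSH-format RSA keys:

    from tests.integration_tests.util import get_test_rsa_keypair

    keys = get_test_rsa_keypair()  # defaults to the 'test1' pair
    # OpenSSH RSA public keys are single-line 'ssh-rsa AAAA... comment' strings.
    assert keys.public_key.startswith('ssh-rsa')
    # keys.private_key can then be injected into an instance under test.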
diff --git a/tests/unittests/cmd/devel/test_hotplug_hook.py b/tests/unittests/cmd/devel/test_hotplug_hook.py
new file mode 100644
index 00000000..63d2490e
--- /dev/null
+++ b/tests/unittests/cmd/devel/test_hotplug_hook.py
@@ -0,0 +1,218 @@
+import pytest
+from collections import namedtuple
+from unittest import mock
+from unittest.mock import call
+
+from cloudinit.cmd.devel.hotplug_hook import handle_hotplug
+from cloudinit.distros import Distro
+from cloudinit.event import EventType
+from cloudinit.net.activators import NetworkActivator
+from cloudinit.net.network_state import NetworkState
+from cloudinit.sources import DataSource
+from cloudinit.stages import Init
+
+
+hotplug_args = namedtuple('hotplug_args', 'udevaction, subsystem, devpath')
+FAKE_MAC = '11:22:33:44:55:66'
+
+
+@pytest.yield_fixture
+def mocks():
+ m_init = mock.MagicMock(spec=Init)
+ m_distro = mock.MagicMock(spec=Distro)
+ m_datasource = mock.MagicMock(spec=DataSource)
+ m_datasource.distro = m_distro
+ m_init.datasource = m_datasource
+ m_init.fetch.return_value = m_datasource
+
+ read_sys_net = mock.patch(
+ 'cloudinit.cmd.devel.hotplug_hook.read_sys_net_safe',
+ return_value=FAKE_MAC
+ )
+
+ m_network_state = mock.MagicMock(spec=NetworkState)
+ parse_net = mock.patch(
+ 'cloudinit.cmd.devel.hotplug_hook.parse_net_config_data',
+ return_value=m_network_state
+ )
+
+ m_activator = mock.MagicMock(spec=NetworkActivator)
+ select_activator = mock.patch(
+ 'cloudinit.cmd.devel.hotplug_hook.activators.select_activator',
+ return_value=m_activator
+ )
+
+ sleep = mock.patch('time.sleep')
+
+ read_sys_net.start()
+ parse_net.start()
+ select_activator.start()
+ m_sleep = sleep.start()
+
+ yield namedtuple('mocks', 'm_init m_network_state m_activator m_sleep')(
+ m_init=m_init,
+ m_network_state=m_network_state,
+ m_activator=m_activator,
+ m_sleep=m_sleep,
+ )
+
+ read_sys_net.stop()
+ parse_net.stop()
+ select_activator.stop()
+ sleep.stop()
+
+
+class TestUnsupportedActions:
+ def test_unsupported_subsystem(self, mocks):
+ with pytest.raises(
+ Exception,
+ match='cannot handle events for subsystem: not_real'
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath='/dev/fake',
+ subsystem='not_real',
+ udevaction='add'
+ )
+
+ def test_unsupported_udevaction(self, mocks):
+ with pytest.raises(ValueError, match='Unknown action: not_real'):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath='/dev/fake',
+ udevaction='not_real',
+ subsystem='net'
+ )
+
+
+class TestHotplug:
+    def test_successful_add(self, mocks):
+ init = mocks.m_init
+ mocks.m_network_state.iter_interfaces.return_value = [{
+ 'mac_address': FAKE_MAC,
+ }]
+ handle_hotplug(
+ hotplug_init=init,
+ devpath='/dev/fake',
+ udevaction='add',
+ subsystem='net'
+ )
+ init.datasource.update_metadata_if_supported.assert_called_once_with([
+ EventType.HOTPLUG
+ ])
+ mocks.m_activator.bring_up_interface.assert_called_once_with('fake')
+ mocks.m_activator.bring_down_interface.assert_not_called()
+ init._write_to_cache.assert_called_once_with()
+
+ def test_successful_remove(self, mocks):
+ init = mocks.m_init
+ mocks.m_network_state.iter_interfaces.return_value = [{}]
+ handle_hotplug(
+ hotplug_init=init,
+ devpath='/dev/fake',
+ udevaction='remove',
+ subsystem='net'
+ )
+ init.datasource.update_metadata_if_supported.assert_called_once_with([
+ EventType.HOTPLUG
+ ])
+ mocks.m_activator.bring_down_interface.assert_called_once_with('fake')
+ mocks.m_activator.bring_up_interface.assert_not_called()
+ init._write_to_cache.assert_called_once_with()
+
+ def test_update_event_disabled(self, mocks, caplog):
+ init = mocks.m_init
+ init.update_event_enabled.return_value = False
+ handle_hotplug(
+ hotplug_init=init,
+ devpath='/dev/fake',
+ udevaction='remove',
+ subsystem='net'
+ )
+ assert 'hotplug not enabled for event of type' in caplog.text
+ init.datasource.update_metadata_if_supported.assert_not_called()
+ mocks.m_activator.bring_up_interface.assert_not_called()
+ mocks.m_activator.bring_down_interface.assert_not_called()
+ init._write_to_cache.assert_not_called()
+
+ def test_update_metadata_failed(self, mocks):
+ mocks.m_init.datasource.update_metadata_if_supported.return_value = \
+ False
+ with pytest.raises(
+ RuntimeError, match='Datasource .* not updated for event hotplug'
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath='/dev/fake',
+ udevaction='remove',
+ subsystem='net'
+ )
+
+ def test_detect_hotplugged_device_not_detected_on_add(self, mocks):
+ mocks.m_network_state.iter_interfaces.return_value = [{}]
+ with pytest.raises(
+ RuntimeError,
+ match='Failed to detect {} in updated metadata'.format(FAKE_MAC)
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath='/dev/fake',
+ udevaction='add',
+ subsystem='net'
+ )
+
+ def test_detect_hotplugged_device_detected_on_remove(self, mocks):
+ mocks.m_network_state.iter_interfaces.return_value = [{
+ 'mac_address': FAKE_MAC,
+ }]
+ with pytest.raises(
+ RuntimeError,
+ match='Failed to detect .* in updated metadata'
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath='/dev/fake',
+ udevaction='remove',
+ subsystem='net'
+ )
+
+ def test_apply_failed_on_add(self, mocks):
+ mocks.m_network_state.iter_interfaces.return_value = [{
+ 'mac_address': FAKE_MAC,
+ }]
+ mocks.m_activator.bring_up_interface.return_value = False
+ with pytest.raises(
+ RuntimeError, match='Failed to bring up device: /dev/fake'
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath='/dev/fake',
+ udevaction='add',
+ subsystem='net'
+ )
+
+ def test_apply_failed_on_remove(self, mocks):
+ mocks.m_network_state.iter_interfaces.return_value = [{}]
+ mocks.m_activator.bring_down_interface.return_value = False
+ with pytest.raises(
+ RuntimeError, match='Failed to bring down device: /dev/fake'
+ ):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath='/dev/fake',
+ udevaction='remove',
+ subsystem='net'
+ )
+
+ def test_retry(self, mocks):
+ with pytest.raises(RuntimeError):
+ handle_hotplug(
+ hotplug_init=mocks.m_init,
+ devpath='/dev/fake',
+ udevaction='add',
+ subsystem='net'
+ )
+ assert mocks.m_sleep.call_count == 5
+ assert mocks.m_sleep.call_args_list == [
+ call(1), call(3), call(5), call(10), call(30)
+ ]
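test_retry pins the hook to a fixed backoff schedule. Presumably handle_hotplug retries its work with increasing sleeps; a sketch of a loop that would produce exactly the asserted sleep pattern (an illustration, not cloud-init's actual implementation):

    import time

    def retry_with_backoff(attempt, delays=(1, 3, 5, 10, 30)):
        """Run attempt() up to len(delays) times, sleeping between failures."""
        last_exc = None
        for delay in delays:
            try:
                return attempt()
            except Exception as exc:  # illustrative; real handlers are narrower
                last_exc = exc
                time.sleep(delay)
        raise RuntimeError('all retries exhausted') from last_exc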
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index c5675249..30293e9e 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -27,6 +27,8 @@ from cloudinit.handlers.upstart_job import UpstartJobPartHandler
from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE)
+INSTANCE_DATA_FILE = 'instance-data-sensitive.json'
+
class TestUpstartJobPartHandler(FilesystemMockingTestCase):
@@ -145,8 +147,8 @@ class TestJinjaTemplatePartHandler(CiTestCase):
script_handler = ShellScriptPartHandler(self.paths)
self.assertEqual(2, script_handler.handler_version)
- # Create required instance-data.json file
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
+ # Create required instance data json file
+ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
instance_data = {'topkey': 'echo himom'}
util.write_file(instance_json, util.json_dumps(instance_data))
h = JinjaTemplatePartHandler(
@@ -168,7 +170,7 @@ class TestJinjaTemplatePartHandler(CiTestCase):
self.assertEqual(3, cloudcfg_handler.handler_version)
# Create required instance-data.json file
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
+ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
instance_data = {'topkey': {'sub': 'runcmd: [echo hi]'}}
util.write_file(instance_json, util.json_dumps(instance_data))
h = JinjaTemplatePartHandler(
@@ -198,8 +200,9 @@ class TestJinjaTemplatePartHandler(CiTestCase):
script_file = os.path.join(script_handler.script_dir, 'part01')
self.assertEqual(
'Cannot render jinja template vars. Instance data not yet present'
- ' at {}/instance-data.json'.format(
- self.run_dir), str(context_manager.exception))
+ ' at {}/{}'.format(self.run_dir, INSTANCE_DATA_FILE),
+ str(context_manager.exception)
+ )
self.assertFalse(
os.path.exists(script_file),
'Unexpected file created %s' % script_file)
@@ -207,7 +210,8 @@ class TestJinjaTemplatePartHandler(CiTestCase):
def test_jinja_template_handle_errors_on_unreadable_instance_data(self):
"""If instance-data is unreadable, raise an error from handle_part."""
script_handler = ShellScriptPartHandler(self.paths)
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
+ instance_json = os.path.join(
+ self.run_dir, INSTANCE_DATA_FILE)
util.write_file(instance_json, util.json_dumps({}))
h = JinjaTemplatePartHandler(
self.paths, sub_handlers=[script_handler])
@@ -221,8 +225,8 @@ class TestJinjaTemplatePartHandler(CiTestCase):
frequency='freq', headers='headers')
script_file = os.path.join(script_handler.script_dir, 'part01')
self.assertEqual(
- 'Cannot render jinja template vars. No read permission on'
- " '{rdir}/instance-data.json'. Try sudo".format(rdir=self.run_dir),
+ "Cannot render jinja template vars. No read permission on "
+ "'{}/{}'. Try sudo".format(self.run_dir, INSTANCE_DATA_FILE),
str(context_manager.exception))
self.assertFalse(
os.path.exists(script_file),
@@ -230,9 +234,9 @@ class TestJinjaTemplatePartHandler(CiTestCase):
@skipUnlessJinja()
def test_jinja_template_handle_renders_jinja_content(self):
- """When present, render jinja variables from instance-data.json."""
+ """When present, render jinja variables from instance data"""
script_handler = ShellScriptPartHandler(self.paths)
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
+ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
instance_data = {'topkey': {'subkey': 'echo himom'}}
util.write_file(instance_json, util.json_dumps(instance_data))
h = JinjaTemplatePartHandler(
@@ -247,8 +251,8 @@ class TestJinjaTemplatePartHandler(CiTestCase):
frequency='freq', headers='headers')
script_file = os.path.join(script_handler.script_dir, 'part01')
self.assertNotIn(
- 'Instance data not yet present at {}/instance-data.json'.format(
- self.run_dir),
+ 'Instance data not yet present at {}/{}'.format(
+ self.run_dir, INSTANCE_DATA_FILE),
self.logs.getvalue())
self.assertEqual(
'#!/bin/bash\necho himom', util.load_file(script_file))
@@ -257,7 +261,7 @@ class TestJinjaTemplatePartHandler(CiTestCase):
def test_jinja_template_handle_renders_jinja_content_missing_keys(self):
"""When specified jinja variable is undefined, log a warning."""
script_handler = ShellScriptPartHandler(self.paths)
- instance_json = os.path.join(self.run_dir, 'instance-data.json')
+ instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE)
instance_data = {'topkey': {'subkey': 'echo himom'}}
util.write_file(instance_json, util.json_dumps(instance_data))
h = JinjaTemplatePartHandler(
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index f5cf514d..fdb4026c 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -224,7 +224,8 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
self._call_main(['cloud-init', 'devel', 'schema', '--docs', 'all'])
expected_doc_sections = [
'**Supported distros:** all',
- '**Supported distros:** almalinux, alpine, centos, debian, fedora',
+ ('**Supported distros:** almalinux, alpine, centos, debian, '
+ 'fedora, opensuse, photon, rhel, rocky, sles, ubuntu'),
'**Config schema**:\n **resize_rootfs:** (true/false/noblock)',
'**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n'
]
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 742d1faa..54e06119 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -3163,8 +3163,8 @@ class TestRemoveUbuntuNetworkConfigScripts(CiTestCase):
expected_logs = [
'INFO: Removing Ubuntu extended network scripts because cloud-init'
- ' updates Azure network configuration on the following event:'
- ' System boot.',
+ ' updates Azure network configuration on the following events:'
+ " ['boot', 'boot-legacy']",
'Recursively deleting %s' % subdir,
'Attempting to remove %s' % file1]
for log in expected_logs:
diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py
index dce01f5d..e2718077 100644
--- a/tests/unittests/test_datasource/test_ovf.py
+++ b/tests/unittests/test_datasource/test_ovf.py
@@ -138,18 +138,17 @@ class TestDatasourceOVF(CiTestCase):
self.assertIn(
'DEBUG: No system-product-name found', self.logs.getvalue())
- def test_get_data_no_vmware_customization_disabled(self):
- """When cloud-init workflow for vmware is disabled via sys_cfg and
- no meta data provided, log a message.
+ def test_get_data_vmware_customization_disabled(self):
+ """When vmware customization is disabled via sys_cfg and
+ allow_raw_data is disabled via ds_cfg, log a message.
"""
paths = Paths({'cloud_dir': self.tdir})
ds = self.datasource(
- sys_cfg={'disable_vmware_customization': True}, distro={},
- paths=paths)
+ sys_cfg={'disable_vmware_customization': True,
+ 'datasource': {'OVF': {'allow_raw_data': False}}},
+ distro={}, paths=paths)
conf_file = self.tmp_path('test-cust', self.tdir)
conf_content = dedent("""\
- [CUSTOM-SCRIPT]
- SCRIPT-NAME = test-script
[MISC]
MARKER-ID = 12345345
""")
@@ -168,7 +167,71 @@ class TestDatasourceOVF(CiTestCase):
'DEBUG: Customization for VMware platform is disabled.',
self.logs.getvalue())
- def test_get_data_vmware_customization_disabled(self):
+ def test_get_data_vmware_customization_sys_cfg_disabled(self):
+ """When vmware customization is disabled via sys_cfg and
+ no meta data is found, log a message.
+ """
+ paths = Paths({'cloud_dir': self.tdir})
+ ds = self.datasource(
+ sys_cfg={'disable_vmware_customization': True,
+ 'datasource': {'OVF': {'allow_raw_data': True}}},
+ distro={}, paths=paths)
+ conf_file = self.tmp_path('test-cust', self.tdir)
+ conf_content = dedent("""\
+ [MISC]
+ MARKER-ID = 12345345
+ """)
+ util.write_file(conf_file, conf_content)
+ retcode = wrap_and_call(
+ 'cloudinit.sources.DataSourceOVF',
+ {'dmi.read_dmi_data': 'vmware',
+ 'transport_iso9660': NOT_FOUND,
+ 'transport_vmware_guestinfo': NOT_FOUND,
+ 'util.del_dir': True,
+ 'search_file': self.tdir,
+ 'wait_for_imc_cfg_file': conf_file},
+ ds.get_data)
+ self.assertFalse(retcode, 'Expected False return from ds.get_data')
+ self.assertIn(
+ 'DEBUG: Customization using VMware config is disabled.',
+ self.logs.getvalue())
+
+ def test_get_data_allow_raw_data_disabled(self):
+ """When allow_raw_data is disabled via ds_cfg and
+ meta data is found, log a message.
+ """
+ paths = Paths({'cloud_dir': self.tdir})
+ ds = self.datasource(
+ sys_cfg={'disable_vmware_customization': False,
+ 'datasource': {'OVF': {'allow_raw_data': False}}},
+ distro={}, paths=paths)
+
+ # Prepare the conf file
+ conf_file = self.tmp_path('test-cust', self.tdir)
+ conf_content = dedent("""\
+ [CLOUDINIT]
+ METADATA = test-meta
+ """)
+ util.write_file(conf_file, conf_content)
+ # Prepare the meta data file
+ metadata_file = self.tmp_path('test-meta', self.tdir)
+ util.write_file(metadata_file, "This is meta data")
+ retcode = wrap_and_call(
+ 'cloudinit.sources.DataSourceOVF',
+ {'dmi.read_dmi_data': 'vmware',
+ 'transport_iso9660': NOT_FOUND,
+ 'transport_vmware_guestinfo': NOT_FOUND,
+ 'util.del_dir': True,
+ 'search_file': self.tdir,
+ 'wait_for_imc_cfg_file': conf_file,
+ 'collect_imc_file_paths': [self.tdir + '/test-meta', '', '']},
+ ds.get_data)
+ self.assertFalse(retcode, 'Expected False return from ds.get_data')
+ self.assertIn(
+ 'DEBUG: Customization using raw data is disabled.',
+ self.logs.getvalue())
+
+ def test_get_data_vmware_customization_enabled(self):
"""When cloud-init workflow for vmware is enabled via sys_cfg log a
message.
"""
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 5847a384..9c499672 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -29,7 +29,7 @@ from cloudinit.sources.DataSourceSmartOS import (
convert_smartos_network_data as convert_net,
SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ,
identify_file)
-from cloudinit.event import EventType
+from cloudinit.event import EventScope, EventType
from cloudinit import helpers as c_helpers
from cloudinit.util import (b64e, write_file)
@@ -653,8 +653,12 @@ class TestSmartOSDataSource(FilesystemMockingTestCase):
def test_reconfig_network_on_boot(self):
# Test to ensure that network is configured from metadata on each boot
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
- self.assertSetEqual(set([EventType.BOOT_NEW_INSTANCE, EventType.BOOT]),
- dsrc.update_events['network'])
+ self.assertSetEqual(
+ {EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY},
+ dsrc.default_update_events[EventScope.NETWORK]
+ )
class TestIdentifyFile(CiTestCase):
diff --git a/tests/unittests/test_distros/test_dragonflybsd.py b/tests/unittests/test_distros/test_dragonflybsd.py
new file mode 100644
index 00000000..df2c00f4
--- /dev/null
+++ b/tests/unittests/test_distros/test_dragonflybsd.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python3
+
+
+import cloudinit.util
+from cloudinit.tests.helpers import mock
+
+
+def test_find_dragonflybsd_part():
+ assert cloudinit.util.find_dragonflybsd_part("/dev/vbd0s3") == "vbd0s3"
+
+
+@mock.patch("cloudinit.util.is_DragonFlyBSD")
+@mock.patch("cloudinit.subp.subp")
+def test_parse_mount(mock_subp, m_is_DragonFlyBSD):
+ mount_out = """
+vbd0s3 on / (hammer2, local)
+devfs on /dev (devfs, nosymfollow, local)
+/dev/vbd0s0a on /boot (ufs, local)
+procfs on /proc (procfs, local)
+tmpfs on /var/run/shm (tmpfs, local)
+"""
+
+ mock_subp.return_value = (mount_out, "")
+ m_is_DragonFlyBSD.return_value = True
+ assert cloudinit.util.parse_mount("/") == ("vbd0s3", "hammer2", "/")
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index a1df066a..d09e46af 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -2,6 +2,7 @@
import copy
import os
+import re
from io import StringIO
from textwrap import dedent
from unittest import mock
@@ -14,7 +15,7 @@ from cloudinit.tests.helpers import (
FilesystemMockingTestCase, dir2dict)
from cloudinit import subp
from cloudinit import util
-
+from cloudinit import safeyaml
BASE_NET_CFG = '''
auto lo
@@ -88,6 +89,24 @@ V1_NET_CFG = {'config': [{'name': 'eth0',
'type': 'physical'}],
'version': 1}
+V1_NET_CFG_WITH_DUPS = """\
+# same value in interface specific dns and global dns
+# should produce single entry in network file
+version: 1
+config:
+ - type: physical
+ name: eth0
+ subnets:
+ - type: static
+ address: 192.168.0.102/24
+ dns_nameservers: [1.2.3.4]
+ dns_search: [test.com]
+ interface: eth0
+ - type: nameserver
+ address: [1.2.3.4]
+ search: [test.com]
+"""
+
V1_NET_CFG_OUTPUT = """\
# This file is generated from information provided by the datasource. Changes
# to it will not persist across an instance reboot. To disable cloud-init's
@@ -771,6 +790,125 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase):
with_netplan=True)
+class TestNetCfgDistroPhoton(TestNetCfgDistroBase):
+
+ def setUp(self):
+ super(TestNetCfgDistroPhoton, self).setUp()
+ self.distro = self._get_distro('photon', renderers=['networkd'])
+
+ def create_conf_dict(self, contents):
+ content_dict = {}
+ for line in contents:
+ if line:
+ line = line.strip()
+ if line and re.search(r'^\[(.+)\]$', line):
+ content_dict[line] = []
+ key = line
+ elif line:
+ assert key
+ content_dict[key].append(line)
+
+ return content_dict
+
+ def compare_dicts(self, actual, expected):
+ for k, v in actual.items():
+ self.assertEqual(sorted(expected[k]), sorted(v))
+
+ def _apply_and_verify(self, apply_fn, config, expected_cfgs=None,
+ bringup=False):
+ if not expected_cfgs:
+            raise ValueError('expected_cfgs must not be None')
+
+ tmpd = None
+ with mock.patch('cloudinit.net.networkd.available') as m_avail:
+ m_avail.return_value = True
+ with self.reRooted(tmpd) as tmpd:
+ apply_fn(config, bringup)
+
+ results = dir2dict(tmpd)
+ for cfgpath, expected in expected_cfgs.items():
+ actual = self.create_conf_dict(results[cfgpath].splitlines())
+ self.compare_dicts(actual, expected)
+ self.assertEqual(0o644, get_mode(cfgpath, tmpd))
+
+ def nwk_file_path(self, ifname):
+ return '/etc/systemd/network/10-cloud-init-%s.network' % ifname
+
+ def net_cfg_1(self, ifname):
+ ret = """\
+ [Match]
+ Name=%s
+ [Network]
+ DHCP=no
+ [Address]
+ Address=192.168.1.5/24
+ [Route]
+ Gateway=192.168.1.254""" % ifname
+ return ret
+
+ def net_cfg_2(self, ifname):
+ ret = """\
+ [Match]
+ Name=%s
+ [Network]
+ DHCP=ipv4""" % ifname
+ return ret
+
+ def test_photon_network_config_v1(self):
+ tmp = self.net_cfg_1('eth0').splitlines()
+ expected_eth0 = self.create_conf_dict(tmp)
+
+ tmp = self.net_cfg_2('eth1').splitlines()
+ expected_eth1 = self.create_conf_dict(tmp)
+
+ expected_cfgs = {
+ self.nwk_file_path('eth0'): expected_eth0,
+ self.nwk_file_path('eth1'): expected_eth1,
+ }
+
+ self._apply_and_verify(self.distro.apply_network_config,
+ V1_NET_CFG,
+ expected_cfgs.copy())
+
+ def test_photon_network_config_v2(self):
+ tmp = self.net_cfg_1('eth7').splitlines()
+ expected_eth7 = self.create_conf_dict(tmp)
+
+ tmp = self.net_cfg_2('eth9').splitlines()
+ expected_eth9 = self.create_conf_dict(tmp)
+
+ expected_cfgs = {
+ self.nwk_file_path('eth7'): expected_eth7,
+ self.nwk_file_path('eth9'): expected_eth9,
+ }
+
+ self._apply_and_verify(self.distro.apply_network_config,
+ V2_NET_CFG,
+ expected_cfgs.copy())
+
+ def test_photon_network_config_v1_with_duplicates(self):
+ expected = """\
+ [Match]
+ Name=eth0
+ [Network]
+ DHCP=no
+ DNS=1.2.3.4
+ Domains=test.com
+ [Address]
+ Address=192.168.0.102/24"""
+
+ net_cfg = safeyaml.load(V1_NET_CFG_WITH_DUPS)
+
+ expected = self.create_conf_dict(expected.splitlines())
+ expected_cfgs = {
+ self.nwk_file_path('eth0'): expected,
+ }
+
+ self._apply_and_verify(self.distro.apply_network_config,
+ net_cfg,
+ expected_cfgs.copy())
+
+
def get_mode(path, target=None):
return os.stat(subp.target_path(target, path)).st_mode & 0o777
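create_conf_dict above normalizes a rendered systemd .network unit into a mapping from section header to option lines, and compare_dicts checks each section with sorted() so option order within a section is irrelevant. A worked example of the shape it produces:

    sample = """\
    [Match]
    Name=eth0
    [Network]
    DHCP=ipv4"""

    # create_conf_dict(sample.splitlines()) yields:
    #   {'[Match]': ['Name=eth0'], '[Network]': ['DHCP=ipv4']}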
diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py
index ac847238..abb0a9b6 100644
--- a/tests/unittests/test_handler/test_handler_apt_source_v3.py
+++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py
@@ -1009,6 +1009,29 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""")
self.assertEqual(mirrors['SECURITY'],
smir)
+ def test_apt_v3_add_mirror_keys(self):
+ """test_apt_v3_add_mirror_keys - Test adding key for mirrors"""
+ arch = 'amd64'
+ cfg = {
+ 'primary': [
+ {'arches': [arch],
+ 'uri': 'http://test.ubuntu.com/',
+ 'key': 'fakekey_primary'}],
+ 'security': [
+ {'arches': [arch],
+ 'uri': 'http://testsec.ubuntu.com/',
+ 'key': 'fakekey_security'}]
+ }
+
+ with mock.patch.object(cc_apt_configure,
+ 'add_apt_key_raw') as mockadd:
+ cc_apt_configure.add_mirror_keys(cfg, TARGET)
+ calls = [
+ mock.call('fakekey_primary', TARGET),
+ mock.call('fakekey_security', TARGET),
+ ]
+ mockadd.assert_has_calls(calls, any_order=True)
+
class TestDebconfSelections(TestCase):
diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py
index 73641b70..32ca3b7e 100644
--- a/tests/unittests/test_handler/test_handler_set_hostname.py
+++ b/tests/unittests/test_handler/test_handler_set_hostname.py
@@ -120,6 +120,32 @@ class TestHostname(t_help.FilesystemMockingTestCase):
contents = util.load_file(distro.hostname_conf_fn)
self.assertEqual('blah', contents.strip())
+ @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False)
+ def test_photon_hostname(self, m_uses_systemd):
+ cfg1 = {
+ 'hostname': 'photon',
+ 'prefer_fqdn_over_hostname': True,
+ 'fqdn': 'test1.vmware.com',
+ }
+ cfg2 = {
+ 'hostname': 'photon',
+ 'prefer_fqdn_over_hostname': False,
+ 'fqdn': 'test2.vmware.com',
+ }
+
+ ds = None
+ distro = self._fetch_distro('photon', cfg1)
+ paths = helpers.Paths({'cloud_dir': self.tmp})
+ cc = cloud.Cloud(ds, paths, {}, distro, None)
+ self.patchUtils(self.tmp)
+ for c in [cfg1, cfg2]:
+ cc_set_hostname.handle('cc_set_hostname', c, cc, LOG, [])
+ contents = util.load_file(distro.hostname_conf_fn, decode=True)
+ if c['prefer_fqdn_over_hostname']:
+ self.assertEqual(contents.strip(), c['fqdn'])
+ else:
+ self.assertEqual(contents.strip(), c['hostname'])
+
def test_multiple_calls_skips_unchanged_hostname(self):
"""Only new hostname or fqdn values will generate a hostname call."""
distro = self._fetch_distro('debian')
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index b72a62b8..43e209c1 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -5,7 +5,7 @@ from cloudinit import distros
from cloudinit.net import cmdline
from cloudinit.net import (
eni, interface_has_own_mac, natural_sort_key, netplan, network_state,
- renderers, sysconfig)
+ renderers, sysconfig, networkd)
from cloudinit.sources.helpers import openstack
from cloudinit import temp_utils
from cloudinit import subp
@@ -821,6 +821,32 @@ iface eth1 inet static
NETWORK_CONFIGS = {
'small': {
+ 'expected_networkd_eth99': textwrap.dedent("""\
+ [Match]
+ Name=eth99
+ MACAddress=c0:d6:9f:2c:e8:80
+ [Address]
+ Address=192.168.21.3/24
+ [Network]
+ DHCP=ipv4
+ Domains=barley.maas sach.maas
+ Domains=wark.maas
+ DNS=1.2.3.4 5.6.7.8
+ DNS=8.8.8.8 8.8.4.4
+ [Route]
+ Gateway=65.61.151.37
+ Destination=0.0.0.0/0
+ Metric=10000
+ """).rstrip(' '),
+ 'expected_networkd_eth1': textwrap.dedent("""\
+ [Match]
+ Name=eth1
+ MACAddress=cf:d6:af:48:e8:80
+ [Network]
+ DHCP=no
+ Domains=wark.maas
+ DNS=1.2.3.4 5.6.7.8
+ """).rstrip(' '),
'expected_eni': textwrap.dedent("""\
auto lo
iface lo inet loopback
@@ -938,6 +964,12 @@ NETWORK_CONFIGS = {
"""),
},
'v4_and_v6': {
+ 'expected_networkd': textwrap.dedent("""\
+ [Match]
+ Name=iface0
+ [Network]
+ DHCP=yes
+ """).rstrip(' '),
'expected_eni': textwrap.dedent("""\
auto lo
iface lo inet loopback
@@ -973,6 +1005,17 @@ NETWORK_CONFIGS = {
""").rstrip(' '),
},
'v4_and_v6_static': {
+ 'expected_networkd': textwrap.dedent("""\
+ [Match]
+ Name=iface0
+ [Link]
+ MTUBytes=8999
+ [Network]
+ DHCP=no
+ [Address]
+ Address=192.168.14.2/24
+ Address=2001:1::1/64
+ """).rstrip(' '),
'expected_eni': textwrap.dedent("""\
auto lo
iface lo inet loopback
@@ -1059,6 +1102,12 @@ NETWORK_CONFIGS = {
""").rstrip(' '),
},
'dhcpv6_only': {
+ 'expected_networkd': textwrap.dedent("""\
+ [Match]
+ Name=iface0
+ [Network]
+ DHCP=ipv6
+ """).rstrip(' '),
'expected_eni': textwrap.dedent("""\
auto lo
iface lo inet loopback
@@ -4986,26 +5035,199 @@ class TestEniRoundTrip(CiTestCase):
files['/etc/network/interfaces'].splitlines())
+class TestNetworkdNetRendering(CiTestCase):
+
+ def create_conf_dict(self, contents):
+ content_dict = {}
+ for line in contents:
+ if line:
+ line = line.strip()
+ if line and re.search(r'^\[(.+)\]$', line):
+ content_dict[line] = []
+ key = line
+ elif line:
+ content_dict[key].append(line)
+
+ return content_dict
+
+ def compare_dicts(self, actual, expected):
+ for k, v in actual.items():
+ self.assertEqual(sorted(expected[k]), sorted(v))
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot")
+ @mock.patch("cloudinit.net.sys_dev_path")
+ @mock.patch("cloudinit.net.read_sys_net")
+ @mock.patch("cloudinit.net.get_devicelist")
+ def test_networkd_default_generation(self, mock_get_devicelist,
+ mock_read_sys_net,
+ mock_sys_dev_path,
+ m_get_cmdline,
+ m_chown):
+ tmp_dir = self.tmp_dir()
+ _setup_test(tmp_dir, mock_get_devicelist,
+ mock_read_sys_net, mock_sys_dev_path)
+
+ network_cfg = net.generate_fallback_config()
+ ns = network_state.parse_net_config_data(network_cfg,
+ skip_broken=False)
+
+ render_dir = os.path.join(tmp_dir, "render")
+ os.makedirs(render_dir)
+
+ render_target = 'etc/systemd/network/10-cloud-init-eth1000.network'
+ renderer = networkd.Renderer({})
+ renderer.render_network_state(ns, target=render_dir)
+
+ self.assertTrue(os.path.exists(os.path.join(render_dir,
+ render_target)))
+ with open(os.path.join(render_dir, render_target)) as fh:
+ contents = fh.readlines()
+
+ actual = self.create_conf_dict(contents)
+ print(actual)
+
+ expected = textwrap.dedent("""\
+ [Match]
+ Name=eth1000
+ MACAddress=07-1c-c6-75-a4-be
+ [Network]
+ DHCP=ipv4""").rstrip(' ')
+
+ expected = self.create_conf_dict(expected.splitlines())
+
+ self.compare_dicts(actual, expected)
+
+
+class TestNetworkdRoundTrip(CiTestCase):
+
+ def create_conf_dict(self, contents):
+ content_dict = {}
+ for line in contents:
+ if line:
+ line = line.strip()
+ if line and re.search(r'^\[(.+)\]$', line):
+ content_dict[line] = []
+ key = line
+ elif line:
+ content_dict[key].append(line)
+
+ return content_dict
+
+ def compare_dicts(self, actual, expected):
+ for k, v in actual.items():
+ self.assertEqual(sorted(expected[k]), sorted(v))
+
+ def _render_and_read(self, network_config=None, state=None, nwkd_path=None,
+ dir=None):
+ if dir is None:
+ dir = self.tmp_dir()
+
+ if network_config:
+ ns = network_state.parse_net_config_data(network_config)
+ elif state:
+ ns = state
+ else:
+ raise ValueError("Expected data or state, got neither")
+
+ if not nwkd_path:
+ nwkd_path = '/etc/systemd/network/'
+
+ renderer = networkd.Renderer(config={'network_conf_dir': nwkd_path})
+
+ renderer.render_network_state(ns, target=dir)
+ return dir2dict(dir)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def testsimple_render_small_networkd(self, m_chown):
+ nwk_fn1 = '/etc/systemd/network/10-cloud-init-eth99.network'
+ nwk_fn2 = '/etc/systemd/network/10-cloud-init-eth1.network'
+ entry = NETWORK_CONFIGS['small']
+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+
+ actual = files[nwk_fn1].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry['expected_networkd_eth99'].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ actual = files[nwk_fn2].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry['expected_networkd_eth1'].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def testsimple_render_v4_and_v6(self, m_chown):
+ nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network'
+ entry = NETWORK_CONFIGS['v4_and_v6']
+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry['expected_networkd'].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def testsimple_render_v4_and_v6_static(self, m_chown):
+ nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network'
+ entry = NETWORK_CONFIGS['v4_and_v6_static']
+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry['expected_networkd'].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+ @mock.patch("cloudinit.net.util.chownbyname", return_value=True)
+ def testsimple_render_dhcpv6_only(self, m_chown):
+ nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network'
+ entry = NETWORK_CONFIGS['dhcpv6_only']
+ files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+
+ actual = files[nwk_fn].splitlines()
+ actual = self.create_conf_dict(actual)
+
+ expected = entry['expected_networkd'].splitlines()
+ expected = self.create_conf_dict(expected)
+
+ self.compare_dicts(actual, expected)
+
+
class TestRenderersSelect:
@pytest.mark.parametrize(
- 'renderer_selected,netplan,eni,nm,scfg,sys', (
+ 'renderer_selected,netplan,eni,nm,scfg,sys,networkd', (
# -netplan -ifupdown -nm -scfg -sys raises error
- (net.RendererNotFoundError, False, False, False, False, False),
+ (net.RendererNotFoundError, False, False, False, False, False,
+ False),
# -netplan +ifupdown -nm -scfg -sys selects eni
- ('eni', False, True, False, False, False),
+ ('eni', False, True, False, False, False, False),
# +netplan +ifupdown -nm -scfg -sys selects eni
- ('eni', True, True, False, False, False),
+ ('eni', True, True, False, False, False, False),
# +netplan -ifupdown -nm -scfg -sys selects netplan
- ('netplan', True, False, False, False, False),
+ ('netplan', True, False, False, False, False, False),
# Ubuntu with Network-Manager installed
# +netplan -ifupdown +nm -scfg -sys selects netplan
- ('netplan', True, False, True, False, False),
+ ('netplan', True, False, True, False, False, False),
# Centos/OpenSuse with Network-Manager installed selects sysconfig
        # -netplan -ifupdown +nm -scfg +sys selects sysconfig
- ('sysconfig', False, False, True, False, True),
+ ('sysconfig', False, False, True, False, True, False),
+ # -netplan -ifupdown -nm -scfg -sys +networkd selects networkd
+ ('networkd', False, False, False, False, False, True),
),
)
+ @mock.patch("cloudinit.net.renderers.networkd.available")
@mock.patch("cloudinit.net.renderers.netplan.available")
@mock.patch("cloudinit.net.renderers.sysconfig.available")
@mock.patch("cloudinit.net.renderers.sysconfig.available_sysconfig")
@@ -5013,7 +5235,8 @@ class TestRenderersSelect:
@mock.patch("cloudinit.net.renderers.eni.available")
def test_valid_renderer_from_defaults_depending_on_availability(
self, m_eni_avail, m_nm_avail, m_scfg_avail, m_sys_avail,
- m_netplan_avail, renderer_selected, netplan, eni, nm, scfg, sys
+ m_netplan_avail, m_networkd_avail, renderer_selected,
+ netplan, eni, nm, scfg, sys, networkd
):
"""Assert proper renderer per DEFAULT_PRIORITY given availability."""
m_eni_avail.return_value = eni # ifupdown pkg presence
@@ -5021,6 +5244,7 @@ class TestRenderersSelect:
m_scfg_avail.return_value = scfg # sysconfig presence
m_sys_avail.return_value = sys # sysconfig/ifup/down presence
m_netplan_avail.return_value = netplan # netplan presence
+ m_networkd_avail.return_value = networkd # networkd presence
if isinstance(renderer_selected, str):
(renderer_name, _rnd_class) = renderers.select(
priority=renderers.DEFAULT_PRIORITY
@@ -5053,7 +5277,7 @@ class TestNetRenderers(CiTestCase):
# available should only be called until one is found.
m_eni_avail.return_value = True
m_sysc_avail.side_effect = Exception("Should not call me")
- found = renderers.search(priority=['eni', 'sysconfig'], first=True)
+ found = renderers.search(priority=['eni', 'sysconfig'], first=True)[0]
self.assertEqual(['eni'], [found[0]])
@mock.patch("cloudinit.net.renderers.sysconfig.available")
@@ -5094,6 +5318,12 @@ class TestNetRenderers(CiTestCase):
result = sysconfig.available()
self.assertTrue(result)
+ @mock.patch("cloudinit.net.renderers.networkd.available")
+ def test_networkd_available(self, m_nwkd_avail):
+ m_nwkd_avail.return_value = True
+ found = renderers.search(priority=['networkd'], first=False)
+ self.assertEqual('networkd', found[0][0])
+
@mock.patch(
"cloudinit.net.is_openvswitch_internal_interface",
diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py
new file mode 100644
index 00000000..db825c35
--- /dev/null
+++ b/tests/unittests/test_net_activators.py
@@ -0,0 +1,232 @@
+from collections import namedtuple
+from unittest.mock import patch
+
+import pytest
+
+from cloudinit.net.activators import (
+ DEFAULT_PRIORITY,
+ search_activator,
+ select_activator,
+)
+from cloudinit.net.activators import (
+ IfUpDownActivator,
+ NetplanActivator,
+ NetworkManagerActivator
+)
+from cloudinit.net.network_state import parse_net_config_data
+from cloudinit.safeyaml import load
+
+
+V1_CONFIG = """\
+version: 1
+config:
+- type: physical
+ name: eth0
+- type: physical
+ name: eth1
+"""
+
+V2_CONFIG = """\
+version: 2
+ethernets:
+ eth0:
+ dhcp4: true
+ eth1:
+ dhcp4: true
+"""
+
+NETPLAN_CALL_LIST = [
+ ((['netplan', 'apply'], ), {}),
+]
+
+
+@pytest.yield_fixture
+def available_mocks():
+ mocks = namedtuple('Mocks', 'm_which, m_file')
+ with patch('cloudinit.subp.which', return_value=True) as m_which:
+ with patch('os.path.isfile', return_value=True) as m_file:
+ yield mocks(m_which, m_file)
+
+
+@pytest.yield_fixture
+def unavailable_mocks():
+ mocks = namedtuple('Mocks', 'm_which, m_file')
+ with patch('cloudinit.subp.which', return_value=False) as m_which:
+ with patch('os.path.isfile', return_value=False) as m_file:
+ yield mocks(m_which, m_file)
+
+
+class TestSearchAndSelect:
+ def test_defaults(self, available_mocks):
+ resp = search_activator()
+ assert resp == DEFAULT_PRIORITY
+
+ activator = select_activator()
+ assert activator == DEFAULT_PRIORITY[0]
+
+ def test_priority(self, available_mocks):
+ new_order = [NetplanActivator, NetworkManagerActivator]
+ resp = search_activator(priority=new_order)
+ assert resp == new_order
+
+ activator = select_activator(priority=new_order)
+ assert activator == new_order[0]
+
+ def test_target(self, available_mocks):
+ search_activator(target='/tmp')
+ assert '/tmp' == available_mocks.m_which.call_args[1]['target']
+
+ select_activator(target='/tmp')
+ assert '/tmp' == available_mocks.m_which.call_args[1]['target']
+
+ @patch('cloudinit.net.activators.IfUpDownActivator.available',
+ return_value=False)
+ def test_first_not_available(self, m_available, available_mocks):
+ resp = search_activator()
+ assert resp == DEFAULT_PRIORITY[1:]
+
+ resp = select_activator()
+ assert resp == DEFAULT_PRIORITY[1]
+
+ def test_priority_not_exist(self, available_mocks):
+ with pytest.raises(ValueError):
+ search_activator(priority=['spam', 'eggs'])
+ with pytest.raises(ValueError):
+ select_activator(priority=['spam', 'eggs'])
+
+ def test_none_available(self, unavailable_mocks):
+ resp = search_activator()
+ assert resp == []
+
+ with pytest.raises(RuntimeError):
+ select_activator()
+
+
+IF_UP_DOWN_AVAILABLE_CALLS = [
+ (('ifquery',), {'search': ['/sbin', '/usr/sbin'], 'target': None}),
+ (('ifup',), {'search': ['/sbin', '/usr/sbin'], 'target': None}),
+ (('ifdown',), {'search': ['/sbin', '/usr/sbin'], 'target': None}),
+]
+
+NETPLAN_AVAILABLE_CALLS = [
+ (('netplan',), {'search': ['/usr/sbin', '/sbin'], 'target': None}),
+]
+
+NETWORK_MANAGER_AVAILABLE_CALLS = [
+ (('nmcli',), {'target': None}),
+]
+
+
+@pytest.mark.parametrize('activator, available_calls', [
+ (IfUpDownActivator, IF_UP_DOWN_AVAILABLE_CALLS),
+ (NetplanActivator, NETPLAN_AVAILABLE_CALLS),
+ (NetworkManagerActivator, NETWORK_MANAGER_AVAILABLE_CALLS),
+])
+class TestActivatorsAvailable:
+ def test_available(
+ self, activator, available_calls, available_mocks
+ ):
+ activator.available()
+ assert available_mocks.m_which.call_args_list == available_calls
+
+
+IF_UP_DOWN_BRING_UP_CALL_LIST = [
+ ((['ifup', 'eth0'], ), {}),
+ ((['ifup', 'eth1'], ), {}),
+]
+
+NETWORK_MANAGER_BRING_UP_CALL_LIST = [
+ ((['nmcli', 'connection', 'up', 'ifname', 'eth0'], ), {}),
+ ((['nmcli', 'connection', 'up', 'ifname', 'eth1'], ), {}),
+]
+
+
+@pytest.mark.parametrize('activator, expected_call_list', [
+ (IfUpDownActivator, IF_UP_DOWN_BRING_UP_CALL_LIST),
+ (NetplanActivator, NETPLAN_CALL_LIST),
+ (NetworkManagerActivator, NETWORK_MANAGER_BRING_UP_CALL_LIST),
+])
+class TestActivatorsBringUp:
+ @patch('cloudinit.subp.subp', return_value=('', ''))
+ def test_bring_up_interface(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ activator.bring_up_interface('eth0')
+ assert len(m_subp.call_args_list) == 1
+ assert m_subp.call_args_list[0] == expected_call_list[0]
+
+ @patch('cloudinit.subp.subp', return_value=('', ''))
+ def test_bring_up_interfaces(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ activator.bring_up_interfaces(['eth0', 'eth1'])
+ assert expected_call_list == m_subp.call_args_list
+
+ @patch('cloudinit.subp.subp', return_value=('', ''))
+ def test_bring_up_all_interfaces_v1(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ network_state = parse_net_config_data(load(V1_CONFIG))
+ activator.bring_up_all_interfaces(network_state)
+ for call in m_subp.call_args_list:
+ assert call in expected_call_list
+
+ @patch('cloudinit.subp.subp', return_value=('', ''))
+ def test_bring_up_all_interfaces_v2(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ network_state = parse_net_config_data(load(V2_CONFIG))
+ activator.bring_up_all_interfaces(network_state)
+ for call in m_subp.call_args_list:
+ assert call in expected_call_list
+
+
+IF_UP_DOWN_BRING_DOWN_CALL_LIST = [
+ ((['ifdown', 'eth0'], ), {}),
+ ((['ifdown', 'eth1'], ), {}),
+]
+
+NETWORK_MANAGER_BRING_DOWN_CALL_LIST = [
+ ((['nmcli', 'connection', 'down', 'eth0'], ), {}),
+ ((['nmcli', 'connection', 'down', 'eth1'], ), {}),
+]
+
+
+@pytest.mark.parametrize('activator, expected_call_list', [
+ (IfUpDownActivator, IF_UP_DOWN_BRING_DOWN_CALL_LIST),
+ (NetplanActivator, NETPLAN_CALL_LIST),
+ (NetworkManagerActivator, NETWORK_MANAGER_BRING_DOWN_CALL_LIST),
+])
+class TestActivatorsBringDown:
+ @patch('cloudinit.subp.subp', return_value=('', ''))
+ def test_bring_down_interface(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ activator.bring_down_interface('eth0')
+ assert len(m_subp.call_args_list) == 1
+ assert m_subp.call_args_list[0] == expected_call_list[0]
+
+ @patch('cloudinit.subp.subp', return_value=('', ''))
+ def test_bring_down_interfaces(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ activator.bring_down_interfaces(['eth0', 'eth1'])
+ assert expected_call_list == m_subp.call_args_list
+
+ @patch('cloudinit.subp.subp', return_value=('', ''))
+ def test_bring_down_all_interfaces_v1(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ network_state = parse_net_config_data(load(V1_CONFIG))
+ activator.bring_down_all_interfaces(network_state)
+ for call in m_subp.call_args_list:
+ assert call in expected_call_list
+
+ @patch('cloudinit.subp.subp', return_value=('', ''))
+ def test_bring_down_all_interfaces_v2(
+ self, m_subp, activator, expected_call_list, available_mocks
+ ):
+ network_state = parse_net_config_data(load(V2_CONFIG))
+ activator.bring_down_all_interfaces(network_state)
+ for call in m_subp.call_args_list:
+ assert call in expected_call_list
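For orientation, a hedged sketch of how callers are expected to drive the activator API exercised above; the probing behavior mirrors these tests rather than any documented contract:

    from cloudinit.net.activators import select_activator

    # Picks the first activator in DEFAULT_PRIORITY whose available()
    # probe succeeds, e.g. IfUpDownActivator when ifup/ifdown/ifquery
    # are on PATH.
    activator = select_activator()
    activator.bring_up_interface('eth0')  # shells out, e.g. ['ifup', 'eth0']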
diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py
index 414b4830..466d472b 100644
--- a/tests/unittests/test_net_freebsd.py
+++ b/tests/unittests/test_net_freebsd.py
@@ -1,8 +1,24 @@
-from cloudinit import net
+import os
+import yaml
+
+import cloudinit.net
+import cloudinit.net.network_state
+from cloudinit.tests.helpers import (CiTestCase, mock, readResource, dir2dict)
-from cloudinit.tests.helpers import (CiTestCase, mock, readResource)
SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output")
+V1 = """
+config:
+- id: eno1
+ mac_address: 08:94:ef:51:ae:e0
+ mtu: 1470
+ name: eno1
+ subnets:
+ - address: 172.20.80.129/25
+ type: static
+ type: physical
+version: 1
+"""
class TestInterfacesByMac(CiTestCase):
@@ -12,8 +28,49 @@ class TestInterfacesByMac(CiTestCase):
def test_get_interfaces_by_mac(self, mock_is_FreeBSD, mock_subp):
mock_is_FreeBSD.return_value = True
mock_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, 0)
- a = net.get_interfaces_by_mac()
+ a = cloudinit.net.get_interfaces_by_mac()
assert a == {'52:54:00:50:b7:0d': 'vtnet0',
'80:00:73:63:5c:48': 're0.33',
'02:14:39:0e:25:00': 'bridge0',
'02:ff:60:8c:f3:72': 'vnet0:11'}
+
+
+class TestFreeBSDRoundTrip(CiTestCase):
+
+ def _render_and_read(self, network_config=None, state=None,
+ netplan_path=None, target=None):
+ if target is None:
+ target = self.tmp_dir()
+ os.mkdir("%s/etc" % target)
+ with open("%s/etc/rc.conf" % target, 'a') as fd:
+ fd.write("# dummy rc.conf\n")
+ with open("%s/etc/resolv.conf" % target, 'a') as fd:
+ fd.write("# dummy resolv.conf\n")
+
+ if network_config:
+ ns = cloudinit.net.network_state.parse_net_config_data(
+ network_config)
+ elif state:
+ ns = state
+ else:
+ raise ValueError("Expected data or state, got neither")
+
+ renderer = cloudinit.net.freebsd.Renderer()
+ renderer.render_network_state(ns, target=target)
+ return dir2dict(target)
+
+ @mock.patch('cloudinit.subp.subp')
+ def test_render_output_has_yaml(self, mock_subp):
+
+ entry = {
+ 'yaml': V1,
+ }
+ network_config = yaml.load(entry['yaml'])
+ ns = cloudinit.net.network_state.parse_net_config_data(network_config)
+ files = self._render_and_read(state=ns)
+ assert files == {
+ '/etc/resolv.conf': '# dummy resolv.conf\n',
+ '/etc/rc.conf': (
+ "# dummy rc.conf\n"
+ "ifconfig_eno1="
+ "'172.20.80.129 netmask 255.255.255.128 mtu 1470'\n")}
diff --git a/tests/unittests/test_render_cloudcfg.py b/tests/unittests/test_render_cloudcfg.py
index 495e2669..275879af 100644
--- a/tests/unittests/test_render_cloudcfg.py
+++ b/tests/unittests/test_render_cloudcfg.py
@@ -10,7 +10,8 @@ from cloudinit import util
# TODO(Look to align with tools.render-cloudcfg or cloudinit.distos.OSFAMILIES)
DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd",
- "netbsd", "openbsd", "rhel", "suse", "ubuntu", "unknown"]
+ "netbsd", "openbsd", "photon", "rhel", "suse", "ubuntu",
+ "unknown"]
@pytest.mark.allow_subp_for(sys.executable)
diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py
index 9f11fd5c..b78a6939 100644
--- a/tests/unittests/test_reporting.py
+++ b/tests/unittests/test_reporting.py
@@ -113,6 +113,7 @@ class TestReportingEvent(TestCase):
class TestFinishReportingEvent(TestCase):
+
def test_as_has_result(self):
result = events.status.SUCCESS
name, desc = 'test_name', 'test_desc'
@@ -121,6 +122,23 @@ class TestFinishReportingEvent(TestCase):
self.assertTrue('result' in ret)
self.assertEqual(ret['result'], result)
+ def test_has_result_with_optional_post_files(self):
+ result = events.status.SUCCESS
+ name, desc, files = 'test_name', 'test_desc', [
+ '/really/fake/path/install.log']
+ event = events.FinishReportingEvent(
+ name, desc, result, post_files=files)
+ ret = event.as_dict()
+ self.assertTrue('result' in ret)
+ self.assertTrue('files' in ret)
+ self.assertEqual(ret['result'], result)
+ posted_install_log = ret['files'][0]
+ self.assertTrue('path' in posted_install_log)
+ self.assertTrue('content' in posted_install_log)
+ self.assertTrue('encoding' in posted_install_log)
+ self.assertEqual(posted_install_log['path'], files[0])
+ self.assertEqual(posted_install_log['encoding'], 'base64')
+
class TestBaseReportingHandler(TestCase):
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index fd1d1bac..bcb8044f 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -570,20 +570,33 @@ class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase):
ssh_util.render_authorizedkeysfile_paths(
"%h/.keys", "/homedirs/bobby", "bobby"))
+ def test_all(self):
+ self.assertEqual(
+ ["/homedirs/bobby/.keys", "/homedirs/bobby/.secret/keys",
+ "/keys/path1", "/opt/bobby/keys"],
+ ssh_util.render_authorizedkeysfile_paths(
+ "%h/.keys .secret/keys /keys/path1 /opt/%u/keys",
+ "/homedirs/bobby", "bobby"))
+
class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
@patch("cloudinit.ssh_util.pwd.getpwnam")
def test_multiple_authorizedkeys_file_order1(self, m_getpwnam):
- fpw = FakePwEnt(pw_name='bobby', pw_dir='/home2/bobby')
+ fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby')
m_getpwnam.return_value = fpw
- authorized_keys = self.tmp_path('authorized_keys')
+ user_ssh_folder = "%s/.ssh" % fpw.pw_dir
+
+ # /tmp/home2/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder)
util.write_file(authorized_keys, VALID_CONTENT['rsa'])
- user_keys = self.tmp_path('user_keys')
+ # /tmp/home2/bobby/.ssh/user_keys = dsa
+ user_keys = self.tmp_path('user_keys', dir=user_ssh_folder)
util.write_file(user_keys, VALID_CONTENT['dsa'])
- sshd_config = self.tmp_path('sshd_config')
+ # /tmp/sshd_config
+ sshd_config = self.tmp_path('sshd_config', dir="/tmp")
util.write_file(
sshd_config,
"AuthorizedKeysFile %s %s" % (authorized_keys, user_keys)
@@ -593,33 +606,244 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase):
fpw.pw_name, sshd_config)
content = ssh_util.update_authorized_keys(auth_key_entries, [])
- self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn)
+ self.assertEqual(user_keys, auth_key_fn)
self.assertTrue(VALID_CONTENT['rsa'] in content)
self.assertTrue(VALID_CONTENT['dsa'] in content)
@patch("cloudinit.ssh_util.pwd.getpwnam")
def test_multiple_authorizedkeys_file_order2(self, m_getpwnam):
- fpw = FakePwEnt(pw_name='suzie', pw_dir='/home/suzie')
+ fpw = FakePwEnt(pw_name='suzie', pw_dir='/tmp/home/suzie')
m_getpwnam.return_value = fpw
- authorized_keys = self.tmp_path('authorized_keys')
+ user_ssh_folder = "%s/.ssh" % fpw.pw_dir
+
+ # /tmp/home/suzie/.ssh/authorized_keys = rsa
+ authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder)
util.write_file(authorized_keys, VALID_CONTENT['rsa'])
- user_keys = self.tmp_path('user_keys')
+ # /tmp/home/suzie/.ssh/user_keys = dsa
+ user_keys = self.tmp_path('user_keys', dir=user_ssh_folder)
util.write_file(user_keys, VALID_CONTENT['dsa'])
- sshd_config = self.tmp_path('sshd_config')
+ # /tmp/sshd_config
+ sshd_config = self.tmp_path('sshd_config', dir="/tmp")
util.write_file(
sshd_config,
- "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys)
+ "AuthorizedKeysFile %s %s" % (user_keys, authorized_keys)
)
(auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
- fpw.pw_name, sshd_config
+ fpw.pw_name, sshd_config)
+ content = ssh_util.update_authorized_keys(auth_key_entries, [])
+
+ self.assertEqual(authorized_keys, auth_key_fn)
+ self.assertTrue(VALID_CONTENT['rsa'] in content)
+ self.assertTrue(VALID_CONTENT['dsa'] in content)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ def test_multiple_authorizedkeys_file_local_global(self, m_getpwnam):
+ fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby')
+ m_getpwnam.return_value = fpw
+ user_ssh_folder = "%s/.ssh" % fpw.pw_dir
+
+ # /tmp/home2/bobby/.ssh/authorized_keys = rsa
+ authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder)
+ util.write_file(authorized_keys, VALID_CONTENT['rsa'])
+
+ # /tmp/home2/bobby/.ssh/user_keys = dsa
+ user_keys = self.tmp_path('user_keys', dir=user_ssh_folder)
+ util.write_file(user_keys, VALID_CONTENT['dsa'])
+
+ # /tmp/etc/ssh/authorized_keys = ecdsa
+ authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys',
+ dir="/tmp")
+ util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa'])
+
+ # /tmp/sshd_config
+ sshd_config = self.tmp_path('sshd_config', dir="/tmp")
+ util.write_file(
+ sshd_config,
+ "AuthorizedKeysFile %s %s %s" % (authorized_keys_global,
+ user_keys, authorized_keys)
+ )
+
+ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
+ fpw.pw_name, sshd_config)
+ content = ssh_util.update_authorized_keys(auth_key_entries, [])
+
+ self.assertEqual(authorized_keys, auth_key_fn)
+ self.assertTrue(VALID_CONTENT['rsa'] in content)
+ self.assertTrue(VALID_CONTENT['ecdsa'] in content)
+ self.assertTrue(VALID_CONTENT['dsa'] in content)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ def test_multiple_authorizedkeys_file_local_global2(self, m_getpwnam):
+ fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby')
+ m_getpwnam.return_value = fpw
+ user_ssh_folder = "%s/.ssh" % fpw.pw_dir
+
+ # /tmp/home2/bobby/.ssh/authorized_keys2 = rsa
+ authorized_keys = self.tmp_path('authorized_keys2',
+ dir=user_ssh_folder)
+ util.write_file(authorized_keys, VALID_CONTENT['rsa'])
+
+ # /tmp/home2/bobby/.ssh/user_keys3 = dsa
+ user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder)
+ util.write_file(user_keys, VALID_CONTENT['dsa'])
+
+ # /tmp/etc/ssh/authorized_keys = ecdsa
+ authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys',
+ dir="/tmp")
+ util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa'])
+
+ # /tmp/sshd_config
+ sshd_config = self.tmp_path('sshd_config', dir="/tmp")
+ util.write_file(
+ sshd_config,
+ "AuthorizedKeysFile %s %s %s" % (authorized_keys_global,
+ authorized_keys, user_keys)
+ )
+
+ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
+ fpw.pw_name, sshd_config)
+ content = ssh_util.update_authorized_keys(auth_key_entries, [])
+
+ self.assertEqual(user_keys, auth_key_fn)
+ self.assertTrue(VALID_CONTENT['rsa'] in content)
+ self.assertTrue(VALID_CONTENT['ecdsa'] in content)
+ self.assertTrue(VALID_CONTENT['dsa'] in content)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ def test_multiple_authorizedkeys_file_global(self, m_getpwnam):
+ fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby')
+ m_getpwnam.return_value = fpw
+
+ # /tmp/etc/ssh/authorized_keys = rsa
+ authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys',
+ dir="/tmp")
+ util.write_file(authorized_keys_global, VALID_CONTENT['rsa'])
+
+ # /tmp/sshd_config
+ sshd_config = self.tmp_path('sshd_config')
+ util.write_file(
+ sshd_config,
+ "AuthorizedKeysFile %s" % (authorized_keys_global)
)
+
+ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
+ fpw.pw_name, sshd_config)
content = ssh_util.update_authorized_keys(auth_key_entries, [])
self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn)
self.assertTrue(VALID_CONTENT['rsa'] in content)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ def test_multiple_authorizedkeys_file_multiuser(self, m_getpwnam):
+ fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby')
+ m_getpwnam.return_value = fpw
+ user_ssh_folder = "%s/.ssh" % fpw.pw_dir
+ # /tmp/home2/bobby/.ssh/authorized_keys2 = rsa
+ authorized_keys = self.tmp_path('authorized_keys2',
+ dir=user_ssh_folder)
+ util.write_file(authorized_keys, VALID_CONTENT['rsa'])
+ # /tmp/home2/bobby/.ssh/user_keys3 = dsa
+ user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder)
+ util.write_file(user_keys, VALID_CONTENT['dsa'])
+
+ fpw2 = FakePwEnt(pw_name='suzie', pw_dir='/tmp/home/suzie')
+ user_ssh_folder = "%s/.ssh" % fpw2.pw_dir
+ # /tmp/home/suzie/.ssh/authorized_keys2 = ssh-xmss@openssh.com
+ authorized_keys2 = self.tmp_path('authorized_keys2',
+ dir=user_ssh_folder)
+ util.write_file(authorized_keys2,
+ VALID_CONTENT['ssh-xmss@openssh.com'])
+
+ # /tmp/etc/ssh/authorized_keys = ecdsa
+ authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys2',
+ dir="/tmp")
+ util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa'])
+
+ # /tmp/sshd_config
+ sshd_config = self.tmp_path('sshd_config', dir="/tmp")
+ util.write_file(
+ sshd_config,
+ "AuthorizedKeysFile %s %%h/.ssh/authorized_keys2 %s" %
+ (authorized_keys_global, user_keys)
+ )
+
+ # process first user
+ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
+ fpw.pw_name, sshd_config)
+ content = ssh_util.update_authorized_keys(auth_key_entries, [])
+
+ self.assertEqual(user_keys, auth_key_fn)
+ self.assertTrue(VALID_CONTENT['rsa'] in content)
+ self.assertTrue(VALID_CONTENT['ecdsa'] in content)
+ self.assertTrue(VALID_CONTENT['dsa'] in content)
+ self.assertFalse(VALID_CONTENT['ssh-xmss@openssh.com'] in content)
+
+ m_getpwnam.return_value = fpw2
+ # process second user
+ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
+ fpw2.pw_name, sshd_config)
+ content = ssh_util.update_authorized_keys(auth_key_entries, [])
+
+ self.assertEqual(authorized_keys2, auth_key_fn)
+ self.assertTrue(VALID_CONTENT['ssh-xmss@openssh.com'] in content)
+ self.assertTrue(VALID_CONTENT['ecdsa'] in content)
+ self.assertTrue(VALID_CONTENT['dsa'] in content)
+ self.assertFalse(VALID_CONTENT['rsa'] in content)
+
+ @patch("cloudinit.ssh_util.pwd.getpwnam")
+ def test_multiple_authorizedkeys_file_multiuser2(self, m_getpwnam):
+ fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home/bobby')
+ m_getpwnam.return_value = fpw
+ user_ssh_folder = "%s/.ssh" % fpw.pw_dir
+ # /tmp/home/bobby/.ssh/authorized_keys2 = rsa
+ authorized_keys = self.tmp_path('authorized_keys2',
+ dir=user_ssh_folder)
+ util.write_file(authorized_keys, VALID_CONTENT['rsa'])
+ # /tmp/home/bobby/.ssh/user_keys3 = dsa
+ user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder)
+ util.write_file(user_keys, VALID_CONTENT['dsa'])
+
+ fpw2 = FakePwEnt(pw_name='badguy', pw_dir='/tmp/home/badguy')
+ user_ssh_folder = "%s/.ssh" % fpw2.pw_dir
+ # /tmp/home/badguy/home/bobby = "" (path built, file never written)
+ authorized_keys2 = self.tmp_path('home/bobby', dir="/tmp/home/badguy")
+
+ # /tmp/etc/ssh/authorized_keys2 = ecdsa
+ authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys2',
+ dir="/tmp")
+ util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa'])
+
+ # /tmp/sshd_config
+ sshd_config = self.tmp_path('sshd_config', dir="/tmp")
+ util.write_file(
+ sshd_config,
+ "AuthorizedKeysFile %s %%h/.ssh/authorized_keys2 %s %s" %
+ (authorized_keys_global, user_keys, authorized_keys2)
+ )
+
+ # process first user
+ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
+ fpw.pw_name, sshd_config)
+ content = ssh_util.update_authorized_keys(auth_key_entries, [])
+
+ self.assertEqual(user_keys, auth_key_fn)
+ self.assertTrue(VALID_CONTENT['rsa'] in content)
+ self.assertTrue(VALID_CONTENT['ecdsa'] in content)
+ self.assertTrue(VALID_CONTENT['dsa'] in content)
+
+ m_getpwnam.return_value = fpw2
+ # process second user
+ (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys(
+ fpw2.pw_name, sshd_config)
+ content = ssh_util.update_authorized_keys(auth_key_entries, [])
+
+ # badguy should not take the key from the other user!
+ self.assertEqual(authorized_keys2, auth_key_fn)
+ self.assertTrue(VALID_CONTENT['ecdsa'] in content)
self.assertTrue(VALID_CONTENT['dsa'] in content)
+ self.assertFalse(VALID_CONTENT['rsa'] in content)
# vi: ts=4 expandtab
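The tests above pin down how ssh_util.extract_authorized_keys resolves an AuthorizedKeysFile directive with several entries, including the %h home-directory token, and that one user's lookup never leaks into another's. As a minimal sketch of the token expansion the tests rely on (illustrative only, not cloud-init's implementation; sshd additionally supports %u and %%):

    import os

    def expand_authorized_keys_paths(value, username, home):
        # Expand an sshd_config AuthorizedKeysFile value for one user:
        # %h -> home directory; relative entries are home-relative.
        paths = []
        for token in value.split():
            token = token.replace("%h", home).replace("%u", username)
            if not token.startswith("/"):
                token = os.path.join(home, token)
            paths.append(token)
        return paths

    # expand_authorized_keys_paths(
    #     "/tmp/etc/ssh/authorized_keys2 %h/.ssh/authorized_keys2",
    #     "bobby", "/tmp/home2/bobby")
    # -> ['/tmp/etc/ssh/authorized_keys2',
    #     '/tmp/home2/bobby/.ssh/authorized_keys2']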
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index e5292001..2290cab7 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -999,4 +999,22 @@ class TestFindDevs:
devlist = util.find_devs_with_netbsd(criteria=criteria)
assert devlist == expected_devlist
+ @pytest.mark.parametrize(
+ 'criteria,expected_devlist', (
+ (None, ['/dev/vbd0', '/dev/cd0', '/dev/acd0']),
+ ('TYPE=iso9660', ['/dev/cd0', '/dev/acd0']),
+ ('TYPE=vfat', ['/dev/vbd0']),
+ ('LABEL_FATBOOT=A_LABEL', # lp: #1841466
+ ['/dev/vbd0', '/dev/cd0', '/dev/acd0']),
+ )
+ )
+ @mock.patch("cloudinit.subp.subp")
+ def test_find_devs_with_dragonflybsd(self, m_subp, criteria,
+ expected_devlist):
+ m_subp.return_value = (
+ 'md2 md1 cd0 vbd0 acd0 vn3 vn2 vn1 vn0 md0', ''
+ )
+ devlist = util.find_devs_with_dragonflybsd(criteria=criteria)
+ assert devlist == expected_devlist
+
# vi: ts=4 expandtab
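The parametrized cases above fix the expected filtering: memory disks (md*) and vnode disks (vn*) are dropped, TYPE=iso9660 selects only the CD devices, TYPE=vfat excludes them, and an unrecognized filter such as LABEL_FATBOOT falls back to every remaining device. A rough model of that behavior, ordering aside (illustrative only, not util.find_devs_with_dragonflybsd itself):

    def find_devs_dragonflybsd_sketch(kern_disks, criteria=None):
        devs = [d for d in kern_disks.split()
                if not d.startswith(("md", "vn"))]  # drop md*/vn* disks
        cds = [d for d in devs if d.startswith(("cd", "acd"))]
        if criteria == "TYPE=iso9660":
            devs = cds
        elif criteria in ("TYPE=vfat", "TYPE=msdos"):
            devs = [d for d in devs if d not in cds]
        # other criteria (e.g. LABEL_FATBOOT=...) keep all devices
        return ["/dev/%s" % d for d in devs]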
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index 48995057..e2979ed4 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -19,8 +19,10 @@ dhensby
eandersson
eb3095
emmanuelthome
+esposem
giggsoff
hamalq
+irishgordo
izzyleung
johnsonshi
jordimassaguerpla
@@ -30,20 +32,24 @@ klausenbusk
landon912
lucasmoura
lungj
+mamercad
manuelisimo
marlluslustosa
matthewruffell
mitechie
+nazunalika
nicolasbock
nishigori
olivierlemasle
omBratteng
onitake
qubidt
+renanrodrigo
riedel
slyon
smoser
sshedi
+stappersg
TheRealFalcon
taoyama
timothegenzmer
diff --git a/tools/build-on-netbsd b/tools/build-on-netbsd
index d2a7067d..32837058 100755
--- a/tools/build-on-netbsd
+++ b/tools/build-on-netbsd
@@ -2,17 +2,24 @@
fail() { echo "FAILED:" "$@" 1>&2; exit 1; }
+PYTHON="${PYTHON:-python3}"
+if ! command -v "${PYTHON}" >/dev/null 2>&1; then
+    fail "Please install python first."
+fi
+py_prefix=$(${PYTHON} -c 'import sys; print("py%d%d" % (sys.version_info.major, sys.version_info.minor))')
+
# Check dependencies:
depschecked=/tmp/c-i.dependencieschecked
pkgs="
bash
dmidecode
- py37-configobj
- py37-jinja2
- py37-oauthlib
- py37-requests
- py37-setuptools
- py37-yaml
+ ${py_prefix}-configobj
+ ${py_prefix}-jinja2
+ ${py_prefix}-oauthlib
+ ${py_prefix}-requests
+ ${py_prefix}-setuptools
+ ${py_prefix}-yaml
sudo
"
[ -f "$depschecked" ] || pkg_add ${pkgs} || fail "install packages"
@@ -20,8 +27,8 @@ pkgs="
touch $depschecked
# Build the code and install in /usr/pkg/:
-python3.7 setup.py build
-python3.7 setup.py install -O1 --distro netbsd --skip-build --init-system sysvinit_netbsd
+${PYTHON} setup.py build
+${PYTHON} setup.py install -O1 --distro netbsd --skip-build --init-system sysvinit_netbsd
mv -v /usr/local/etc/rc.d/cloud* /etc/rc.d
# Enable cloud-init in /etc/rc.conf:
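The py_prefix line above derives the pkgsrc package prefix from whichever interpreter ${PYTHON} names, so the dependency list tracks the installed Python instead of hardcoding py37. The embedded snippet is plain Python:

    import sys
    # prints e.g. "py39" under Python 3.9, giving py39-jinja2 and friends
    print("py%d%d" % (sys.version_info.major, sys.version_info.minor))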
diff --git a/tools/hook-hotplug b/tools/hook-hotplug
new file mode 100755
index 00000000..34e95929
--- /dev/null
+++ b/tools/hook-hotplug
@@ -0,0 +1,21 @@
+#!/bin/bash
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# This script checks whether cloud-init has finished booting; if so,
+# it passes the udev hotplug event to cloud-init's hotplug-hook fifo.
+
+is_finished() {
+ [ -e /run/cloud-init/result.json ]
+}
+
+if is_finished; then
+ # open cloud-init's hotplug-hook fifo rw
+ exec 3<>/run/cloud-init/hook-hotplug-cmd
+ env_params=(
+ --devpath="${DEVPATH}"
+ --subsystem="${SUBSYSTEM}"
+ --udevaction="${ACTION}"
+ )
+ # write params to cloud-init's hotplug-hook fifo
+ echo "${env_params[@]}" >&3
+fi
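hook-hotplug is the writer side of a fifo handshake: udev supplies DEVPATH, SUBSYSTEM and ACTION in the environment, and the script serializes them as one argument line per event. A hypothetical sketch of a matching reader, for orientation only (the real consumer is the hotplug-hook machinery added in cloudinit/cmd/devel/hotplug_hook.py, not this code):

    import shlex

    FIFO = "/run/cloud-init/hook-hotplug-cmd"  # path opened by the script above

    def serve(handle_event):
        while True:
            # Opening a fifo read-only blocks until a writer connects;
            # each line carries one event's arguments.
            with open(FIFO) as fifo:
                for line in fifo:
                    # e.g. ['--devpath=...', '--subsystem=net',
                    #       '--udevaction=add']
                    handle_event(shlex.split(line))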
diff --git a/tools/read-dependencies b/tools/read-dependencies
index 6ad5f701..e52720d4 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -23,6 +23,7 @@ DEFAULT_REQUIREMENTS = 'requirements.txt'
# Map the appropriate package dir needed for each distro choice
DISTRO_PKG_TYPE_MAP = {
'centos': 'redhat',
+ 'rocky': 'redhat',
'redhat': 'redhat',
'debian': 'debian',
'ubuntu': 'debian',
@@ -39,6 +40,7 @@ MAYBE_RELIABLE_YUM_INSTALL = [
error ":: http proxy in use => forcing the use of fixed URLs in /etc/yum.repos.d/*.repo"
sed -i --regexp-extended '/^#baseurl=/s/#// ; /^(mirrorlist|metalink)=/s/^/#/' /etc/yum.repos.d/*.repo
sed -i 's/download\.fedoraproject\.org/dl.fedoraproject.org/g' /etc/yum.repos.d/*.repo
+ sed -i 's/download\.example/dl.fedoraproject.org/g' /etc/yum.repos.d/*.repo
}
configure_repos_for_proxy_use
n=0; max=10;
@@ -64,11 +66,13 @@ ZYPPER_INSTALL = [
'--auto-agree-with-licenses']
DRY_DISTRO_INSTALL_PKG_CMD = {
+ 'rocky': ['yum', 'install', '--assumeyes'],
'centos': ['yum', 'install', '--assumeyes'],
'redhat': ['yum', 'install', '--assumeyes'],
}
DISTRO_INSTALL_PKG_CMD = {
+ 'rocky': MAYBE_RELIABLE_YUM_INSTALL,
'centos': MAYBE_RELIABLE_YUM_INSTALL,
'redhat': MAYBE_RELIABLE_YUM_INSTALL,
'debian': ['apt', 'install', '-y'],
@@ -273,10 +277,10 @@ def pkg_install(pkg_list, distro, test_distro=False, dry_run=False):
cmd = DRY_DISTRO_INSTALL_PKG_CMD[distro]
install_cmd.extend(cmd)
- if distro in ['centos', 'redhat']:
+ if distro in ['centos', 'redhat', 'rocky']:
# CentOS and Redhat need epel-release to access oauthlib and jsonschema
subprocess.check_call(install_cmd + ['epel-release'])
- if distro in ['suse', 'opensuse', 'redhat', 'centos']:
+ if distro in ['suse', 'opensuse', 'redhat', 'rocky', 'centos']:
pkg_list.append('rpm-build')
subprocess.check_call(install_cmd + pkg_list)
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
index f5990748..7e667de4 100755
--- a/tools/render-cloudcfg
+++ b/tools/render-cloudcfg
@@ -5,8 +5,8 @@ import os
import sys
VARIANTS = ["almalinux", "alpine", "amazon", "arch", "centos", "debian",
- "fedora", "freebsd", "netbsd", "openbsd", "rhel", "suse", "ubuntu",
- "unknown"]
+ "fedora", "freebsd", "netbsd", "openbsd", "photon", "rhel",
+ "suse","rocky", "ubuntu", "unknown"]
if "avoid-pep8-E402-import-not-top-of-file":
diff --git a/tools/run-container b/tools/run-container
index 15948e77..e049dfdc 100755
--- a/tools/run-container
+++ b/tools/run-container
@@ -191,7 +191,7 @@ os_info() {
get_os_info() {
# run inside container, set OS_NAME, OS_VERSION
- # example OS_NAME are centos, debian, opensuse
+ # example OS_NAME values are centos, debian, opensuse, rockylinux
[ -n "${OS_NAME:-}" -a -n "${OS_VERSION:-}" ] && return 0
if [ -f /etc/os-release ]; then
OS_NAME=$(sh -c '. /etc/os-release; echo $ID')
@@ -247,7 +247,7 @@ apt_install() {
install_packages() {
get_os_info || return
case "$OS_NAME" in
- centos) yum_install "$@";;
+ centos|rocky*) yum_install "$@";;
opensuse) zypper_install "$@";;
debian|ubuntu) apt_install "$@";;
*) error "Do not know how to install packages on ${OS_NAME}";
@@ -353,6 +353,7 @@ wait_for_boot() {
inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf"
inside "$name" sh -c "sed -i --regexp-extended '/^#baseurl=/s/#// ; /^(mirrorlist|metalink)=/s/^/#/' /etc/yum.repos.d/*.repo"
inside "$name" sh -c "sed -i 's/download\.fedoraproject\.org/dl.fedoraproject.org/g' /etc/yum.repos.d/*.repo"
+ inside "$name" sh -c "sed -i 's/download\.example/dl.fedoraproject.org/g' /etc/yum.repos.d/*.repo"
else
debug 1 "do not know how to configure proxy on $OS_NAME"
fi
@@ -485,7 +486,7 @@ main() {
local build_pkg="" build_srcpkg="" pkg_ext="" distflag=""
case "$OS_NAME" in
- centos) distflag="--distro=redhat";;
+ centos|rocky*) distflag="--distro=redhat";;
opensuse) distflag="--distro=suse";;
esac
@@ -494,7 +495,7 @@ main() {
build_pkg="./packages/bddeb -d"
build_srcpkg="./packages/bddeb -S -d"
pkg_ext=".deb";;
- centos|opensuse)
+ centos|opensuse|rocky*)
build_pkg="./packages/brpm $distflag"
build_srcpkg="./packages/brpm $distflag --srpm"
pkg_ext=".rpm";;
diff --git a/tox.ini b/tox.ini
index bf8cb78b..f21e1186 100644
--- a/tox.ini
+++ b/tox.ini
@@ -157,7 +157,15 @@ passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_* TRAVIS
deps =
-r{toxinidir}/integration-requirements.txt
setenv =
- PYTEST_ADDOPTS="-m ci"
+ PYTEST_ADDOPTS="-m ci and not adhoc"
+
+[testenv:integration-tests-jenkins]
+commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests}
+passenv = CLOUD_INIT_* SSH_AUTH_SOCK OS_*
+deps =
+ -r{toxinidir}/integration-requirements.txt
+setenv =
+ PYTEST_ADDOPTS="-m not adhoc"
[pytest]
# TODO: s/--strict/--strict-markers/ once xenial support is dropped
@@ -174,9 +182,10 @@ markers =
gce: test will only run on GCE platform
azure: test will only run on Azure platform
oci: test will only run on OCI platform
- openstack: test will only run on openstack
+ openstack: test will only run on openstack platform
lxd_config_dict: set the config_dict passed on LXD instance creation
lxd_container: test will only run in LXD container
+ lxd_setup: specify callable to be called between init and start
lxd_use_exec: `execute` will use `lxc exec` instead of SSH
lxd_vm: test will only run in LXD VM
not_xenial: test cannot run on the xenial release
@@ -189,3 +198,4 @@ markers =
sru_next: test is part of the next SRU verification
ubuntu: this test should run on Ubuntu
unstable: skip this test because it is flakey
+ adhoc: only run on adhoc basis, not in any CI environment (travis or jenkins)
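With the marker registered, opting a test out of CI is a one-line decorator, and the two PYTEST_ADDOPTS lines above deselect it in both environments. A minimal usage sketch (hypothetical test name):

    import pytest

    @pytest.mark.adhoc  # deselected by "-m ci and not adhoc" and "-m not adhoc"
    def test_expensive_manual_scenario():
        ...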
diff --git a/udev/10-cloud-init-hook-hotplug.rules b/udev/10-cloud-init-hook-hotplug.rules
new file mode 100644
index 00000000..2e382679
--- /dev/null
+++ b/udev/10-cloud-init-hook-hotplug.rules
@@ -0,0 +1,6 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+# Handle device add and remove events only
+ACTION!="add|remove", GOTO="cloudinit_end"
+LABEL="cloudinit_hook"
+SUBSYSTEM=="net|block", RUN+="/usr/lib/cloud-init/hook-hotplug"
+LABEL="cloudinit_end"