author    James Falcon <james.falcon@canonical.com>  2021-02-23 10:06:10 -0500
committer git-ubuntu importer <ubuntu-devel-discuss@lists.ubuntu.com>  2021-02-23 16:01:09 +0000
commit    6834043e1074fd6198d1993a735880f245e95274 (patch)
tree      482e842c1fcc5f3120e278dc091c74dccef41857
parent    8497bb0be24312d4edea3b1307c90407fe70356d (diff)
download  cloud-init-git-6834043e1074fd6198d1993a735880f245e95274.tar.gz
21.1-0ubuntu1 (patches unapplied)
Imported using git-ubuntu import.
-rw-r--r--  .github/workflows/stale.yml | 2
-rw-r--r--  .gitignore | 3
-rw-r--r--  ChangeLog | 107
-rw-r--r--  cloudinit/apport.py | 1
-rw-r--r--  cloudinit/cmd/tests/test_main.py | 3
-rw-r--r--  cloudinit/config/cc_keys_to_console.py | 5
-rw-r--r--  cloudinit/config/tests/test_keys_to_console.py | 34
-rwxr-xr-x  cloudinit/distros/__init__.py | 2
-rw-r--r--  cloudinit/helpers.py | 7
-rw-r--r--  cloudinit/settings.py | 2
-rwxr-xr-x  cloudinit/sources/DataSourceAzure.py | 65
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py | 8
-rw-r--r--  cloudinit/sources/DataSourceUpCloud.py | 165
-rw-r--r--  cloudinit/sources/__init__.py | 13
-rw-r--r--  cloudinit/sources/helpers/openstack.py | 5
-rw-r--r--  cloudinit/sources/helpers/upcloud.py | 231
-rw-r--r--  cloudinit/stages.py | 106
-rw-r--r--  cloudinit/version.py | 2
-rw-r--r--  debian/changelog | 29
-rw-r--r--  doc/examples/cloud-config-ssh-keys.txt | 10
-rw-r--r--  doc/rtd/topics/availability.rst | 1
-rw-r--r--  doc/rtd/topics/datasources.rst | 1
-rw-r--r--  doc/rtd/topics/datasources/openstack.rst | 8
-rw-r--r--  doc/rtd/topics/datasources/upcloud.rst | 24
-rw-r--r--  doc/rtd/topics/network-config.rst | 5
-rw-r--r--  integration-requirements.txt | 2
-rw-r--r--  tests/integration_tests/__init__.py | 12
-rw-r--r--  tests/integration_tests/bugs/test_gh626.py | 9
-rw-r--r--  tests/integration_tests/bugs/test_gh668.py | 8
-rw-r--r--  tests/integration_tests/bugs/test_lp1835584.py | 104
-rw-r--r--  tests/integration_tests/bugs/test_lp1898997.py | 4
-rw-r--r--  tests/integration_tests/bugs/test_lp1901011.py | 58
-rw-r--r--  tests/integration_tests/clouds.py | 12
-rw-r--r--  tests/integration_tests/conftest.py | 13
-rw-r--r--  tests/integration_tests/instances.py | 14
-rw-r--r--  tests/integration_tests/integration_settings.py | 3
-rw-r--r--  tests/integration_tests/modules/test_apt.py | 291
-rw-r--r--  tests/integration_tests/modules/test_apt_configure_sources_list.py | 52
-rw-r--r--  tests/integration_tests/modules/test_keys_to_console.py | 48
-rw-r--r--  tests/integration_tests/modules/test_power_state_change.py | 2
-rw-r--r--  tests/integration_tests/modules/test_seed_random_data.py | 6
-rw-r--r--  tests/integration_tests/test_upgrade.py | 2
-rw-r--r--  tests/unittests/test_data.py | 37
-rw-r--r--  tests/unittests/test_datasource/test_azure.py | 91
-rw-r--r--  tests/unittests/test_datasource/test_common.py | 3
-rw-r--r--  tests/unittests/test_datasource/test_openstack.py | 32
-rw-r--r--  tests/unittests/test_datasource/test_upcloud.py | 314
-rw-r--r--  tests/unittests/test_distros/test_generic.py | 13
-rwxr-xr-x  tools/ds-identify | 7
-rw-r--r--  tox.ini | 6
50 files changed, 1818 insertions(+), 164 deletions(-)
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 20c5735d..3b71ba28 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -20,5 +20,5 @@ jobs:
If you are waiting for code review and you are seeing this message, apologies! Please reply, tagging mitechie, and he will ensure that someone takes a look soon.
- (If the pull request is closed, please do feel free to reopen it if you wish to continue working on it.)
+ (If the pull request is closed and you would like to continue working on it, please do tag mitechie to reopen it.)
stale-pr-label: 'stale-pr'
diff --git a/.gitignore b/.gitignore
index 5a68bff9..eb26e0da 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,6 +16,9 @@ stage
.pc/
.cache/
.mypy_cache/
+.pytest_cache/
+.vscode/
+htmlcov/
# Ignore packaging artifacts
cloud-init.dsc
diff --git a/ChangeLog b/ChangeLog
index d0781ded..44b50410 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,110 @@
+21.1
+ - Azure: Support for VMs without ephemeral resource disks. (#800)
+ [Johnson Shi] (LP: #1901011)
+ - cc_keys_to_console: add option to disable key emission (#811)
+ [Michael Hudson-Doyle] (LP: #1915460)
+ - integration_tests: introduce lxd_use_exec mark (#802)
+ - azure: case-insensitive UUID to avoid new IID during kernel upgrade
+ (#798) (LP: #1835584)
+ - stale.yml: don't ask submitters to reopen PRs (#816)
+ - integration_tests: fix use of SSH agent within tox (#815)
+ - integration_tests: add UPGRADE CloudInitSource (#812)
+ - integration_tests: use unique MAC addresses for tests (#813)
+ - Update .gitignore (#814)
+ - Port apt cloud_tests to integration tests (#808)
+ - integration_tests: fix test_gh626 on LXD VMs (#809)
+ - Fix attempting to decode binary data in test_seed_random_data test (#806)
+ - Remove wait argument from tests with session_cloud calls (#805)
+ - Datasource for UpCloud (#743) [Antti Myyrä]
+ - test_gh668: fix failure on LXD VMs (#801)
+ - openstack: read the dynamic metadata group vendor_data2.json (#777)
+ [Andrew Bogott] (LP: #1841104)
+ - includedir in sudoers can be prefixed by "@" (arroba) (#783)
+ [Jordi Massaguer Pla]
+ - [VMware] change default max wait time to 15s (#774) [xiaofengw-vmware]
+ - Revert integration test associated with reverted #586 (#784)
+ - Add jordimassaguerpla as contributor (#787) [Jordi Massaguer Pla]
+ - Add Rick Harding to CLA signers (#792) [Rick Harding]
+ - HACKING.rst: add clarifying note to LP CLA process section (#789)
+ - Stop linting cloud_tests (#791)
+ - cloud-tests: update cryptography requirement (#790) [Joshua Powers]
+ - Remove 'remove-raise-on-failure' calls from integration_tests (#788)
+ - Use more cloud defaults in integration tests (#757)
+ - Adding self to cla signers (#776) [Andrew Bogott]
+ - doc: avoid two warnings (#781) [Dan Kenigsberg]
+ - Use proper spelling for Red Hat (#778) [Dan Kenigsberg]
+ - Add antonyc to .github-cla-signers (#747) [Anton Chaporgin]
+ - integration_tests: log image serial if available (#772)
+ - [VMware] Support cloudinit raw data feature (#691) [xiaofengw-vmware]
+ - net: Fix static routes to host in eni renderer (#668) [Pavel Abalikhin]
+ - .travis.yml: don't run cloud_tests in CI (#756)
+ - test_upgrade: add some missing commas (#769)
+ - cc_seed_random: update documentation and fix integration test (#771)
+ (LP: #1911227)
+ - Fix test gh-632 test to only run on NoCloud (#770) (LP: #1911230)
+ - archlinux: fix package upgrade command handling (#768) [Bao Trinh]
+ - integration_tests: add integration test for LP: #1910835 (#761)
+ - Fix regression with handling of IMDS ssh keys (#760) [Thomas Stringer]
+ - integration_tests: log cloud-init version in SUT (#758)
+ - Add ajmyyra as contributor (#742) [Antti Myyrä]
+ - net_convert: add some missing help text (#755)
+ - Missing IPV6_AUTOCONF=no to render sysconfig dhcp6 stateful on RHEL
+ (#753) [Eduardo Otubo]
+ - doc: document missing IPv6 subnet types (#744) [Antti Myyrä]
+ - Add example configuration for datasource `AliYun` (#751) [Xiaoyu Zhong]
+ - integration_tests: add SSH key selection settings (#754)
+ - fix a typo in man page cloud-init.1 (#752) [Amy Chen]
+ - network-config-format-v2.rst: add Netplan Passthrough section (#750)
+ - stale: re-enable post holidays (#749)
+ - integration_tests: port ca_certs tests from cloud_tests (#732)
+ - Azure: Add telemetry for poll IMDS (#741) [Johnson Shi]
+ - doc: move testing section from HACKING to its own doc (#739)
+ - No longer allow integration test failures on travis (#738)
+ - stale: fix error in definition (#740)
+ - integration_tests: set log-cli-level to INFO by default (#737)
+ - PULL_REQUEST_TEMPLATE.md: use backticks around commit message (#736)
+ - stale: disable check for holiday break (#735)
+ - integration_tests: log the path we collect logs into (#733)
+ - .travis.yml: add (most) supported Python versions to CI (#734)
+ - integration_tests: fix IN_PLACE CLOUD_INIT_SOURCE (#731)
+ - cc_ca_certs: add RHEL support (#633) [cawamata]
+ - Azure: only generate config for NICs with addresses (#709)
+ [Thomas Stringer]
+ - doc: fix CloudStack configuration example (#707) [Olivier Lemasle]
+ - integration_tests: restrict test_lxd_bridge appropriately (#730)
+ - Add integration tests for CLI functionality (#729)
+ - Integration test for gh-626 (#728)
+ - Some test_upgrade fixes (#726)
+ - Ensure overriding test vars with env vars works for booleans (#727)
+ - integration_tests: port lxd_bridge test from cloud_tests (#718)
+ - Integration test for gh-632. (#725)
+ - Integration test for gh-671 (#724)
+ - integration-requirements.txt: bump pycloudlib commit (#723)
+ - Drop unnecessary shebang from cmd/main.py (#722) [Eduardo Otubo]
+ - Integration test for LP: #1813396 and #669 (#719)
+ - integration_tests: include timestamp in log output (#720)
+ - integration_tests: add test for LP: #1898997 (#713)
+ - Add integration test for power_state_change module (#717)
+ - Update documentation for network-config-format-v2 (#701) [ggiesen]
+ - sandbox CA Cert tests to not require ca-certificates (#715)
+ [Eduardo Otubo]
+ - Add upgrade integration test (#693)
+ - Integration test for 570 (#712)
+ - Add ability to keep snapshotted images in integration tests (#711)
+ - Integration test for pull #586 (#706)
+ - integration_tests: introduce skipping of tests by OS (#702)
+ - integration_tests: introduce IntegrationInstance.restart (#708)
+ - Add lxd-vm to list of valid integration test platforms (#705)
+ - Adding BOOTPROTO = dhcp to render sysconfig dhcp6 stateful on RHEL
+ (#685) [Eduardo Otubo]
+ - Delete image snapshots created for integration tests (#682)
+ - Parametrize ssh_keys_provided integration test (#700) [lucasmoura]
+ - Drop use_sudo attribute on IntegrationInstance (#694) [lucasmoura]
+ - cc_apt_configure: add riscv64 as a ports arch (#687)
+ [Dimitri John Ledkov]
+ - cla: add xnox (#692) [Dimitri John Ledkov]
+ - Collect logs from integration test runs (#675)
+
20.4.1
- Revert "ssh_util: handle non-default AuthorizedKeysFile config (#586)"
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index 9bded16c..25f254e3 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -39,6 +39,7 @@ KNOWN_CLOUD_NAMES = [
'SAP Converged Cloud',
'Scaleway',
'SmartOS',
+ 'UpCloud',
'VMware',
'ZStack',
'Other'
diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
index 585b3b0e..78b27441 100644
--- a/cloudinit/cmd/tests/test_main.py
+++ b/cloudinit/cmd/tests/test_main.py
@@ -127,7 +127,8 @@ class TestMain(FilesystemMockingTestCase):
'syslog_fix_perms': [
'syslog:adm', 'root:adm', 'root:wheel', 'root:root'
],
- 'vendor_data': {'enabled': True, 'prefix': []}})
+ 'vendor_data': {'enabled': True, 'prefix': []},
+ 'vendor_data2': {'enabled': True, 'prefix': []}})
updated_cfg.pop('system_info')
self.assertEqual(updated_cfg, cfg)
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index 0f2be52b..646d1f67 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -51,6 +51,11 @@ def _get_helper_tool_path(distro):
def handle(name, cfg, cloud, log, _args):
+ if util.is_false(cfg.get("ssh", {}).get("emit_keys_to_console", True)):
+ log.debug(("Skipping module named %s, "
+ "logging of SSH host keys disabled"), name)
+ return
+
helper_path = _get_helper_tool_path(cloud.distro)
if not os.path.exists(helper_path):
log.warning(("Unable to activate module %s,"
diff --git a/cloudinit/config/tests/test_keys_to_console.py b/cloudinit/config/tests/test_keys_to_console.py
new file mode 100644
index 00000000..4083fc54
--- /dev/null
+++ b/cloudinit/config/tests/test_keys_to_console.py
@@ -0,0 +1,34 @@
+"""Tests for cc_keys_to_console."""
+from unittest import mock
+
+import pytest
+
+from cloudinit.config import cc_keys_to_console
+
+
+class TestHandle:
+ """Tests for cloudinit.config.cc_keys_to_console.handle.
+
+ TODO: These tests only cover the emit_keys_to_console config option, they
+ should be expanded to cover the full functionality.
+ """
+
+ @mock.patch("cloudinit.config.cc_keys_to_console.util.multi_log")
+ @mock.patch("cloudinit.config.cc_keys_to_console.os.path.exists")
+ @mock.patch("cloudinit.config.cc_keys_to_console.subp.subp")
+ @pytest.mark.parametrize("cfg,subp_called", [
+ ({}, True), # Default to emitting keys
+ ({"ssh": {}}, True), # Default even if we have the parent key
+ ({"ssh": {"emit_keys_to_console": True}}, True), # Explicitly enabled
+ ({"ssh": {"emit_keys_to_console": False}}, False), # Disabled
+ ])
+ def test_emit_keys_to_console_config(
+ self, m_subp, m_path_exists, _m_multi_log, cfg, subp_called
+ ):
+ # Ensure we always find the helper
+ m_path_exists.return_value = True
+ m_subp.return_value = ("", "")
+
+ cc_keys_to_console.handle("name", cfg, mock.Mock(), mock.Mock(), ())
+
+ assert subp_called == (m_subp.call_count == 1)
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 1e118472..220bd11f 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -673,7 +673,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
found_include = False
for line in sudoers_contents.splitlines():
line = line.strip()
- include_match = re.search(r"^#includedir\s+(.*)$", line)
+ include_match = re.search(r"^[#|@]includedir\s+(.*)$", line)
if not include_match:
continue
included_dir = include_match.group(1).strip()
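The widened character class can be checked in isolation; note that [#|@] is
a character class, so it matches '#', '@', and (strictly speaking) a literal
'|' as well:

    import re

    # Both the traditional '#includedir' and sudo's '@includedir'
    # spelling now match and yield the included directory.
    pattern = re.compile(r"^[#|@]includedir\s+(.*)$")
    for line in ("#includedir /etc/sudoers.d", "@includedir /etc/sudoers.d"):
        match = pattern.search(line)
        assert match and match.group(1) == "/etc/sudoers.d"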
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 9752ad28..fc5011ec 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -230,6 +230,10 @@ class ConfigMerger(object):
cc_paths = ['cloud_config']
if self._include_vendor:
+ # the order is important here: we want vendor2
+ # (dynamic vendor data from OpenStack)
+ # to override vendor (static data from OpenStack)
+ cc_paths.append('vendor2_cloud_config')
cc_paths.append('vendor_cloud_config')
for cc_p in cc_paths:
@@ -337,9 +341,12 @@ class Paths(object):
"obj_pkl": "obj.pkl",
"cloud_config": "cloud-config.txt",
"vendor_cloud_config": "vendor-cloud-config.txt",
+ "vendor2_cloud_config": "vendor2-cloud-config.txt",
"data": "data",
"vendordata_raw": "vendor-data.txt",
+ "vendordata2_raw": "vendor-data2.txt",
"vendordata": "vendor-data.txt.i",
+ "vendordata2": "vendor-data2.txt.i",
"instance_id": ".instance-id",
"manual_clean_marker": "manual-clean",
"warnings": "warnings",
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index ca4ffa8e..91e1bfe7 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -41,6 +41,7 @@ CFG_BUILTIN = {
'Oracle',
'Exoscale',
'RbxCloud',
+ 'UpCloud',
# At the end to act as a 'catch' when none of the above work...
'None',
],
@@ -56,6 +57,7 @@ CFG_BUILTIN = {
'network': {'renderers': None},
},
'vendor_data': {'enabled': True, 'prefix': []},
+ 'vendor_data2': {'enabled': True, 'prefix': []},
}
# Valid frequencies of handlers/modules
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 090dd66b..cee630f7 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -270,7 +270,7 @@ BUILTIN_DS_CONFIG = {
}
# RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False
-BUILTIN_CLOUD_CONFIG = {
+BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG = {
'disk_setup': {
'ephemeral0': {'table_type': 'gpt',
'layout': [100],
@@ -618,8 +618,26 @@ class DataSourceAzure(sources.DataSource):
maybe_remove_ubuntu_network_config_scripts()
# Process crawled data and augment with various config defaults
- self.cfg = util.mergemanydict(
- [crawled_data['cfg'], BUILTIN_CLOUD_CONFIG])
+
+ # Only merge in default cloud config related to the ephemeral disk
+ # if the ephemeral disk exists
+ devpath = RESOURCE_DISK_PATH
+ if os.path.exists(devpath):
+ report_diagnostic_event(
+ "Ephemeral resource disk '%s' exists. "
+ "Merging default Azure cloud ephemeral disk configs."
+ % devpath,
+ logger_func=LOG.debug)
+ self.cfg = util.mergemanydict(
+ [crawled_data['cfg'], BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG])
+ else:
+ report_diagnostic_event(
+ "Ephemeral resource disk '%s' does not exist. "
+ "Not merging default Azure cloud ephemeral disk configs."
+ % devpath,
+ logger_func=LOG.debug)
+ self.cfg = crawled_data['cfg']
+
self._metadata_imds = crawled_data['metadata']['imds']
self.metadata = util.mergemanydict(
[crawled_data['metadata'], DEFAULT_METADATA])
@@ -683,10 +701,18 @@ class DataSourceAzure(sources.DataSource):
def _iid(self, previous=None):
prev_iid_path = os.path.join(
self.paths.get_cpath('data'), 'instance-id')
- iid = dmi.read_dmi_data('system-uuid')
+ # Older kernels than 4.15 will have UPPERCASE product_uuid.
+ # We don't want Azure to react to an UPPER/lower difference as a new
+ # instance id as it rewrites SSH host keys.
+ # LP: #1835584
+ iid = dmi.read_dmi_data('system-uuid').lower()
if os.path.exists(prev_iid_path):
previous = util.load_file(prev_iid_path).strip()
- if is_byte_swapped(previous, iid):
+ if previous.lower() == iid:
+ # If uppercase/lowercase equivalent, return the previous value
+ # to avoid new instance id.
+ return previous
+ if is_byte_swapped(previous.lower(), iid):
return previous
return iid
@@ -1460,26 +1486,17 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
@azure_ds_telemetry_reporter
-def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
+def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH,
is_new_instance=False, preserve_ntfs=False):
- # wait for ephemeral disk to come up
- naplen = .2
- with events.ReportEventStack(
- name="wait-for-ephemeral-disk",
- description="wait for ephemeral disk",
- parent=azure_ds_reporter
- ):
- missing = util.wait_for_files([devpath],
- maxwait=maxwait,
- naplen=naplen,
- log_pre="Azure ephemeral disk: ")
-
- if missing:
- report_diagnostic_event(
- "ephemeral device '%s' did not appear after %d seconds." %
- (devpath, maxwait),
- logger_func=LOG.warning)
- return
+ if not os.path.exists(devpath):
+ report_diagnostic_event(
+ "Ephemeral resource disk '%s' does not exist." % devpath,
+ logger_func=LOG.debug)
+ return
+ else:
+ report_diagnostic_event(
+ "Ephemeral resource disk '%s' exists." % devpath,
+ logger_func=LOG.debug)
result = False
msg = None
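The behavioral change in _iid() boils down to the comparison sketched below
(standalone; pick_instance_id() is a hypothetical helper, not cloud-init API):

    # If the stored instance-id differs from the DMI UUID only in case,
    # keep the stored value so a kernel upgrade (which may flip the
    # product_uuid case) is not mistaken for a new instance.
    def pick_instance_id(previous, current_dmi_uuid):
        current = current_dmi_uuid.lower()
        if previous is not None and previous.lower() == current:
            return previous
        return current

    assert pick_instance_id("ABCDEF-1234", "abcdef-1234") == "ABCDEF-1234"
    assert pick_instance_id("abcdef-1234", "FEDCBA-4321") == "fedcba-4321"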
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index b3406c67..619a171e 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -167,6 +167,14 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
LOG.warning("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
+ vd2 = results.get('vendordata2')
+ self.vendordata2_pure = vd2
+ try:
+ self.vendordata2_raw = sources.convert_vendordata(vd2)
+ except ValueError as e:
+ LOG.warning("Invalid content in vendor-data2: %s", e)
+ self.vendordata2_raw = None
+
return True
def _crawl_metadata(self):
diff --git a/cloudinit/sources/DataSourceUpCloud.py b/cloudinit/sources/DataSourceUpCloud.py
new file mode 100644
index 00000000..209b9672
--- /dev/null
+++ b/cloudinit/sources/DataSourceUpCloud.py
@@ -0,0 +1,165 @@
+# Author: Antti Myyrä <antti.myyra@upcloud.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# UpCloud server metadata API:
+# https://developers.upcloud.com/1.3/8-servers/#metadata-service
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+from cloudinit import net as cloudnet
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+
+
+from cloudinit.sources.helpers import upcloud as uc_helper
+
+LOG = logging.getLogger(__name__)
+
+BUILTIN_DS_CONFIG = {"metadata_url": "http://169.254.169.254/metadata/v1.json"}
+
+# Wait for up to a minute, retrying the metadata server
+# every 2 seconds.
+MD_RETRIES = 30
+MD_TIMEOUT = 2
+MD_WAIT_RETRY = 2
+
+
+class DataSourceUpCloud(sources.DataSource):
+
+ dsname = "UpCloud"
+
+ # We'll perform DHCP setup only in init-local, see DataSourceUpCloudLocal
+ perform_dhcp_setup = False
+
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.distro = distro
+ self.metadata = dict()
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "UpCloud"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+ self.metadata_address = self.ds_cfg["metadata_url"]
+ self.retries = self.ds_cfg.get("retries", MD_RETRIES)
+ self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT)
+ self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY)
+ self._network_config = None
+
+ def _get_sysinfo(self):
+ return uc_helper.read_sysinfo()
+
+ def _read_metadata(self):
+ return uc_helper.read_metadata(
+ self.metadata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries,
+ )
+
+ def _get_data(self):
+ (is_upcloud, server_uuid) = self._get_sysinfo()
+
+ # only proceed if we know we are on UpCloud
+ if not is_upcloud:
+ return False
+
+ LOG.info("Running on UpCloud. server_uuid=%s", server_uuid)
+
+ if self.perform_dhcp_setup: # Setup networking in init-local stage.
+ try:
+ LOG.debug("Finding a fallback NIC")
+ nic = cloudnet.find_fallback_nic()
+ LOG.debug("Discovering metadata via DHCP interface %s", nic)
+ with EphemeralDHCPv4(nic):
+ md = util.log_time(
+ logfunc=LOG.debug,
+ msg="Reading from metadata service",
+ func=self._read_metadata,
+ )
+ except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
+ util.logexc(LOG, str(e))
+ return False
+ else:
+ try:
+ LOG.debug(
+ "Discovering metadata without DHCP-configured networking"
+ )
+ md = util.log_time(
+ logfunc=LOG.debug,
+ msg="Reading from metadata service",
+ func=self._read_metadata,
+ )
+ except sources.InvalidMetaDataException as e:
+ util.logexc(LOG, str(e))
+ LOG.info(
+ "No DHCP-enabled interfaces available, "
+ "unable to fetch metadata for %s",
+ server_uuid,
+ )
+ return False
+
+ self.metadata_full = md
+ self.metadata["instance-id"] = md.get("instance_id", server_uuid)
+ self.metadata["local-hostname"] = md.get("hostname")
+ self.metadata["network"] = md.get("network")
+ self.metadata["public-keys"] = md.get("public_keys")
+ self.metadata["availability_zone"] = md.get("region", "default")
+ self.vendordata_raw = md.get("vendor_data", None)
+ self.userdata_raw = md.get("user_data", None)
+
+ return True
+
+ def check_instance_id(self, sys_cfg):
+ return sources.instance_id_matches_system_uuid(self.get_instance_id())
+
+ @property
+ def network_config(self):
+ """
+ Configure the networking. This needs to be done each boot,
+ since the IP and interface information might have changed
+ due to reconfiguration.
+ """
+
+ if self._network_config:
+ return self._network_config
+
+ raw_network_config = self.metadata.get("network")
+ if not raw_network_config:
+ raise Exception("Unable to get network meta-data from server....")
+
+ self._network_config = uc_helper.convert_network_config(
+ raw_network_config,
+ )
+
+ return self._network_config
+
+
+class DataSourceUpCloudLocal(DataSourceUpCloud):
+ """
+ Run in init-local using a DHCP discovery prior to metadata crawl.
+
+ In init-local, no network is available. This subclass sets up minimal
+ networking with dhclient on a viable nic so that it can talk to the
+ metadata service. If the metadata service provides network configuration
+ then render the network configuration for that instance based on metadata.
+ """
+
+ perform_dhcp_setup = True # Get metadata network config if present
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceUpCloudLocal, (sources.DEP_FILESYSTEM, )),
+ (DataSourceUpCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
+
+
+# vi: ts=4 expandtab
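The two dependency tuples above decide which class runs in which boot stage.
A sketch of the selection logic (sources.list_from_depends() paraphrased
here; it matches on set equality of the declared dependencies):

    DEP_FILESYSTEM, DEP_NETWORK = "FILESYSTEM", "NETWORK"

    datasources = [
        ("DataSourceUpCloudLocal", (DEP_FILESYSTEM,)),
        ("DataSourceUpCloud", (DEP_FILESYSTEM, DEP_NETWORK)),
    ]

    def list_from_depends(depends, ds_list):
        return [name for name, deps in ds_list if set(deps) == set(depends)]

    # init-local: only the filesystem is up, so the Local variant (which
    # brings up DHCP itself) is chosen; in init, the network is available.
    assert list_from_depends([DEP_FILESYSTEM], datasources) == \
        ["DataSourceUpCloudLocal"]
    assert list_from_depends([DEP_FILESYSTEM, DEP_NETWORK], datasources) == \
        ["DataSourceUpCloud"]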
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 9dccc687..1ad1880d 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -187,7 +187,8 @@ class DataSource(metaclass=abc.ABCMeta):
cached_attr_defaults = (
('ec2_metadata', UNSET), ('network_json', UNSET),
('metadata', {}), ('userdata', None), ('userdata_raw', None),
- ('vendordata', None), ('vendordata_raw', None))
+ ('vendordata', None), ('vendordata_raw', None),
+ ('vendordata2', None), ('vendordata2_raw', None))
_dirty_cache = False
@@ -203,7 +204,9 @@ class DataSource(metaclass=abc.ABCMeta):
self.metadata = {}
self.userdata_raw = None
self.vendordata = None
+ self.vendordata2 = None
self.vendordata_raw = None
+ self.vendordata2_raw = None
self.ds_cfg = util.get_cfg_by_path(
self.sys_cfg, ("datasource", self.dsname), {})
@@ -392,6 +395,11 @@ class DataSource(metaclass=abc.ABCMeta):
self.vendordata = self.ud_proc.process(self.get_vendordata_raw())
return self.vendordata
+ def get_vendordata2(self):
+ if self.vendordata2 is None:
+ self.vendordata2 = self.ud_proc.process(self.get_vendordata2_raw())
+ return self.vendordata2
+
@property
def fallback_interface(self):
"""Determine the network interface used during local network config."""
@@ -494,6 +502,9 @@ class DataSource(metaclass=abc.ABCMeta):
def get_vendordata_raw(self):
return self.vendordata_raw
+ def get_vendordata2_raw(self):
+ return self.vendordata2_raw
+
# the data sources' config_obj is a cloud-config formated
# object that came to it from ways other than cloud-config
# because cloud-config content would be handled elsewhere
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 3e6365f1..4f566e64 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -247,6 +247,11 @@ class BaseReader(metaclass=abc.ABCMeta):
False,
load_json_anytype,
)
+ files['vendordata2'] = (
+ self._path_join("openstack", version, 'vendor_data2.json'),
+ False,
+ load_json_anytype,
+ )
files['networkdata'] = (
self._path_join("openstack", version, 'network_data.json'),
False,
diff --git a/cloudinit/sources/helpers/upcloud.py b/cloudinit/sources/helpers/upcloud.py
new file mode 100644
index 00000000..199baa58
--- /dev/null
+++ b/cloudinit/sources/helpers/upcloud.py
@@ -0,0 +1,231 @@
+# Author: Antti Myyrä <antti.myyra@upcloud.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+from cloudinit import dmi
+from cloudinit import log as logging
+from cloudinit import net as cloudnet
+from cloudinit import url_helper
+
+LOG = logging.getLogger(__name__)
+
+
+def convert_to_network_config_v1(config):
+ """
+ Convert the UpCloud network metadata description into
+ Cloud-init's version 1 netconfig format.
+
+ Example JSON:
+ {
+ "interfaces": [
+ {
+ "index": 1,
+ "ip_addresses": [
+ {
+ "address": "94.237.105.53",
+ "dhcp": true,
+ "dns": [
+ "94.237.127.9",
+ "94.237.40.9"
+ ],
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "94.237.104.1",
+ "network": "94.237.104.0/22"
+ },
+ {
+ "address": "94.237.105.50",
+ "dhcp": false,
+ "dns": [],
+ "family": "IPv4",
+ "floating": true,
+ "gateway": "",
+ "network": "94.237.105.50/32"
+ }
+ ],
+ "mac": "32:d5:ba:4a:36:e7",
+ "network_id": "031457f4-0f8c-483c-96f2-eccede02909c",
+ "type": "public"
+ },
+ {
+ "index": 2,
+ "ip_addresses": [
+ {
+ "address": "10.6.3.27",
+ "dhcp": true,
+ "dns": [],
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "10.6.0.1",
+ "network": "10.6.0.0/22"
+ }
+ ],
+ "mac": "32:d5:ba:4a:84:cc",
+ "network_id": "03d82553-5bea-4132-b29a-e1cf67ec2dd1",
+ "type": "utility"
+ },
+ {
+ "index": 3,
+ "ip_addresses": [
+ {
+ "address": "2a04:3545:1000:720:38d6:baff:fe4a:63e7",
+ "dhcp": true,
+ "dns": [
+ "2a04:3540:53::1",
+ "2a04:3544:53::1"
+ ],
+ "family": "IPv6",
+ "floating": false,
+ "gateway": "2a04:3545:1000:720::1",
+ "network": "2a04:3545:1000:720::/64"
+ }
+ ],
+ "mac": "32:d5:ba:4a:63:e7",
+ "network_id": "03000000-0000-4000-8046-000000000000",
+ "type": "public"
+ },
+ {
+ "index": 4,
+ "ip_addresses": [
+ {
+ "address": "172.30.1.10",
+ "dhcp": true,
+ "dns": [],
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "172.30.1.1",
+ "network": "172.30.1.0/24"
+ }
+ ],
+ "mac": "32:d5:ba:4a:8a:e1",
+ "network_id": "035a0a4a-77b4-4de5-820d-189fc8135714",
+ "type": "private"
+ }
+ ],
+ "dns": [
+ "94.237.127.9",
+ "94.237.40.9"
+ ]
+ }
+ """
+
+ def _get_subnet_config(ip_addr, dns):
+ if ip_addr.get("dhcp"):
+ dhcp_type = "dhcp"
+ if ip_addr.get("family") == "IPv6":
+ # UpCloud currently passes IPv6 addresses via
+ # StateLess Address Auto Configuration (SLAAC)
+ dhcp_type = "ipv6_dhcpv6-stateless"
+ return {"type": dhcp_type}
+
+ static_type = "static"
+ if ip_addr.get("family") == "IPv6":
+ static_type = "static6"
+ subpart = {
+ "type": static_type,
+ "control": "auto",
+ "address": ip_addr.get("address"),
+ }
+
+ if ip_addr.get("gateway"):
+ subpart["gateway"] = ip_addr.get("gateway")
+
+ if "/" in ip_addr.get("network"):
+ subpart["netmask"] = ip_addr.get("network").split("/")[1]
+
+ if dns != ip_addr.get("dns") and ip_addr.get("dns"):
+ subpart["dns_nameservers"] = ip_addr.get("dns")
+
+ return subpart
+
+ nic_configs = []
+ macs_to_interfaces = cloudnet.get_interfaces_by_mac()
+ LOG.debug("NIC mapping: %s", macs_to_interfaces)
+
+ for raw_iface in config.get("interfaces"):
+ LOG.debug("Considering %s", raw_iface)
+
+ mac_address = raw_iface.get("mac")
+ if mac_address not in macs_to_interfaces:
+ raise RuntimeError(
+ "Did not find network interface on system "
+ "with mac '%s'. Cannot apply configuration: %s"
+ % (mac_address, raw_iface)
+ )
+
+ iface_type = raw_iface.get("type")
+ sysfs_name = macs_to_interfaces.get(mac_address)
+
+ LOG.debug(
+ "Found %s interface '%s' with address '%s' (index %d)",
+ iface_type,
+ sysfs_name,
+ mac_address,
+ raw_iface.get("index"),
+ )
+
+ interface = {
+ "type": "physical",
+ "name": sysfs_name,
+ "mac_address": mac_address
+ }
+
+ subnets = []
+ for ip_address in raw_iface.get("ip_addresses"):
+ sub_part = _get_subnet_config(ip_address, config.get("dns"))
+ subnets.append(sub_part)
+
+ interface["subnets"] = subnets
+ nic_configs.append(interface)
+
+ if config.get("dns"):
+ LOG.debug("Setting DNS nameservers to %s", config.get("dns"))
+ nic_configs.append({
+ "type": "nameserver",
+ "address": config.get("dns")
+ })
+
+ return {"version": 1, "config": nic_configs}
+
+
+def convert_network_config(config):
+ return convert_to_network_config_v1(config)
+
+
+def read_metadata(url, timeout=2, sec_between=2, retries=30):
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
+ if not response.ok():
+ raise RuntimeError("unable to read metadata at %s" % url)
+ return json.loads(response.contents.decode())
+
+
+def read_sysinfo():
+ # UpCloud embeds vendor ID and server UUID in the
+ # SMBIOS information
+
+ # Detect if we are on UpCloud and return the UUID
+
+ vendor_name = dmi.read_dmi_data("system-manufacturer")
+ if vendor_name != "UpCloud":
+ return False, None
+
+ server_uuid = dmi.read_dmi_data("system-uuid")
+ if server_uuid:
+ LOG.debug(
+ "system identified via SMBIOS as UpCloud server: %s",
+ server_uuid
+ )
+ else:
+ msg = (
+ "system identified via SMBIOS as a UpCloud server, but "
+ "did not provide an ID. Please contact support via"
+ "https://hub.upcloud.com or via email with support@upcloud.com"
+ )
+ LOG.critical(msg)
+ raise RuntimeError(msg)
+
+ return True, server_uuid
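To make the conversion concrete, an illustrative input/output pair for
convert_to_network_config_v1() (invented values; the real call also consults
get_interfaces_by_mac(), so the MAC must exist on the host it runs on):

    # One UpCloud interface with a single DHCP IPv4 address...
    upcloud_iface = {
        "index": 1,
        "mac": "32:d5:ba:4a:36:e7",
        "type": "public",
        "ip_addresses": [{
            "address": "94.237.105.53", "dhcp": True,
            "dns": ["94.237.127.9"], "family": "IPv4",
            "gateway": "94.237.104.1", "network": "94.237.104.0/22",
        }],
    }

    # ...becomes one physical NIC with a single dhcp subnet in the v1
    # config (assuming the MAC maps to sysfs name 'eth0').
    expected_v1_fragment = {
        "type": "physical",
        "name": "eth0",
        "mac_address": "32:d5:ba:4a:36:e7",
        "subnets": [{"type": "dhcp"}],
    }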
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 0cce6e80..3ef4491c 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -360,8 +360,18 @@ class Init(object):
reporter=self.reporter)
def update(self):
- self._store_userdata()
- self._store_vendordata()
+ self._store_rawdata(self.datasource.get_userdata_raw(),
+ 'userdata')
+ self._store_processeddata(self.datasource.get_userdata(),
+ 'userdata')
+ self._store_rawdata(self.datasource.get_vendordata_raw(),
+ 'vendordata')
+ self._store_processeddata(self.datasource.get_vendordata(),
+ 'vendordata')
+ self._store_rawdata(self.datasource.get_vendordata2_raw(),
+ 'vendordata2')
+ self._store_processeddata(self.datasource.get_vendordata2(),
+ 'vendordata2')
def setup_datasource(self):
with events.ReportEventStack("setup-datasource",
@@ -381,28 +391,18 @@ class Init(object):
is_new_instance=self.is_new_instance())
self._write_to_cache()
- def _store_userdata(self):
- raw_ud = self.datasource.get_userdata_raw()
- if raw_ud is None:
- raw_ud = b''
- util.write_file(self._get_ipath('userdata_raw'), raw_ud, 0o600)
- # processed userdata is a Mime message, so write it as string.
- processed_ud = self.datasource.get_userdata()
- if processed_ud is None:
- raw_ud = ''
- util.write_file(self._get_ipath('userdata'), str(processed_ud), 0o600)
-
- def _store_vendordata(self):
- raw_vd = self.datasource.get_vendordata_raw()
- if raw_vd is None:
- raw_vd = b''
- util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0o600)
- # processed vendor data is a Mime message, so write it as string.
- processed_vd = str(self.datasource.get_vendordata())
- if processed_vd is None:
- processed_vd = ''
- util.write_file(self._get_ipath('vendordata'), str(processed_vd),
- 0o600)
+ def _store_rawdata(self, data, datasource):
+ # Raw data is bytes, not a string
+ if data is None:
+ data = b''
+ util.write_file(self._get_ipath('%s_raw' % datasource), data, 0o600)
+
+ def _store_processeddata(self, processed_data, datasource):
+ # processed is a Mime message, so write as string.
+ if processed_data is None:
+ processed_data = ''
+ util.write_file(self._get_ipath(datasource),
+ str(processed_data), 0o600)
def _default_handlers(self, opts=None):
if opts is None:
@@ -434,6 +434,11 @@ class Init(object):
opts={'script_path': 'vendor_scripts',
'cloud_config_path': 'vendor_cloud_config'})
+ def _default_vendordata2_handlers(self):
+ return self._default_handlers(
+ opts={'script_path': 'vendor_scripts',
+ 'cloud_config_path': 'vendor2_cloud_config'})
+
def _do_handlers(self, data_msg, c_handlers_list, frequency,
excluded=None):
"""
@@ -555,7 +560,12 @@ class Init(object):
with events.ReportEventStack("consume-vendor-data",
"reading and applying vendor-data",
parent=self.reporter):
- self._consume_vendordata(frequency)
+ self._consume_vendordata("vendordata", frequency)
+
+ with events.ReportEventStack("consume-vendor-data2",
+ "reading and applying vendor-data2",
+ parent=self.reporter):
+ self._consume_vendordata("vendordata2", frequency)
# Perform post-consumption adjustments so that
# modules that run during the init stage reflect
@@ -568,46 +578,62 @@ class Init(object):
# objects before the load of the userdata happened,
# this is expected.
- def _consume_vendordata(self, frequency=PER_INSTANCE):
+ def _consume_vendordata(self, vendor_source, frequency=PER_INSTANCE):
"""
Consume the vendordata and run the part handlers on it
"""
+
# User-data should have been consumed first.
# So we merge the other available cloud-configs (everything except
# vendor provided), and check whether or not we should consume
# vendor data at all. That gives user or system a chance to override.
- if not self.datasource.get_vendordata_raw():
- LOG.debug("no vendordata from datasource")
- return
+ if vendor_source == 'vendordata':
+ if not self.datasource.get_vendordata_raw():
+ LOG.debug("no vendordata from datasource")
+ return
+ cfg_name = 'vendor_data'
+ elif vendor_source == 'vendordata2':
+ if not self.datasource.get_vendordata2_raw():
+ LOG.debug("no vendordata2 from datasource")
+ return
+ cfg_name = 'vendor_data2'
+ else:
+ raise RuntimeError("vendor_source arg must be either 'vendordata'"
+ " or 'vendordata2'")
_cc_merger = helpers.ConfigMerger(paths=self._paths,
datasource=self.datasource,
additional_fns=[],
base_cfg=self.cfg,
include_vendor=False)
- vdcfg = _cc_merger.cfg.get('vendor_data', {})
+ vdcfg = _cc_merger.cfg.get(cfg_name, {})
if not isinstance(vdcfg, dict):
vdcfg = {'enabled': False}
- LOG.warning("invalid 'vendor_data' setting. resetting to: %s",
- vdcfg)
+ LOG.warning("invalid %s setting. resetting to: %s",
+ cfg_name, vdcfg)
enabled = vdcfg.get('enabled')
no_handlers = vdcfg.get('disabled_handlers', None)
if not util.is_true(enabled):
- LOG.debug("vendordata consumption is disabled.")
+ LOG.debug("%s consumption is disabled.", vendor_source)
return
- LOG.debug("vendor data will be consumed. disabled_handlers=%s",
- no_handlers)
+ LOG.debug("%s will be consumed. disabled_handlers=%s",
+ vendor_source, no_handlers)
- # Ensure vendordata source fetched before activation (just incase)
- vendor_data_msg = self.datasource.get_vendordata()
+ # Ensure vendordata source fetched before activation (just in case.)
- # This keeps track of all the active handlers, while excluding what the
- # users doesn't want run, i.e. boot_hook, cloud_config, shell_script
- c_handlers_list = self._default_vendordata_handlers()
+ # c_handlers_list keeps track of all the active handlers, while
+ # excluding what the users doesn't want run, i.e. boot_hook,
+ # cloud_config, shell_script
+ if vendor_source == 'vendordata':
+ vendor_data_msg = self.datasource.get_vendordata()
+ c_handlers_list = self._default_vendordata_handlers()
+ else:
+ vendor_data_msg = self.datasource.get_vendordata2()
+ c_handlers_list = self._default_vendordata2_handlers()
# Run the handlers
self._do_handlers(vendor_data_msg, c_handlers_list, frequency,
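The per-source gate in _consume_vendordata() reduces to a small lookup,
sketched here (standalone; the real code uses util.is_true(), which also
accepts string forms like "true"):

    # Either vendor-data channel can be disabled independently through
    # the merged cloud config, under 'vendor_data' or 'vendor_data2'.
    def vendordata_enabled(merged_cfg, cfg_name):
        vdcfg = merged_cfg.get(cfg_name, {})
        if not isinstance(vdcfg, dict):
            vdcfg = {"enabled": False}  # invalid setting: fail closed
        return vdcfg.get("enabled") is True

    assert vendordata_enabled({"vendor_data2": {"enabled": True}}, "vendor_data2")
    assert not vendordata_enabled({"vendor_data2": {"enabled": False}}, "vendor_data2")
    assert not vendordata_enabled({"vendor_data2": "bogus"}, "vendor_data2")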
diff --git a/cloudinit/version.py b/cloudinit/version.py
index 36ec728e..94afd60d 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,7 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "20.4.1"
+__VERSION__ = "21.1"
_PACKAGED_VERSION = '@@PACKAGED_VERSION@@'
FEATURES = [
diff --git a/debian/changelog b/debian/changelog
index a45a6164..38d0314e 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,32 @@
+cloud-init (21.1-0ubuntu1) hirsute; urgency=medium
+
+ * New upstream release.
+ - Release 21.1 (#820) (LP: #1916540)
+ - Azure: Support for VMs without ephemeral resource disks. (#800)
+ [Johnson Shi] (LP: #1901011)
+ - cc_keys_to_console: add option to disable key emission (#811)
+ [Michael Hudson-Doyle] (LP: #1915460)
+ - integration_tests: introduce lxd_use_exec mark (#802)
+ - azure: case-insensitive UUID to avoid new IID during kernel upgrade
+ (#798) (LP: #1835584)
+ - stale.yml: don't ask submitters to reopen PRs (#816)
+ - integration_tests: fix use of SSH agent within tox (#815)
+ - integration_tests: add UPGRADE CloudInitSource (#812)
+ - integration_tests: use unique MAC addresses for tests (#813)
+ - Update .gitignore (#814)
+ - Port apt cloud_tests to integration tests (#808)
+ - integration_tests: fix test_gh626 on LXD VMs (#809)
+ - Fix attempting to decode binary data in test_seed_random_data test (#806)
+ - Remove wait argument from tests with session_cloud calls (#805)
+ - Datasource for UpCloud (#743) [Antti Myyrä]
+ - test_gh668: fix failure on LXD VMs (#801)
+ - openstack: read the dynamic metadata group vendor_data2.json (#777)
+ [Andrew Bogott] (LP: #1841104)
+ - includedir in sudoers can be prefixed by "@" (arroba) (#783)
+ [Jordi Massaguer Pla]
+
+ -- James Falcon <james.falcon@canonical.com> Tue, 23 Feb 2021 10:06:10 -0500
+
cloud-init (20.4.1-79-g71564dce-0ubuntu1) hirsute; urgency=medium
* New upstream snapshot.
diff --git a/doc/examples/cloud-config-ssh-keys.txt b/doc/examples/cloud-config-ssh-keys.txt
index aad8b683..bfe5ab44 100644
--- a/doc/examples/cloud-config-ssh-keys.txt
+++ b/doc/examples/cloud-config-ssh-keys.txt
@@ -42,3 +42,13 @@ ssh_keys:
-----END DSA PRIVATE KEY-----
dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost
+
+# By default, the fingerprints of the authorized keys for the users
+# cloud-init adds are printed to the console. Setting
+# no_ssh_fingerprints to true suppresses this output.
+no_ssh_fingerprints: false
+
+# By default, (most) ssh host keys are printed to the console. Setting
+# emit_keys_to_console to false suppresses this output.
+ssh:
+ emit_keys_to_console: false
diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst
index 8f56a7d2..f58b2b38 100644
--- a/doc/rtd/topics/availability.rst
+++ b/doc/rtd/topics/availability.rst
@@ -55,6 +55,7 @@ environments in the public cloud:
- CloudStack
- AltCloud
- SmartOS
+- UpCloud
Additionally, cloud-init is supported on these private clouds:
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 3d026143..228173d2 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -47,6 +47,7 @@ The following is a list of documents for each supported datasource:
datasources/ovf.rst
datasources/rbxcloud.rst
datasources/smartos.rst
+ datasources/upcloud.rst
datasources/zstack.rst
diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst
index b23b4b7c..62d0fc03 100644
--- a/doc/rtd/topics/datasources/openstack.rst
+++ b/doc/rtd/topics/datasources/openstack.rst
@@ -82,4 +82,12 @@ For more general information about how cloud-init handles vendor data,
including how it can be disabled by users on instances, see
:doc:`/topics/vendordata`.
+OpenStack can also be configured to provide 'dynamic vendordata',
+which is served by the DynamicJSON provider and appears under a
+separate metadata path, /vendor_data2.json.
+
+Cloud-init will look for ``cloud-init`` data at the vendor_data2 path; if
+found, its settings are applied after (and hence override) the settings from
+static vendor data. Both sets of vendor data can be overridden by user data.
+
.. vi: textwidth=78
diff --git a/doc/rtd/topics/datasources/upcloud.rst b/doc/rtd/topics/datasources/upcloud.rst
new file mode 100644
index 00000000..0b7a9bb0
--- /dev/null
+++ b/doc/rtd/topics/datasources/upcloud.rst
@@ -0,0 +1,24 @@
+.. _datasource_upcloud:
+
+UpCloud
+=============
+
+The `UpCloud`_ datasource consumes information from UpCloud's `metadata
+service`_. The metadata service serves information about the
+running server via HTTP at the address 169.254.169.254, which is reachable on
+every DHCP-configured interface. The metadata API endpoints are fully
+described in the UpCloud API documentation at
+`https://developers.upcloud.com/1.3/8-servers/#metadata-service
+<https://developers.upcloud.com/1.3/8-servers/#metadata-service>`_.
+
+Providing user-data
+-------------------
+
+When creating a server, user-data is provided by specifying it as `user_data`
+in the API or via the server creation tool in the control panel. User-data is
+immutable during the server's lifetime and can be removed by deleting the server.
+
+.. _UpCloud: https://upcloud.com/
+.. _metadata service: https://upcloud.com/community/tutorials/upcloud-metadata-service/
+
+.. vi: textwidth=78
diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
index 08db04d8..07cad765 100644
--- a/doc/rtd/topics/network-config.rst
+++ b/doc/rtd/topics/network-config.rst
@@ -144,6 +144,10 @@ The following Datasources optionally provide network configuration:
- `SmartOS JSON Metadata`_
+- :ref:`datasource_upcloud`
+
+ - `UpCloud JSON metadata`_
+
For more information on network configuration formats
.. toctree::
@@ -257,5 +261,6 @@ Example output converting V2 to sysconfig:
.. _DigitalOcean JSON metadata: https://developers.digitalocean.com/documentation/metadata/#network-interfaces-index
.. _OpenStack Metadata Service Network: https://specs.openstack.org/openstack/nova-specs/specs/liberty/implemented/metadata-service-network-info.html
.. _SmartOS JSON Metadata: https://eng.joyent.com/mdata/datadict.html
+.. _UpCloud JSON metadata: https://developers.upcloud.com/1.3/8-servers/#metadata-service
.. vi: textwidth=78
diff --git a/integration-requirements.txt b/integration-requirements.txt
index c959001e..6b596426 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -1,5 +1,5 @@
# PyPI requirements for cloud-init integration testing
# https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html
#
-pycloudlib @ git+https://github.com/canonical/pycloudlib.git@878981e3c7caaf583a8c7c5494dba9d9447acee8
+pycloudlib @ git+https://github.com/canonical/pycloudlib.git@da8445325875674394ffd85aaefaa3d2d0e0020d
pytest
diff --git a/tests/integration_tests/__init__.py b/tests/integration_tests/__init__.py
new file mode 100644
index 00000000..e1d4cd28
--- /dev/null
+++ b/tests/integration_tests/__init__.py
@@ -0,0 +1,12 @@
+import random
+
+
+def random_mac_address() -> str:
+ """Generate a random MAC address.
+
+ The MAC address will have a 1 in the second-least-significant bit of its
+ first octet, marking it as a locally administered address.
+ """
+ return "02:00:00:%02x:%02x:%02x" % (random.randint(0, 255),
+ random.randint(0, 255),
+ random.randint(0, 255))
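A quick property check for the generated addresses (a sketch; assumes the
module above is importable as tests.integration_tests):

    from tests.integration_tests import random_mac_address

    # The fixed 0x02 first octet has the locally-administered bit (bit 1)
    # set and the multicast bit (bit 0) clear, so the generated MACs are
    # unicast addresses that are safe to invent for test VMs.
    first_octet = int(random_mac_address().split(":")[0], 16)
    assert first_octet & 0b10       # locally administered
    assert not first_octet & 0b01   # unicast, not multicast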
diff --git a/tests/integration_tests/bugs/test_gh626.py b/tests/integration_tests/bugs/test_gh626.py
index 2d336462..dba01b34 100644
--- a/tests/integration_tests/bugs/test_gh626.py
+++ b/tests/integration_tests/bugs/test_gh626.py
@@ -7,17 +7,21 @@ in the /etc/network/interfaces or netplan config.
import pytest
import yaml
+from tests.integration_tests import random_mac_address
from tests.integration_tests.clouds import ImageSpecification
from tests.integration_tests.instances import IntegrationInstance
+MAC_ADDRESS = random_mac_address()
NETWORK_CONFIG = """\
version: 2
ethernets:
eth0:
dhcp4: true
wakeonlan: true
-"""
+ match:
+ macaddress: {}
+""".format(MAC_ADDRESS)
EXPECTED_ENI_END = """\
iface eth0 inet dhcp
@@ -28,7 +32,8 @@ iface eth0 inet dhcp
@pytest.mark.lxd_container
@pytest.mark.lxd_vm
@pytest.mark.lxd_config_dict({
- 'user.network-config': NETWORK_CONFIG
+ 'user.network-config': NETWORK_CONFIG,
+ "volatile.eth0.hwaddr": MAC_ADDRESS,
})
def test_wakeonlan(client: IntegrationInstance):
if ImageSpecification.from_os_image().release == 'xenial':
diff --git a/tests/integration_tests/bugs/test_gh668.py b/tests/integration_tests/bugs/test_gh668.py
index a3a0c374..ce57052e 100644
--- a/tests/integration_tests/bugs/test_gh668.py
+++ b/tests/integration_tests/bugs/test_gh668.py
@@ -7,11 +7,13 @@ for all network configuration outputs.
import pytest
+from tests.integration_tests import random_mac_address
from tests.integration_tests.instances import IntegrationInstance
DESTINATION_IP = "172.16.0.10"
GATEWAY_IP = "10.0.0.100"
+MAC_ADDRESS = random_mac_address()
NETWORK_CONFIG = """\
version: 2
@@ -22,7 +24,9 @@ ethernets:
routes:
- to: {}/32
via: {}
-""".format(DESTINATION_IP, GATEWAY_IP)
+ match:
+ macaddress: {}
+""".format(DESTINATION_IP, GATEWAY_IP, MAC_ADDRESS)
EXPECTED_ROUTE = "{} via {}".format(DESTINATION_IP, GATEWAY_IP)
@@ -31,7 +35,9 @@ EXPECTED_ROUTE = "{} via {}".format(DESTINATION_IP, GATEWAY_IP)
@pytest.mark.lxd_vm
@pytest.mark.lxd_config_dict({
"user.network-config": NETWORK_CONFIG,
+ "volatile.eth0.hwaddr": MAC_ADDRESS,
})
+@pytest.mark.lxd_use_exec
def test_static_route_to_host(client: IntegrationInstance):
route = client.execute("ip route | grep {}".format(DESTINATION_IP))
assert route.startswith(EXPECTED_ROUTE)
diff --git a/tests/integration_tests/bugs/test_lp1835584.py b/tests/integration_tests/bugs/test_lp1835584.py
new file mode 100644
index 00000000..660d2a2a
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1835584.py
@@ -0,0 +1,104 @@
+""" Integration test for LP #1835584
+
+Upstream linux kernels prior to 4.15 provide DMI product_uuid in uppercase.
+More recent kernels switched to lowercase for DMI product_uuid. Azure
+datasource uses this product_uuid as the instance-id for cloud-init.
+
+With the linux-azure-fips kernel installed in PRO FIPS images, the product
+UUID is uppercase, whereas the linux-azure cloud-optimized kernel reports the
+UUID as lowercase.
+
+In cases where product_uuid changes case, ensure cloud-init doesn't
+recreate ssh hostkeys across reboot (due to detecting an instance_id change).
+
+This currently only affects linux-azure-fips -> linux-azure on Bionic.
+This test won't run on Xenial because both linux-azure-fips and linux-azure
+report uppercase product_uuids.
+
+The test will launch a specific Bionic Ubuntu PRO FIPS image which has a
+linux-azure-fips kernel known to report product_uuid as uppercase, then
+upgrade and reboot into the linux-azure kernel, which is known to report
+product_uuid as lowercase.
+
+Across the reboot, assert that we didn't re-run config_ssh by virtue of
+seeing only one semaphore creation log entry of type:
+
+ Writing to /var/lib/cloud/instances/<UUID>/sem/config_ssh -
+
+https://bugs.launchpad.net/cloud-init/+bug/1835584
+"""
+import re
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationAzureInstance
+from tests.integration_tests.clouds import (
+ ImageSpecification, IntegrationCloud
+)
+from tests.integration_tests.conftest import get_validated_source
+
+
+IMG_AZURE_UBUNTU_PRO_FIPS_BIONIC = (
+ "Canonical:0001-com-ubuntu-pro-bionic-fips:pro-fips-18_04:18.04.202010201"
+)
+
+
+def _check_iid_insensitive_across_kernel_upgrade(
+ instance: IntegrationAzureInstance
+):
+ uuid = instance.read_from_file("/sys/class/dmi/id/product_uuid")
+ assert uuid.isupper(), (
+ "Expected uppercase UUID on Ubuntu FIPS image {}".format(
+ uuid
+ )
+ )
+ orig_kernel = instance.execute("uname -r").strip()
+ assert "azure-fips" in orig_kernel
+ result = instance.execute("apt-get update")
+ # Install a 5.4+ kernel which provides lowercase product_uuid
+ result = instance.execute("apt-get install linux-azure --assume-yes")
+ if not result.ok:
+ pytest.fail("Unable to install linux-azure kernel: {}".format(result))
+ instance.restart()
+ new_kernel = instance.execute("uname -r").strip()
+ assert orig_kernel != new_kernel
+ assert "azure-fips" not in new_kernel
+ assert "azure" in new_kernel
+ new_uuid = instance.read_from_file("/sys/class/dmi/id/product_uuid")
+ assert (
+ uuid.lower() == new_uuid
+ ), "Expected UUID on linux-azure to be lowercase of FIPS: {}".format(uuid)
+ log = instance.read_from_file("/var/log/cloud-init.log")
+ RE_CONFIG_SSH_SEMAPHORE = r"Writing.*sem/config_ssh "
+ ssh_runs = len(re.findall(RE_CONFIG_SSH_SEMAPHORE, log))
+ assert 1 == ssh_runs, "config_ssh ran too many times {}".format(ssh_runs)
+
+
+@pytest.mark.azure
+@pytest.mark.sru_next
+def test_azure_kernel_upgrade_case_insensitive_uuid(
+ session_cloud: IntegrationCloud
+):
+ cfg_image_spec = ImageSpecification.from_os_image()
+ if (cfg_image_spec.os, cfg_image_spec.release) != ("ubuntu", "bionic"):
+ pytest.skip(
+ "Test only supports ubuntu:bionic not {0.os}:{0.release}".format(
+ cfg_image_spec
+ )
+ )
+ source = get_validated_source(session_cloud)
+ if not source.installs_new_version():
+ pytest.skip(
+ "Provide CLOUD_INIT_SOURCE to install expected working cloud-init"
+ )
+ image_id = IMG_AZURE_UBUNTU_PRO_FIPS_BIONIC
+ with session_cloud.launch(
+ launch_kwargs={"image_id": image_id}
+ ) as instance:
+ # We can't use setup_image fixture here because we want to avoid
+ # taking a snapshot or cleaning the booted machine after cloud-init
+ # upgrade.
+ instance.install_new_cloud_init(
+ source, take_snapshot=False, clean=False
+ )
+ _check_iid_insensitive_across_kernel_upgrade(instance)
diff --git a/tests/integration_tests/bugs/test_lp1898997.py b/tests/integration_tests/bugs/test_lp1898997.py
index 54c88d82..bde93d06 100644
--- a/tests/integration_tests/bugs/test_lp1898997.py
+++ b/tests/integration_tests/bugs/test_lp1898997.py
@@ -10,8 +10,9 @@ network configuration, and confirms that the bridge can be used to ping the
default gateway.
"""
import pytest
+from tests.integration_tests import random_mac_address
-MAC_ADDRESS = "de:ad:be:ef:12:34"
+MAC_ADDRESS = random_mac_address()
NETWORK_CONFIG = """\
@@ -38,6 +39,7 @@ version: 2
"volatile.eth0.hwaddr": MAC_ADDRESS,
})
@pytest.mark.lxd_vm
+@pytest.mark.lxd_use_exec
@pytest.mark.not_bionic
@pytest.mark.not_xenial
@pytest.mark.sru_2020_11
diff --git a/tests/integration_tests/bugs/test_lp1901011.py b/tests/integration_tests/bugs/test_lp1901011.py
new file mode 100644
index 00000000..2b47f0a8
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1901011.py
@@ -0,0 +1,58 @@
+"""Integration test for LP: #1901011
+
+Ensure an ephemeral disk exists after boot.
+
+See https://github.com/canonical/cloud-init/pull/800
+"""
+import pytest
+
+from tests.integration_tests.clouds import IntegrationCloud
+
+
+@pytest.mark.azure
+@pytest.mark.parametrize('instance_type,is_ephemeral', [
+ ('Standard_DS1_v2', True),
+ ('Standard_D2s_v4', False),
+])
+def test_ephemeral(instance_type, is_ephemeral,
+ session_cloud: IntegrationCloud, setup_image):
+ if is_ephemeral:
+ expected_log = (
+ "Ephemeral resource disk '/dev/disk/cloud/azure_resource' exists. "
+ "Merging default Azure cloud ephemeral disk configs."
+ )
+ else:
+ expected_log = (
+ "Ephemeral resource disk '/dev/disk/cloud/azure_resource' does "
+ "not exist. Not merging default Azure cloud ephemeral disk "
+ "configs."
+ )
+
+ with session_cloud.launch(
+ launch_kwargs={'instance_type': instance_type}
+ ) as client:
+ # Verify log file
+ log = client.read_from_file('/var/log/cloud-init.log')
+ assert expected_log in log
+
+ # Verify devices
+ dev_links = client.execute('ls /dev/disk/cloud')
+ assert 'azure_root' in dev_links
+ assert 'azure_root-part1' in dev_links
+ if is_ephemeral:
+ assert 'azure_resource' in dev_links
+ assert 'azure_resource-part1' in dev_links
+
+ # Verify mounts
+ blks = client.execute('lsblk -pPo NAME,TYPE,MOUNTPOINT')
+ root_device = client.execute(
+ 'realpath /dev/disk/cloud/azure_root-part1'
+ )
+ assert 'NAME="{}" TYPE="part" MOUNTPOINT="/"'.format(
+ root_device) in blks
+ if is_ephemeral:
+ ephemeral_device = client.execute(
+ 'realpath /dev/disk/cloud/azure_resource-part1'
+ )
+ assert 'NAME="{}" TYPE="part" MOUNTPOINT="/mnt"'.format(
+ ephemeral_device) in blks
diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py
index 9eebb10a..9527a413 100644
--- a/tests/integration_tests/clouds.py
+++ b/tests/integration_tests/clouds.py
@@ -153,9 +153,8 @@ class IntegrationCloud(ABC):
}
kwargs.update(launch_kwargs)
log.info(
- "Launching instance with launch_kwargs:\n{}".format(
- "\n".join("{}={}".format(*item) for item in kwargs.items())
- )
+ "Launching instance with launch_kwargs:\n%s",
+ "\n".join("{}={}".format(*item) for item in kwargs.items())
)
pycloudlib_instance = self._perform_launch(kwargs)
@@ -245,6 +244,7 @@ class _LxdIntegrationCloud(IntegrationCloud):
integration_instance_cls = IntegrationLxdInstance
def _get_cloud_instance(self):
+ # pylint: disable=no-member
return self.pycloudlib_instance_cls(tag=self.instance_tag)
@staticmethod
@@ -260,8 +260,10 @@ class _LxdIntegrationCloud(IntegrationCloud):
'container_path': target_path,
}
log.info(
- 'Mounting source {source_path} directly onto LXD container/vm '
- 'named {name} at {container_path}'.format(**format_variables))
+ 'Mounting source %(source_path)s directly onto LXD container/vm '
+ 'named %(name)s at %(container_path)s',
+ format_variables
+ )
command = (
'lxc config device add {name} host-cloud-init disk '
'source={source_path} '
diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py
index 99dd8d9e..61ad8a71 100644
--- a/tests/integration_tests/conftest.py
+++ b/tests/integration_tests/conftest.py
@@ -124,6 +124,8 @@ def get_validated_source(
return CloudInitSource.PPA
elif os.path.isfile(str(source)):
return CloudInitSource.DEB_PACKAGE
+ elif source == "UPGRADE":
+ return CloudInitSource.UPGRADE
raise ValueError(
'Invalid value for CLOUD_INIT_SOURCE setting: {}'.format(source))
@@ -198,6 +200,9 @@ def _client(request, fixture_utils, session_cloud: IntegrationCloud):
user_data = getter('user_data')
name = getter('instance_name')
lxd_config_dict = getter('lxd_config_dict')
+ lxd_use_exec = fixture_utils.closest_marker_args_or(
+ request, 'lxd_use_exec', None
+ )
launch_kwargs = {}
if name is not None:
@@ -206,10 +211,18 @@ def _client(request, fixture_utils, session_cloud: IntegrationCloud):
if not isinstance(session_cloud, _LxdIntegrationCloud):
pytest.skip("lxd_config_dict requires LXD")
launch_kwargs["config_dict"] = lxd_config_dict
+ if lxd_use_exec is not None:
+ if not isinstance(session_cloud, _LxdIntegrationCloud):
+ pytest.skip("lxd_use_exec requires LXD")
+ launch_kwargs["execute_via_ssh"] = False
with session_cloud.launch(
user_data=user_data, launch_kwargs=launch_kwargs
) as instance:
+ if lxd_use_exec is not None:
+ # Launch kwargs do not affect an already-existing instance, so set
+ # the attribute here too; we still pass the launch kwarg so waiting works
+ instance.execute_via_ssh = False
previous_failures = request.session.testsfailed
yield instance
test_failed = request.session.testsfailed - previous_failures > 0
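
Per the marker description added to tox.ini at the end of this diff, `lxd_use_exec` switches `execute` from SSH to `lxc exec`. A hypothetical test opting in might look like this (sketch only; the fixture wiring is what the hunk above adds):

    import pytest

    @pytest.mark.lxd_use_exec
    def test_command_without_ssh(client):
        # conftest sets execute_via_ssh=False for this instance, so the
        # command below runs through `lxc exec` rather than SSH.
        assert client.execute('true').ok
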
diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py
index 0d1e1aef..055ec758 100644
--- a/tests/integration_tests/instances.py
+++ b/tests/integration_tests/instances.py
@@ -39,6 +39,7 @@ class CloudInitSource(Enum):
PROPOSED = 3
PPA = 4
DEB_PACKAGE = 5
+ UPGRADE = 6
def installs_new_version(self):
if self.name in [self.NONE.name, self.IN_PLACE.name]:
@@ -115,7 +116,8 @@ class IntegrationInstance:
def install_new_cloud_init(
self,
source: CloudInitSource,
- take_snapshot=True
+ take_snapshot=True,
+ clean=True,
):
if source == CloudInitSource.DEB_PACKAGE:
self.install_deb()
@@ -123,6 +125,8 @@ class IntegrationInstance:
self.install_ppa()
elif source == CloudInitSource.PROPOSED:
self.install_proposed_image()
+ elif source == CloudInitSource.UPGRADE:
+ self.upgrade_cloud_init()
else:
raise Exception(
"Specified to install {} which isn't supported here".format(
@@ -130,7 +134,8 @@ class IntegrationInstance:
)
version = self.execute('cloud-init -v').split()[-1]
log.info('Installed cloud-init version: %s', version)
- self.instance.clean()
+ if clean:
+ self.instance.clean()
if take_snapshot:
snapshot_id = self.snapshot()
self.cloud.snapshot_id = snapshot_id
@@ -166,6 +171,11 @@ class IntegrationInstance:
remote_script = 'dpkg -i {path}'.format(path=remote_path)
self.execute(remote_script)
+ def upgrade_cloud_init(self):
+ log.info('Upgrading cloud-init to the latest version in the archive')
+ self.execute("apt-get update -q")
+ self.execute("apt-get install -qy cloud-init")
+
def __enter__(self):
return self
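
With the new UPGRADE member, a test can move an instance to the archive version in place. A hedged usage sketch, assuming an already-launched `instance`:

    from tests.integration_tests.instances import CloudInitSource

    # Upgrade via apt; skip clean() so existing logs/artifacts survive,
    # and skip the snapshot when the instance won't be reused.
    instance.install_new_cloud_init(
        CloudInitSource.UPGRADE, take_snapshot=False, clean=False)
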
diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py
index 22b4fdda..157d34ad 100644
--- a/tests/integration_tests/integration_settings.py
+++ b/tests/integration_tests/integration_settings.py
@@ -59,6 +59,8 @@ EXISTING_INSTANCE_ID = None
# code.
# PROPOSED
# Install from the Ubuntu proposed repo
+# UPGRADE
+# Upgrade cloud-init to the version in the Ubuntu archive
# <ppa repo>, e.g., ppa:cloud-init-dev/proposed
# Install from a PPA. It MUST start with 'ppa:'
# <file path>
@@ -92,6 +94,7 @@ KEYPAIR_NAME = None
##################################################################
# Bring in any user-file defined settings
try:
+ # pylint: disable=wildcard-import,unused-wildcard-import
from tests.integration_tests.user_settings import * # noqa
except ImportError:
pass
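
Because of the wildcard import above, any of these constants can be overridden from a local, uncommitted user_settings.py; for example (hypothetical file contents):

    # tests/integration_tests/user_settings.py -- not checked in
    CLOUD_INIT_SOURCE = 'UPGRADE'  # validated by get_validated_source()
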
diff --git a/tests/integration_tests/modules/test_apt.py b/tests/integration_tests/modules/test_apt.py
new file mode 100644
index 00000000..5e3d474c
--- /dev/null
+++ b/tests/integration_tests/modules/test_apt.py
@@ -0,0 +1,291 @@
+"""Series of integration tests covering apt functionality."""
+import re
+
+import pytest
+
+from tests.integration_tests.clouds import ImageSpecification
+from tests.integration_tests.instances import IntegrationInstance
+
+
+USER_DATA = """\
+#cloud-config
+apt:
+ conf: |
+ APT {
+ Get {
+ Assume-Yes "true";
+ Fix-Broken "true";
+ }
+ }
+ proxy: "http://proxy.internal:3128"
+ http_proxy: "http://squid.internal:3128"
+ ftp_proxy: "ftp://squid.internal:3128"
+ https_proxy: "https://squid.internal:3128"
+ primary:
+ - arches: [default]
+ uri: http://badarchive.ubuntu.com/ubuntu
+ security:
+ - arches: [default]
+ uri: http://badsecurity.ubuntu.com/ubuntu
+ sources_list: |
+ deb $MIRROR $RELEASE main restricted
+ deb-src $MIRROR $RELEASE main restricted
+ deb $PRIMARY $RELEASE universe restricted
+ deb-src $PRIMARY $RELEASE universe restricted
+ deb $SECURITY $RELEASE-security multiverse
+ deb-src $SECURITY $RELEASE-security multiverse
+ sources:
+ test_keyserver:
+ keyid: 72600DB15B8E4C8B1964B868038ACC97C660A937
+ keyserver: keyserver.ubuntu.com
+ source: "deb http://ppa.launchpad.net/cloud-init-raharper/curtin-dev/ubuntu $RELEASE main"
+ test_ppa:
+ keyid: 441614D8
+ keyserver: keyserver.ubuntu.com
+ source: "ppa:simplestreams-dev/trunk"
+ test_key:
+ source: "deb http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu $RELEASE main"
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: SKS 1.1.6
+ Comment: Hostname: keyserver.ubuntu.com
+
+ mQINBFbZRUIBEAC+A0PIKYBP9kLC4hQtRrffRS11uLo8/BdtmOdrlW0hpPHzCfKnjR3tvSEI
+ lqPHG1QrrjAXKZDnZMRz+h/px7lUztvytGzHPSJd5ARUzAyjyRezUhoJ3VSCxrPqx62avuWf
+ RfoJaIeHfDehL5/dTVkyiWxfVZ369ZX6JN2AgLsQTeybTQ75+2z0xPrrhnGmgh6g0qTYcAaq
+ M5ONOGiqeSBX/Smjh6ALy5XkhUiFGLsI7Yluf6XSICY/x7gd6RAfgSIQrUTNMoS1sqhT4aot
+ +xvOfQy8ySkfAK4NddXql6E/+ZqTmBY/Lr0YklFBy8jGT+UysfiIznPMIwbmgq5Li7BtDDtX
+ b8Uyi4edPpjtextezfXYn4NVIpPL5dPZS/FXh4HpzyH0pYCfrH4QDGA7i52AGmhpiOFjJMo6
+ N33sdjZHOH/2Vyp+QZaQnsdUAi1N4M6c33tQbpIScn1SY+El8z5JDA4PBzkw8HpLCi1gGoa6
+ V4kfbWqXXbGAJFkLkP/vc4+pY9axOlmCkJg7xCPwhI75y1cONgovhz+BEXOzolh5KZuGbGbj
+ xe0wva5DLBeIg7EQFf+99pOS7Syby3Xpm6ZbswEFV0cllK4jf/QMjtfInxobuMoI0GV0bE5l
+ WlRtPCK5FnbHwxi0wPNzB/5fwzJ77r6HgPrR0OkT0lWmbUyoOQARAQABtC1MYXVuY2hwYWQg
+ UFBBIGZvciBjbG91ZCBpbml0IGRldmVsb3BtZW50IHRlYW2JAjgEEwECACIFAlbZRUICGwMG
+ CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEAg9Bvvk0wTfHfcP/REK5N2s1JYc69qEa9ZN
+ o6oi+A7l6AYw+ZY88O5TJe7F9otv5VXCIKSUT0Vsepjgf0mtXAgf/sb2lsJn/jp7tzgov3YH
+ vSrkTkRydz8xcA87gwQKePuvTLxQpftF4flrBxgSueIn5O/tPrBOxLz7EVYBc78SKg9aj9L2
+ yUp+YuNevlwfZCTYeBb9r3FHaab2HcgkwqYch66+nKYfwiLuQ9NzXXm0Wn0JcEQ6pWvJscbj
+ C9BdawWovfvMK5/YLfI6Btm7F4mIpQBdhSOUp/YXKmdvHpmwxMCN2QhqYK49SM7qE9aUDbJL
+ arppSEBtlCLWhRBZYLTUna+BkuQ1bHz4St++XTR49Qd7vDERALpApDjB2dxPfMiBzCMwQQyq
+ uy13exU8o2ETLg+dZSLfDTzrBNsBFmXlw8WW17nTISYdKeGKL+QdlUjpzdwUMMzHhAO8SmMH
+ zjeSlDSRMXBJFAFSbCl7EwmMKa3yVX0zInT91fNllZ3iatAmtVdqVH/BFQfTIMH2ET7A8WzJ
+ ZzVSuMRhqoKdr5AMcHuJGPUoVkVJHQA+NNvEiXSysF3faL7jmKapmUwrhpYYX2H8pf+VMu2e
+ cLflKTI28dl+ZQ4Pl/aVsxrti/pzhdYy05Sn5ddtySyIkvo8L1cU5MWpbvSlFPkTstBUDLBf
+ pb0uBy+g0oxJQg15
+ =uy53
+ -----END PGP PUBLIC KEY BLOCK-----
+apt_pipelining: os
+""" # noqa: E501
+
+EXPECTED_REGEXES = [
+ r"deb http://badarchive.ubuntu.com/ubuntu [a-z]+ main restricted",
+ r"deb-src http://badarchive.ubuntu.com/ubuntu [a-z]+ main restricted",
+ r"deb http://badarchive.ubuntu.com/ubuntu [a-z]+ universe restricted",
+ r"deb-src http://badarchive.ubuntu.com/ubuntu [a-z]+ universe restricted",
+ r"deb http://badsecurity.ubuntu.com/ubuntu [a-z]+-security multiverse",
+ r"deb-src http://badsecurity.ubuntu.com/ubuntu [a-z]+-security multiverse",
+]
+
+TEST_KEYSERVER_KEY = """\
+pub rsa1024 2013-12-09 [SC]
+ 7260 0DB1 5B8E 4C8B 1964 B868 038A CC97 C660 A937
+uid [ unknown] Launchpad PPA for Ryan Harper
+"""
+
+TEST_PPA_KEY = """\
+/etc/apt/trusted.gpg.d/simplestreams-dev_ubuntu_trunk.gpg
+---------------------------------------------------------
+pub rsa4096 2016-05-04 [SC]
+ 3552 C902 B4DD F7BD 3842 1821 015D 28D7 4416 14D8
+uid [ unknown] Launchpad PPA for simplestreams-dev
+"""
+
+TEST_KEY = """\
+pub rsa4096 2016-03-04 [SC]
+ 1FF0 D853 5EF7 E719 E5C8 1B9C 083D 06FB E4D3 04DF
+uid [ unknown] Launchpad PPA for cloud init development team
+"""
+
+
+@pytest.mark.ci
+@pytest.mark.ubuntu
+@pytest.mark.user_data(USER_DATA)
+class TestApt:
+ def test_sources_list(self, class_client: IntegrationInstance):
+ """Integration test for the apt module's `sources_list` functionality.
+
+ This test specifies a ``sources_list`` and then checks that (a) the
+ expected number of sources.list entries is present, and (b) that each
+ expected line appears in the file.
+
+ (This is ported from
+ `tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml`.)
+ """
+ sources_list = class_client.read_from_file('/etc/apt/sources.list')
+ assert 6 == len(sources_list.rstrip().split('\n'))
+
+ for expected_re in EXPECTED_REGEXES:
+ assert re.search(expected_re, sources_list) is not None
+
+ def test_apt_conf(self, class_client: IntegrationInstance):
+ """Test the apt conf functionality.
+
+ Ported from tests/cloud_tests/testcases/modules/apt_configure_conf.py
+ """
+ apt_config = class_client.read_from_file(
+ '/etc/apt/apt.conf.d/94cloud-init-config'
+ )
+ assert 'Assume-Yes "true";' in apt_config
+ assert 'Fix-Broken "true";' in apt_config
+
+ def test_apt_proxy(self, class_client: IntegrationInstance):
+ """Test the apt proxy functionality.
+
+ Ported from tests/cloud_tests/testcases/modules/apt_configure_proxy.py
+ """
+ out = class_client.read_from_file(
+ '/etc/apt/apt.conf.d/90cloud-init-aptproxy')
+ assert 'Acquire::http::Proxy "http://proxy.internal:3128";' in out
+ assert 'Acquire::http::Proxy "http://squid.internal:3128";' in out
+ assert 'Acquire::ftp::Proxy "ftp://squid.internal:3128";' in out
+ assert 'Acquire::https::Proxy "https://squid.internal:3128";' in out
+
+ def test_ppa_source(self, class_client: IntegrationInstance):
+ """Test the apt ppa functionality.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_sources_ppa.py
+ """
+ release = ImageSpecification.from_os_image().release
+ ppa_path_contents = class_client.read_from_file(
+ '/etc/apt/sources.list.d/'
+ 'simplestreams-dev-ubuntu-trunk-{}.list'.format(release)
+ )
+
+ assert (
+ 'http://ppa.launchpad.net/simplestreams-dev/trunk/ubuntu'
+ ) in ppa_path_contents
+
+ keys = class_client.execute('apt-key finger')
+ assert TEST_PPA_KEY in keys
+
+ def test_key(self, class_client: IntegrationInstance):
+ """Test the apt key functionality.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_sources_key.py
+ """
+ test_archive_contents = class_client.read_from_file(
+ '/etc/apt/sources.list.d/test_key.list'
+ )
+
+ assert (
+ 'http://ppa.launchpad.net/cloud-init-dev/test-archive/ubuntu'
+ ) in test_archive_contents
+
+ keys = class_client.execute('apt-key finger')
+ assert TEST_KEY in keys
+
+ def test_keyserver(self, class_client: IntegrationInstance):
+ """Test the apt keyserver functionality.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_sources_keyserver.py
+ """
+ test_keyserver_contents = class_client.read_from_file(
+ '/etc/apt/sources.list.d/test_keyserver.list'
+ )
+
+ assert (
+ 'http://ppa.launchpad.net/cloud-init-raharper/curtin-dev/ubuntu'
+ ) in test_keyserver_contents
+
+ keys = class_client.execute('apt-key finger')
+ assert TEST_KEYSERVER_KEY in keys
+
+ def test_os_pipelining(self, class_client: IntegrationInstance):
+ """Test 'os' settings does not write apt config file.
+
+ Ported from tests/cloud_tests/testcases/modules/apt_pipelining_os.py
+ """
+ conf_exists = class_client.execute(
+ 'test -f /etc/apt/apt.conf.d/90cloud-init-pipelining'
+ ).ok
+ assert conf_exists is False
+
+
+DEFAULT_DATA = """\
+#cloud-config
+apt:
+ primary:
+ - arches:
+ - default
+ security:
+ - arches:
+ - default
+"""
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(DEFAULT_DATA)
+class TestDefaults:
+ def test_primary(self, class_client: IntegrationInstance):
+ """Test apt default primary sources.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_primary.py
+ """
+ sources_list = class_client.read_from_file('/etc/apt/sources.list')
+ assert 'deb http://archive.ubuntu.com/ubuntu' in sources_list
+
+ def test_security(self, class_client: IntegrationInstance):
+ """Test apt default security sources.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_security.py
+ """
+ sources_list = class_client.read_from_file('/etc/apt/sources.list')
+
+ # 3 lines from main, universe, and multiverse
+ assert 3 == sources_list.count('deb http://security.ubuntu.com/ubuntu')
+ assert 3 == sources_list.count(
+ '# deb-src http://security.ubuntu.com/ubuntu'
+ )
+
+
+DISABLED_DATA = """\
+#cloud-config
+apt:
+ disable_suites:
+ - $RELEASE
+ - $RELEASE-updates
+ - $RELEASE-backports
+ - $RELEASE-security
+apt_pipelining: false
+"""
+
+
+@pytest.mark.ubuntu
+@pytest.mark.user_data(DISABLED_DATA)
+class TestDisabled:
+ def test_disable_suites(self, class_client: IntegrationInstance):
+ """Test disabling of apt suites.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_configure_disable_suites.py
+ """
+ sources_list = class_client.execute(
+ "cat /etc/apt/sources.list | grep -v '^#'"
+ ).strip()
+ assert '' == sources_list
+
+ def test_disable_apt_pipelining(self, class_client: IntegrationInstance):
+ """Test disabling of apt pipelining.
+
+ Ported from
+ tests/cloud_tests/testcases/modules/apt_pipelining_disable.py
+ """
+ conf = class_client.read_from_file(
+ '/etc/apt/apt.conf.d/90cloud-init-pipelining'
+ )
+ assert 'Acquire::http::Pipeline-Depth "0";' in conf
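
All of the TestApt methods share one booted instance: the user_data marker is applied at class scope and consumed by the class_client fixture, so the whole battery of apt assertions costs a single launch. The same pattern in miniature, with a hypothetical config and assertion:

    import pytest

    USER_DATA = """\
    #cloud-config
    apt:
      preserve_sources_list: true
    """

    @pytest.mark.ubuntu
    @pytest.mark.user_data(USER_DATA)
    class TestPreserved:
        def test_sources_list_untouched(self, class_client):
            sources = class_client.read_from_file('/etc/apt/sources.list')
            assert 'ubuntu' in sources
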
diff --git a/tests/integration_tests/modules/test_apt_configure_sources_list.py b/tests/integration_tests/modules/test_apt_configure_sources_list.py
deleted file mode 100644
index 28cbe19f..00000000
--- a/tests/integration_tests/modules/test_apt_configure_sources_list.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""Integration test for the apt module's ``sources_list`` functionality.
-
-This test specifies a ``sources_list`` and then checks that (a) the expected
-number of sources.list entries is present, and (b) that each expected line
-appears in the file.
-
-(This is ported from
-``tests/cloud_tests/testcases/modules/apt_configure_sources_list.yaml``.)"""
-import re
-
-import pytest
-
-
-USER_DATA = """\
-#cloud-config
-apt:
- primary:
- - arches: [default]
- uri: http://archive.ubuntu.com/ubuntu
- security:
- - arches: [default]
- uri: http://security.ubuntu.com/ubuntu
- sources_list: |
- deb $MIRROR $RELEASE main restricted
- deb-src $MIRROR $RELEASE main restricted
- deb $PRIMARY $RELEASE universe restricted
- deb-src $PRIMARY $RELEASE universe restricted
- deb $SECURITY $RELEASE-security multiverse
- deb-src $SECURITY $RELEASE-security multiverse
-"""
-
-EXPECTED_REGEXES = [
- r"deb http://archive.ubuntu.com/ubuntu [a-z].* main restricted",
- r"deb-src http://archive.ubuntu.com/ubuntu [a-z].* main restricted",
- r"deb http://archive.ubuntu.com/ubuntu [a-z].* universe restricted",
- r"deb-src http://archive.ubuntu.com/ubuntu [a-z].* universe restricted",
- r"deb http://security.ubuntu.com/ubuntu [a-z].*security multiverse",
- r"deb-src http://security.ubuntu.com/ubuntu [a-z].*security multiverse",
-]
-
-
-@pytest.mark.ci
-@pytest.mark.ubuntu
-class TestAptConfigureSourcesList:
-
- @pytest.mark.user_data(USER_DATA)
- def test_sources_list(self, client):
- sources_list = client.read_from_file("/etc/apt/sources.list")
- assert 6 == len(sources_list.rstrip().split('\n'))
-
- for expected_re in EXPECTED_REGEXES:
- assert re.search(expected_re, sources_list) is not None
diff --git a/tests/integration_tests/modules/test_keys_to_console.py b/tests/integration_tests/modules/test_keys_to_console.py
new file mode 100644
index 00000000..298c9e6d
--- /dev/null
+++ b/tests/integration_tests/modules/test_keys_to_console.py
@@ -0,0 +1,48 @@
+"""Integration tests for the cc_keys_to_console module.
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/keys_to_console.yaml``.)"""
+import pytest
+
+BLACKLIST_USER_DATA = """\
+#cloud-config
+ssh_fp_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
+ssh_key_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
+"""
+
+DISABLED_USER_DATA = """\
+#cloud-config
+ssh:
+ emit_keys_to_console: false
+"""
+
+
+@pytest.mark.user_data(BLACKLIST_USER_DATA)
+class TestKeysToConsoleBlacklist:
+ """Test that the blacklist options work as expected."""
+ @pytest.mark.parametrize("key_type", ["DSA", "ECDSA"])
+ def test_excluded_keys(self, class_client, key_type):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "({})".format(key_type) not in syslog
+
+ @pytest.mark.parametrize("key_type", ["ED25519", "RSA"])
+ def test_included_keys(self, class_client, key_type):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "({})".format(key_type) in syslog
+
+
+@pytest.mark.user_data(DISABLED_USER_DATA)
+class TestKeysToConsoleDisabled:
+ """Test that output can be fully disabled."""
+ @pytest.mark.parametrize("key_type", ["DSA", "ECDSA", "ED25519", "RSA"])
+ def test_keys_excluded(self, class_client, key_type):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "({})".format(key_type) not in syslog
+
+ def test_header_excluded(self, class_client):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "BEGIN SSH HOST KEY FINGERPRINTS" not in syslog
+
+ def test_footer_excluded(self, class_client):
+ syslog = class_client.read_from_file("/var/log/syslog")
+ assert "END SSH HOST KEY FINGERPRINTS" not in syslog
diff --git a/tests/integration_tests/modules/test_power_state_change.py b/tests/integration_tests/modules/test_power_state_change.py
index 32dfc86d..eebe6608 100644
--- a/tests/integration_tests/modules/test_power_state_change.py
+++ b/tests/integration_tests/modules/test_power_state_change.py
@@ -65,7 +65,7 @@ class TestPowerChange:
with session_cloud.launch(
user_data=USER_DATA.format(
delay=delay, mode=mode, timeout=timeout, condition='true'),
- wait=False
+ launch_kwargs={'wait': False},
) as instance:
if mode == 'reboot':
_detect_reboot(instance)
diff --git a/tests/integration_tests/modules/test_seed_random_data.py b/tests/integration_tests/modules/test_seed_random_data.py
index f6a67c19..94e982e0 100644
--- a/tests/integration_tests/modules/test_seed_random_data.py
+++ b/tests/integration_tests/modules/test_seed_random_data.py
@@ -24,5 +24,7 @@ class TestSeedRandomData:
@pytest.mark.user_data(USER_DATA)
def test_seed_random_data(self, client):
- seed_output = client.read_from_file("/root/seed")
- assert seed_output.startswith("MYUb34023nD:LFDK10913jk;dfnk:Df")
+ # Only read the first 31 characters, because the rest could be
+ # binary data
+ result = client.execute("head -c 31 < /root/seed")
+ assert result.startswith("MYUb34023nD:LFDK10913jk;dfnk:Df")
diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py
index 233a574b..c20cb3c1 100644
--- a/tests/integration_tests/test_upgrade.py
+++ b/tests/integration_tests/test_upgrade.py
@@ -87,7 +87,7 @@ def test_upgrade(session_cloud: IntegrationCloud):
netcfg_path = '/etc/network/interfaces.d/50-cloud-init.cfg'
with session_cloud.launch(
- launch_kwargs=launch_kwargs, user_data=USER_DATA, wait=True,
+ launch_kwargs=launch_kwargs, user_data=USER_DATA,
) as instance:
_output_to_compare(instance, before_path, netcfg_path)
instance.install_new_cloud_init(source, take_snapshot=False)
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index fb2b55e8..8c968ae9 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -33,11 +33,12 @@ INSTANCE_ID = "i-testing"
class FakeDataSource(sources.DataSource):
- def __init__(self, userdata=None, vendordata=None):
+ def __init__(self, userdata=None, vendordata=None, vendordata2=None):
sources.DataSource.__init__(self, {}, None, None)
self.metadata = {'instance-id': INSTANCE_ID}
self.userdata_raw = userdata
self.vendordata_raw = vendordata
+ self.vendordata2_raw = vendordata2
def count_messages(root):
@@ -105,13 +106,14 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
self.assertEqual('qux', cc['baz'])
self.assertEqual('qux2', cc['bar'])
- def test_simple_jsonp_vendor_and_user(self):
+ def test_simple_jsonp_vendor_and_vendor2_and_user(self):
# test that user-data wins over vendor
user_blob = '''
#cloud-config-jsonp
[
{ "op": "add", "path": "/baz", "value": "qux" },
- { "op": "add", "path": "/bar", "value": "qux2" }
+ { "op": "add", "path": "/bar", "value": "qux2" },
+ { "op": "add", "path": "/foobar", "value": "qux3" }
]
'''
vendor_blob = '''
@@ -119,12 +121,23 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
[
{ "op": "add", "path": "/baz", "value": "quxA" },
{ "op": "add", "path": "/bar", "value": "quxB" },
- { "op": "add", "path": "/foo", "value": "quxC" }
+ { "op": "add", "path": "/foo", "value": "quxC" },
+ { "op": "add", "path": "/corge", "value": "quxEE" }
+]
+'''
+ vendor2_blob = '''
+#cloud-config-jsonp
+[
+ { "op": "add", "path": "/corge", "value": "quxD" },
+ { "op": "add", "path": "/grault", "value": "quxFF" },
+ { "op": "add", "path": "/foobar", "value": "quxGG" }
]
'''
self.reRoot()
initer = stages.Init()
- initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
+ initer.datasource = FakeDataSource(user_blob,
+ vendordata=vendor_blob,
+ vendordata2=vendor2_blob)
initer.read_cfg()
initer.initialize()
initer.fetch()
@@ -138,9 +151,15 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
(_which_ran, _failures) = mods.run_section('cloud_init_modules')
cfg = mods.cfg
self.assertIn('vendor_data', cfg)
+ self.assertIn('vendor_data2', cfg)
+ # Confirm that vendordata2 overrides vendordata, and that
+ # userdata overrides both
self.assertEqual('qux', cfg['baz'])
self.assertEqual('qux2', cfg['bar'])
+ self.assertEqual('qux3', cfg['foobar'])
self.assertEqual('quxC', cfg['foo'])
+ self.assertEqual('quxD', cfg['corge'])
+ self.assertEqual('quxFF', cfg['grault'])
def test_simple_jsonp_no_vendor_consumed(self):
# make sure that vendor data is not consumed
@@ -294,6 +313,10 @@ run:
#!/bin/bash
echo "test"
'''
+ vendor2_blob = '''
+#!/bin/bash
+echo "dynamic test"
+'''
user_blob = '''
#cloud-config
@@ -303,7 +326,9 @@ vendor_data:
'''
new_root = self.reRoot()
initer = stages.Init()
- initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
+ initer.datasource = FakeDataSource(user_blob,
+ vendordata=vendor_blob,
+ vendordata2=vendor2_blob)
initer.read_cfg()
initer.initialize()
initer.fetch()
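
The assertions above encode the precedence order vendordata < vendordata2 < userdata. cloud-init consumes #cloud-config-jsonp parts as RFC 6902 JSON patches; a rough standalone illustration of that precedence using the jsonpatch library:

    import jsonpatch

    vendor = [{"op": "add", "path": "/corge", "value": "quxEE"}]
    vendor2 = [{"op": "add", "path": "/corge", "value": "quxD"}]
    user = [{"op": "add", "path": "/foobar", "value": "qux3"}]

    cfg = {}
    for patch in (vendor, vendor2, user):  # lowest -> highest precedence
        cfg = jsonpatch.apply_patch(cfg, patch)
    assert cfg == {'corge': 'quxD', 'foobar': 'qux3'}
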
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index dc615309..f597c723 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -201,6 +201,7 @@ IMDS_NETWORK_METADATA = {
}
MOCKPATH = 'cloudinit.sources.DataSourceAzure.'
+EXAMPLE_UUID = 'd0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8'
class TestParseNetworkConfig(CiTestCase):
@@ -630,7 +631,7 @@ scbus-1 on xpt0 bus 0
return dsaz
def _get_ds(self, data, agent_command=None, distro='ubuntu',
- apply_network=None):
+ apply_network=None, instance_id=None):
def dsdevs():
return data.get('dsdevs', [])
@@ -659,7 +660,10 @@ scbus-1 on xpt0 bus 0
self.m_ephemeral_dhcpv4 = mock.MagicMock()
self.m_ephemeral_dhcpv4_with_reporting = mock.MagicMock()
- self.instance_id = 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8'
+ if instance_id:
+ self.instance_id = instance_id
+ else:
+ self.instance_id = EXAMPLE_UUID
def _dmi_mocks(key):
if key == 'system-uuid':
@@ -910,7 +914,7 @@ scbus-1 on xpt0 bus 0
'azure_data': {
'configurationsettype': 'LinuxProvisioningConfiguration'},
'imds': NETWORK_METADATA,
- 'instance-id': 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8',
+ 'instance-id': EXAMPLE_UUID,
'local-hostname': u'myhost',
'random_seed': 'wild'}
@@ -1350,23 +1354,51 @@ scbus-1 on xpt0 bus 0
for mypk in mypklist:
self.assertIn(mypk['value'], dsrc.metadata['public-keys'])
- def test_default_ephemeral(self):
- # make sure the ephemeral device works
+ def test_default_ephemeral_configs_ephemeral_exists(self):
+ # make sure the ephemeral configs are correct when the disk is present
odata = {}
data = {'ovfcontent': construct_valid_ovf_env(data=odata),
'sys_cfg': {}}
- dsrc = self._get_ds(data)
- ret = dsrc.get_data()
- self.assertTrue(ret)
- cfg = dsrc.get_config_obj()
+ orig_exists = dsaz.os.path.exists
+
+ def changed_exists(path):
+ return (True if path == dsaz.RESOURCE_DISK_PATH
+ else orig_exists(path))
+
+ with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists):
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ cfg = dsrc.get_config_obj()
+
+ self.assertEqual(dsrc.device_name_to_device("ephemeral0"),
+ dsaz.RESOURCE_DISK_PATH)
+ assert 'disk_setup' in cfg
+ assert 'fs_setup' in cfg
+ self.assertIsInstance(cfg['disk_setup'], dict)
+ self.assertIsInstance(cfg['fs_setup'], list)
+
+ def test_default_ephemeral_configs_ephemeral_does_not_exist(self):
+ # make sure the ephemeral configs are correct when the disk is absent
+ odata = {}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': {}}
- self.assertEqual(dsrc.device_name_to_device("ephemeral0"),
- dsaz.RESOURCE_DISK_PATH)
- assert 'disk_setup' in cfg
- assert 'fs_setup' in cfg
- self.assertIsInstance(cfg['disk_setup'], dict)
- self.assertIsInstance(cfg['fs_setup'], list)
+ orig_exists = dsaz.os.path.exists
+
+ def changed_exists(path):
+ return (False if path == dsaz.RESOURCE_DISK_PATH
+ else orig_exists(path))
+
+ with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists):
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ cfg = dsrc.get_config_obj()
+
+ assert 'disk_setup' not in cfg
+ assert 'fs_setup' not in cfg
def test_provide_disk_aliases(self):
# Make sure that user can affect disk aliases
@@ -1613,6 +1645,32 @@ scbus-1 on xpt0 bus 0
self.assertTrue(ret)
self.assertEqual('value', dsrc.metadata['test'])
+ def test_instance_id_case_insensitive(self):
+ """Return the previous iid when current is a case-insensitive match."""
+ lower_iid = EXAMPLE_UUID.lower()
+ upper_iid = EXAMPLE_UUID.upper()
+ # lowercase current UUID
+ ds = self._get_ds(
+ {'ovfcontent': construct_valid_ovf_env()}, instance_id=lower_iid
+ )
+ # UPPERCASE previous
+ write_file(
+ os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
+ upper_iid)
+ ds.get_data()
+ self.assertEqual(upper_iid, ds.metadata['instance-id'])
+
+ # UPPERCASE current UUID
+ ds = self._get_ds(
+ {'ovfcontent': construct_valid_ovf_env()}, instance_id=upper_iid
+ )
+ # lowercase previous
+ write_file(
+ os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
+ lower_iid)
+ ds.get_data()
+ self.assertEqual(lower_iid, ds.metadata['instance-id'])
+
def test_instance_id_endianness(self):
"""Return the previous iid when dmi uuid is the byteswapped iid."""
ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
@@ -1628,8 +1686,7 @@ scbus-1 on xpt0 bus 0
os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
'644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8')
ds.get_data()
- self.assertEqual(
- 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', ds.metadata['instance-id'])
+ self.assertEqual(self.instance_id, ds.metadata['instance-id'])
def test_instance_id_from_dmidecode_used(self):
ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
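
The new case-insensitivity test pins down that a cached instance-id differing from the current DMI UUID only by case is treated as the same instance. The comparison it exercises is essentially this (a sketch; the real check lives in DataSourceAzure):

    def is_same_instance_id(previous_iid, current_iid):
        # 'D0DF4C54-...' and 'd0df4c54-...' identify the same instance.
        return previous_iid.lower() == current_iid.lower()
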
diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py
index 4ab5d471..5912f7ee 100644
--- a/tests/unittests/test_datasource/test_common.py
+++ b/tests/unittests/test_datasource/test_common.py
@@ -27,6 +27,7 @@ from cloudinit.sources import (
DataSourceRbxCloud as RbxCloud,
DataSourceScaleway as Scaleway,
DataSourceSmartOS as SmartOS,
+ DataSourceUpCloud as UpCloud,
)
from cloudinit.sources import DataSourceNone as DSNone
@@ -48,6 +49,7 @@ DEFAULT_LOCAL = [
OpenStack.DataSourceOpenStackLocal,
RbxCloud.DataSourceRbxCloud,
Scaleway.DataSourceScaleway,
+ UpCloud.DataSourceUpCloudLocal,
]
DEFAULT_NETWORK = [
@@ -63,6 +65,7 @@ DEFAULT_NETWORK = [
NoCloud.DataSourceNoCloudNet,
OpenStack.DataSourceOpenStack,
OVF.DataSourceOVFNet,
+ UpCloud.DataSourceUpCloud,
]
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
index 415755aa..478f3503 100644
--- a/tests/unittests/test_datasource/test_openstack.py
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -40,6 +40,9 @@ USER_DATA = b'#!/bin/sh\necho This is user data\n'
VENDOR_DATA = {
'magic': '',
}
+VENDOR_DATA2 = {
+ 'static': {}
+}
OSTACK_META = {
'availability_zone': 'nova',
'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
@@ -60,6 +63,7 @@ OS_FILES = {
{'links': [], 'networks': [], 'services': []}),
'openstack/latest/user_data': USER_DATA,
'openstack/latest/vendor_data.json': json.dumps(VENDOR_DATA),
+ 'openstack/latest/vendor_data2.json': json.dumps(VENDOR_DATA2),
}
EC2_FILES = {
'latest/user-data': USER_DATA,
@@ -142,6 +146,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
_register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
f = _read_metadata_service()
self.assertEqual(VENDOR_DATA, f.get('vendordata'))
+ self.assertEqual(VENDOR_DATA2, f.get('vendordata2'))
self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
self.assertEqual(2, len(f['files']))
@@ -163,6 +168,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
_register_uris(self.VERSION, {}, {}, OS_FILES)
f = _read_metadata_service()
self.assertEqual(VENDOR_DATA, f.get('vendordata'))
+ self.assertEqual(VENDOR_DATA2, f.get('vendordata2'))
self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
self.assertEqual(USER_DATA, f.get('userdata'))
@@ -195,6 +201,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
_register_uris(self.VERSION, {}, {}, os_files)
f = _read_metadata_service()
self.assertEqual(VENDOR_DATA, f.get('vendordata'))
+ self.assertEqual(VENDOR_DATA2, f.get('vendordata2'))
self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
self.assertFalse(f.get('userdata'))
@@ -210,6 +217,17 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
self.assertFalse(f.get('vendordata'))
+ def test_vendordata2_empty(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith('vendor_data2.json'):
+ os_files.pop(k, None)
+ _register_uris(self.VERSION, {}, {}, os_files)
+ f = _read_metadata_service()
+ self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg'])
+ self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
+ self.assertFalse(f.get('vendordata2'))
+
def test_vendordata_invalid(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
@@ -218,6 +236,14 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
_register_uris(self.VERSION, {}, {}, os_files)
self.assertRaises(BrokenMetadata, _read_metadata_service)
+ def test_vendordata2_invalid(self):
+ os_files = copy.deepcopy(OS_FILES)
+ for k in list(os_files.keys()):
+ if k.endswith('vendor_data2.json'):
+ os_files[k] = '{' # some invalid json
+ _register_uris(self.VERSION, {}, {}, os_files)
+ self.assertRaises(BrokenMetadata, _read_metadata_service)
+
def test_metadata_invalid(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
@@ -246,6 +272,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertEqual(USER_DATA, ds_os.userdata_raw)
self.assertEqual(2, len(ds_os.files))
self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure)
+ self.assertEqual(VENDOR_DATA2, ds_os.vendordata2_pure)
self.assertIsNone(ds_os.vendordata_raw)
m_dhcp.assert_not_called()
@@ -278,6 +305,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertEqual(USER_DATA, ds_os_local.userdata_raw)
self.assertEqual(2, len(ds_os_local.files))
self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure)
+ self.assertEqual(VENDOR_DATA2, ds_os_local.vendordata2_pure)
self.assertIsNone(ds_os_local.vendordata_raw)
m_dhcp.assert_called_with('eth9', None)
@@ -401,7 +429,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
self.assertIsNone(ds_os.vendordata_raw)
self.assertEqual(
['dsmode', 'ec2-metadata', 'files', 'metadata', 'networkdata',
- 'userdata', 'vendordata', 'version'],
+ 'userdata', 'vendordata', 'vendordata2', 'version'],
sorted(crawled_data.keys()))
self.assertEqual('local', crawled_data['dsmode'])
self.assertEqual(EC2_META, crawled_data['ec2-metadata'])
@@ -415,6 +443,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase):
crawled_data['networkdata'])
self.assertEqual(USER_DATA, crawled_data['userdata'])
self.assertEqual(VENDOR_DATA, crawled_data['vendordata'])
+ self.assertEqual(VENDOR_DATA2, crawled_data['vendordata2'])
self.assertEqual(2, crawled_data['version'])
@@ -681,6 +710,7 @@ class TestMetadataReader(test_helpers.HttprettyTestCase):
'version': 2,
'metadata': expected_md,
'vendordata': vendor_data,
+ 'vendordata2': vendor_data2,
'networkdata': network_data,
'ec2-metadata': mock_read_ec2.return_value,
'files': {},
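
The new vendor_data2 cases mirror the existing vendor_data handling: a missing vendor_data2.json simply yields nothing, while present-but-invalid JSON raises BrokenMetadata. A hedged sketch of that shape (the local BrokenMetadata stands in for cloud-init's real exception):

    import json

    class BrokenMetadata(Exception):
        pass

    def load_vendor_file(files, name):
        raw = files.get('openstack/latest/' + name)
        if not raw:
            return None  # an absent vendor file is not an error
        try:
            return json.loads(raw)
        except ValueError as e:
            raise BrokenMetadata('invalid {}: {}'.format(name, e))
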
diff --git a/tests/unittests/test_datasource/test_upcloud.py b/tests/unittests/test_datasource/test_upcloud.py
new file mode 100644
index 00000000..cec48b4b
--- /dev/null
+++ b/tests/unittests/test_datasource/test_upcloud.py
@@ -0,0 +1,314 @@
+# Author: Antti Myyrä <antti.myyra@upcloud.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+from cloudinit import helpers
+from cloudinit import settings
+from cloudinit import sources
+from cloudinit.sources.DataSourceUpCloud import (
+ DataSourceUpCloud, DataSourceUpCloudLocal)
+
+from cloudinit.tests.helpers import mock, CiTestCase
+
+UC_METADATA = json.loads("""
+{
+ "cloud_name": "upcloud",
+ "instance_id": "00322b68-0096-4042-9406-faad61922128",
+ "hostname": "test.example.com",
+ "platform": "servers",
+ "subplatform": "metadata (http://169.254.169.254)",
+ "public_keys": [
+ "ssh-rsa AAAAB.... test1@example.com",
+ "ssh-rsa AAAAB.... test2@example.com"
+ ],
+ "region": "fi-hel2",
+ "network": {
+ "interfaces": [
+ {
+ "index": 1,
+ "ip_addresses": [
+ {
+ "address": "94.237.105.53",
+ "dhcp": true,
+ "dns": [
+ "94.237.127.9",
+ "94.237.40.9"
+ ],
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "94.237.104.1",
+ "network": "94.237.104.0/22"
+ },
+ {
+ "address": "94.237.105.50",
+ "dhcp": false,
+ "dns": null,
+ "family": "IPv4",
+ "floating": true,
+ "gateway": "",
+ "network": "94.237.105.50/32"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:36:e7",
+ "network_id": "031457f4-0f8c-483c-96f2-eccede02909c",
+ "type": "public"
+ },
+ {
+ "index": 2,
+ "ip_addresses": [
+ {
+ "address": "10.6.3.27",
+ "dhcp": true,
+ "dns": null,
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "10.6.0.1",
+ "network": "10.6.0.0/22"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:84:cc",
+ "network_id": "03d82553-5bea-4132-b29a-e1cf67ec2dd1",
+ "type": "utility"
+ },
+ {
+ "index": 3,
+ "ip_addresses": [
+ {
+ "address": "2a04:3545:1000:720:38d6:baff:fe4a:63e7",
+ "dhcp": true,
+ "dns": [
+ "2a04:3540:53::1",
+ "2a04:3544:53::1"
+ ],
+ "family": "IPv6",
+ "floating": false,
+ "gateway": "2a04:3545:1000:720::1",
+ "network": "2a04:3545:1000:720::/64"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:63:e7",
+ "network_id": "03000000-0000-4000-8046-000000000000",
+ "type": "public"
+ },
+ {
+ "index": 4,
+ "ip_addresses": [
+ {
+ "address": "172.30.1.10",
+ "dhcp": true,
+ "dns": null,
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "172.30.1.1",
+ "network": "172.30.1.0/24"
+ }
+ ],
+ "mac": "3a:d6:ba:4a:8a:e1",
+ "network_id": "035a0a4a-7704-4de5-820d-189fc8132714",
+ "type": "private"
+ }
+ ],
+ "dns": [
+ "94.237.127.9",
+ "94.237.40.9"
+ ]
+ },
+ "storage": {
+ "disks": [
+ {
+ "id": "014efb65-223b-4d44-8f0a-c29535b88dcf",
+ "serial": "014efb65223b4d448f0a",
+ "size": 10240,
+ "type": "disk",
+ "tier": "maxiops"
+ }
+ ]
+ },
+ "tags": [],
+ "user_data": "",
+ "vendor_data": ""
+}
+""")
+
+UC_METADATA["user_data"] = b"""#cloud-config
+runcmd:
+- [touch, /root/cloud-init-worked ]
+"""
+
+MD_URL = 'http://169.254.169.254/metadata/v1.json'
+
+
+def _mock_dmi():
+ return True, "00322b68-0096-4042-9406-faad61922128"
+
+
+class TestUpCloudMetadata(CiTestCase):
+ """
+ Test reading the meta-data
+ """
+ def setUp(self):
+ super(TestUpCloudMetadata, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def get_ds(self, get_sysinfo=_mock_dmi):
+ ds = DataSourceUpCloud(
+ settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ if get_sysinfo:
+ ds._get_sysinfo = get_sysinfo
+ return ds
+
+ @mock.patch('cloudinit.sources.helpers.upcloud.read_sysinfo')
+ def test_returns_false_not_on_upcloud(self, m_read_sysinfo):
+ m_read_sysinfo.return_value = (False, None)
+ ds = self.get_ds(get_sysinfo=None)
+ self.assertEqual(False, ds.get_data())
+ self.assertTrue(m_read_sysinfo.called)
+
+ @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata')
+ def test_metadata(self, mock_readmd):
+ mock_readmd.return_value = UC_METADATA.copy()
+
+ ds = self.get_ds()
+ ds.perform_dhcp_setup = False
+
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(mock_readmd.called)
+
+ self.assertEqual(UC_METADATA.get('user_data'), ds.get_userdata_raw())
+ self.assertEqual(UC_METADATA.get('vendor_data'),
+ ds.get_vendordata_raw())
+ self.assertEqual(UC_METADATA.get('region'), ds.availability_zone)
+ self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id())
+ self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name)
+
+ self.assertEqual(UC_METADATA.get('public_keys'),
+ ds.get_public_ssh_keys())
+ self.assertIsInstance(ds.get_public_ssh_keys(), list)
+
+
+class TestUpCloudNetworkSetup(CiTestCase):
+ """
+ Test reading the meta-data in a networked context
+ """
+
+ def setUp(self):
+ super(TestUpCloudNetworkSetup, self).setUp()
+ self.tmp = self.tmp_dir()
+
+ def get_ds(self, get_sysinfo=_mock_dmi):
+ ds = DataSourceUpCloudLocal(
+ settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+ if get_sysinfo:
+ ds._get_sysinfo = get_sysinfo
+ return ds
+
+ @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata')
+ @mock.patch('cloudinit.net.find_fallback_nic')
+ @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
+ @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
+ def test_network_configured_metadata(self, m_net, m_dhcp,
+ m_fallback_nic, mock_readmd):
+ mock_readmd.return_value = UC_METADATA.copy()
+
+ m_fallback_nic.return_value = 'eth1'
+ m_dhcp.return_value = [{
+ 'interface': 'eth1', 'fixed-address': '10.6.3.27',
+ 'routers': '10.6.0.1', 'subnet-mask': '22',
+ 'broadcast-address': '10.6.3.255'}
+ ]
+
+ ds = self.get_ds()
+
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(m_dhcp.called)
+ m_dhcp.assert_called_with('eth1', None)
+
+ m_net.assert_called_once_with(
+ broadcast='10.6.3.255', interface='eth1',
+ ip='10.6.3.27', prefix_or_mask='22',
+ router='10.6.0.1', static_routes=None
+ )
+
+ self.assertTrue(mock_readmd.called)
+
+ self.assertEqual(UC_METADATA.get('region'), ds.availability_zone)
+ self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id())
+ self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name)
+
+ @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata')
+ @mock.patch('cloudinit.net.get_interfaces_by_mac')
+ def test_network_configuration(self, m_get_by_mac, mock_readmd):
+ mock_readmd.return_value = UC_METADATA.copy()
+
+ raw_ifaces = UC_METADATA.get('network').get('interfaces')
+ self.assertEqual(4, len(raw_ifaces))
+
+ m_get_by_mac.return_value = {
+ raw_ifaces[0].get('mac'): 'eth0',
+ raw_ifaces[1].get('mac'): 'eth1',
+ raw_ifaces[2].get('mac'): 'eth2',
+ raw_ifaces[3].get('mac'): 'eth3',
+ }
+
+ ds = self.get_ds()
+ ds.perform_dhcp_setup = False
+
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(mock_readmd.called)
+
+ netcfg = ds.network_config
+
+ self.assertEqual(1, netcfg.get('version'))
+
+ config = netcfg.get('config')
+ self.assertIsInstance(config, list)
+ self.assertEqual(5, len(config))
+ self.assertEqual('physical', config[3].get('type'))
+
+ self.assertEqual(raw_ifaces[2].get('mac'),
+ config[2].get('mac_address'))
+ self.assertEqual(1, len(config[2].get('subnets')))
+ self.assertEqual('ipv6_dhcpv6-stateless',
+ config[2].get('subnets')[0].get('type'))
+
+ self.assertEqual(2, len(config[0].get('subnets')))
+ self.assertEqual('static', config[0].get('subnets')[1].get('type'))
+
+ dns = config[4]
+ self.assertEqual('nameserver', dns.get('type'))
+ self.assertEqual(2, len(dns.get('address')))
+ self.assertEqual(
+ UC_METADATA.get('network').get('dns')[1],
+ dns.get('address')[1]
+ )
+
+
+class TestUpCloudDatasourceLoading(CiTestCase):
+ def test_get_datasource_list_returns_in_local(self):
+ deps = (sources.DEP_FILESYSTEM, )
+ ds_list = sources.DataSourceUpCloud.get_datasource_list(deps)
+ self.assertEqual(ds_list,
+ [DataSourceUpCloudLocal])
+
+ def test_get_datasource_list_returns_in_normal(self):
+ deps = (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)
+ ds_list = sources.DataSourceUpCloud.get_datasource_list(deps)
+ self.assertEqual(ds_list,
+ [DataSourceUpCloud])
+
+ def test_list_sources_finds_ds(self):
+ found = sources.list_sources(
+ ['UpCloud'], (sources.DEP_FILESYSTEM, sources.DEP_NETWORK),
+ ['cloudinit.sources'])
+ self.assertEqual([DataSourceUpCloud],
+ found)
+
+# vi: ts=4 expandtab
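
The mocked read_metadata helper presumably fetches the JSON document from the link-local service named by MD_URL; a minimal standalone sketch (the real helper in cloudinit.sources.helpers.upcloud goes through cloud-init's url_helper, with retries):

    import json
    from urllib.request import urlopen

    MD_URL = 'http://169.254.169.254/metadata/v1.json'

    def read_metadata(url=MD_URL, timeout=2):
        with urlopen(url, timeout=timeout) as response:
            return json.loads(response.read().decode())
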
diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py
index 44607489..336150bc 100644
--- a/tests/unittests/test_distros/test_generic.py
+++ b/tests/unittests/test_distros/test_generic.py
@@ -119,6 +119,19 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase):
self.assertIn("josh", contents)
self.assertEqual(2, contents.count("josh"))
+ def test_sudoers_ensure_only_one_includedir(self):
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ self.patchOS(self.tmp)
+ self.patchUtils(self.tmp)
+ for char in ['#', '@']:
+ util.write_file("/etc/sudoers", "{}includedir /b".format(char))
+ d.ensure_sudo_dir("/b")
+ contents = util.load_file("/etc/sudoers")
+ self.assertIn("includedir /b", contents)
+ self.assertTrue(os.path.isdir("/b"))
+ self.assertEqual(1, contents.count("includedir /b"))
+
def test_arch_package_mirror_info_unknown(self):
"""for an unknown arch, we should get back that with arch 'default'."""
arch_mirrors = gapmi(package_mirrors, arch="unknown")
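
The new sudoers test pins idempotency: whether /etc/sudoers already carries the '#includedir' or the older '@includedir' spelling, ensure_sudo_dir must not append a duplicate. A rough sketch of that check (illustrative, not the distro code itself):

    def ensure_includedir(sudoers_text, path):
        directive = 'includedir {}'.format(path)
        for line in sudoers_text.splitlines():
            stripped = line.strip()
            if (stripped.startswith('#' + directive)
                    or stripped.startswith('@' + directive)):
                return sudoers_text  # already present; nothing to do
        return sudoers_text + '\n#' + directive + '\n'
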
diff --git a/tools/ds-identify b/tools/ds-identify
index 496dbb8a..2f2486f7 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -125,7 +125,7 @@ DI_DSNAME=""
# be searched if there is no setting found in config.
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \
CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \
-OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud"
+OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud"
DI_DSLIST=""
DI_MODE=""
DI_ON_FOUND=""
@@ -883,6 +883,11 @@ dscheck_RbxCloud() {
return ${DS_NOT_FOUND}
}
+dscheck_UpCloud() {
+ dmi_sys_vendor_is UpCloud && return ${DS_FOUND}
+ return ${DS_NOT_FOUND}
+}
+
ovf_vmware_guest_customization() {
# vmware guest customization
diff --git a/tox.ini b/tox.ini
index 5cb999bd..0e2eae46 100644
--- a/tox.ini
+++ b/tox.ini
@@ -147,13 +147,13 @@ deps =
[testenv:integration-tests]
basepython = python3
commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests}
-passenv = CLOUD_INIT_*
+passenv = CLOUD_INIT_* SSH_AUTH_SOCK
deps =
-r{toxinidir}/integration-requirements.txt
[testenv:integration-tests-ci]
commands = {envpython} -m pytest --log-cli-level=INFO {posargs:tests/integration_tests}
-passenv = CLOUD_INIT_*
+passenv = CLOUD_INIT_* SSH_AUTH_SOCK
deps =
-r{toxinidir}/integration-requirements.txt
setenv =
@@ -176,6 +176,7 @@ markers =
oci: test will only run on OCI platform
lxd_config_dict: set the config_dict passed on LXD instance creation
lxd_container: test will only run in LXD container
+ lxd_use_exec: `execute` will use `lxc exec` instead of SSH
lxd_vm: test will only run in LXD VM
not_xenial: test cannot run on the xenial release
not_bionic: test cannot run on the bionic release
@@ -184,5 +185,6 @@ markers =
instance_name: the name to be used for the test instance
sru_2020_11: test is part of the 2020/11 SRU verification
sru_2021_01: test is part of the 2021/01 SRU verification
+ sru_next: test is part of the next SRU verification
ubuntu: this test should run on Ubuntu
unstable: skip this test because it is flaky