Diffstat (limited to 'cloudinit/sources')
-rw-r--r--  cloudinit/sources/DataSourceAliYun.py | 18
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py | 113
-rwxr-xr-x  cloudinit/sources/DataSourceAzure.py | 1350
-rw-r--r--  cloudinit/sources/DataSourceBigstep.py | 9
-rw-r--r--  cloudinit/sources/DataSourceCloudSigma.py | 39
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py | 135
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 117
-rw-r--r--  cloudinit/sources/DataSourceDigitalOcean.py | 65
-rw-r--r--  cloudinit/sources/DataSourceEc2.py | 461
-rw-r--r--  cloudinit/sources/DataSourceExoscale.py | 171
-rw-r--r--  cloudinit/sources/DataSourceGCE.py | 221
-rw-r--r--  cloudinit/sources/DataSourceHetzner.py | 74
-rw-r--r--  cloudinit/sources/DataSourceIBMCloud.py | 128
-rw-r--r--  cloudinit/sources/DataSourceLXD.py | 61
-rw-r--r--  cloudinit/sources/DataSourceMAAS.py | 180
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py | 154
-rw-r--r--  cloudinit/sources/DataSourceNone.py | 15
-rw-r--r--  cloudinit/sources/DataSourceOVF.py | 311
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py | 190
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py | 129
-rw-r--r--  cloudinit/sources/DataSourceOracle.py | 125
-rw-r--r--  cloudinit/sources/DataSourceRbxCloud.py | 194
-rw-r--r--  cloudinit/sources/DataSourceScaleway.py | 131
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py | 555
-rw-r--r--  cloudinit/sources/DataSourceUpCloud.py | 7
-rw-r--r--  cloudinit/sources/DataSourceVMware.py | 13
-rw-r--r--  cloudinit/sources/DataSourceVultr.py | 86
-rw-r--r--  cloudinit/sources/__init__.py | 385
-rwxr-xr-x  cloudinit/sources/helpers/azure.py | 693
-rw-r--r--  cloudinit/sources/helpers/digitalocean.py | 195
-rw-r--r--  cloudinit/sources/helpers/hetzner.py | 15
-rw-r--r--  cloudinit/sources/helpers/netlink.py | 187
-rw-r--r--  cloudinit/sources/helpers/openstack.py | 438
-rw-r--r--  cloudinit/sources/helpers/upcloud.py | 12
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/boot_proto.py | 5
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config.py | 59
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_custom_script.py | 45
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_file.py | 7
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_namespace.py | 1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_nic.py | 84
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_passwd.py | 38
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_source.py | 1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_error.py | 1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_event.py | 1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_state.py | 1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_util.py | 46
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/ipv4_mode.py | 11
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/nic.py | 33
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/nic_base.py | 29
-rw-r--r--  cloudinit/sources/helpers/vultr.py | 172
50 files changed, 4228 insertions, 3283 deletions
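
The hunks below are almost entirely mechanical style changes: import statements are regrouped, single-quoted strings become double-quoted, and long calls are re-wrapped with trailing commas at a 79-character limit. A minimal sketch of reproducing that style on a small snippet, assuming the tree is formatted with black and isort (the exact options live in the repository's tooling configuration and are not shown on this page):

    # Hypothetical standalone example, not part of the change below;
    # requires the `black` and `isort` packages to be installed.
    import black
    import isort

    src = "metadata_urls = ['http://100.100.100.200']\n"

    # isort regroups import blocks; black normalizes quotes and re-wraps
    # long expressions to the configured line length.
    out = black.format_str(isort.code(src), mode=black.Mode(line_length=79))
    print(out)  # metadata_urls = ["http://100.100.100.200"]
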
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 09052873..37f512e3 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -1,7 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import dmi
-from cloudinit import sources
+from cloudinit import dmi, sources
from cloudinit.sources import DataSourceEc2 as EC2
ALIYUN_PRODUCT = "Alibaba Cloud ECS"
@@ -9,18 +8,18 @@ ALIYUN_PRODUCT = "Alibaba Cloud ECS"
class DataSourceAliYun(EC2.DataSourceEc2):
- dsname = 'AliYun'
- metadata_urls = ['http://100.100.100.200']
+ dsname = "AliYun"
+ metadata_urls = ["http://100.100.100.200"]
# The minimum supported metadata_version from the ec2 metadata apis
- min_metadata_version = '2016-01-01'
+ min_metadata_version = "2016-01-01"
extended_metadata_versions = []
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
- return self.metadata.get('hostname', 'localhost.localdomain')
+ return self.metadata.get("hostname", "localhost.localdomain")
def get_public_ssh_keys(self):
- return parse_public_keys(self.metadata.get('public-keys', {}))
+ return parse_public_keys(self.metadata.get("public-keys", {}))
def _get_cloud_name(self):
if _is_aliyun():
@@ -30,7 +29,7 @@ class DataSourceAliYun(EC2.DataSourceEc2):
def _is_aliyun():
- return dmi.read_dmi_data('system-product-name') == ALIYUN_PRODUCT
+ return dmi.read_dmi_data("system-product-name") == ALIYUN_PRODUCT
def parse_public_keys(public_keys):
@@ -41,7 +40,7 @@ def parse_public_keys(public_keys):
elif isinstance(key_body, list):
keys.extend(key_body)
elif isinstance(key_body, dict):
- key = key_body.get('openssh-key', [])
+ key = key_body.get("openssh-key", [])
if isinstance(key, str):
keys.append(key.strip())
elif isinstance(key, list):
@@ -59,4 +58,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index cd93412a..9029b535 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -7,10 +7,10 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-'''
+"""
This file contains code used to gather the user data passed to an
instance on RHEVm and vSphere.
-'''
+"""
import errno
import os
@@ -18,29 +18,26 @@ import os.path
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import sources, subp, util
LOG = logging.getLogger(__name__)
# Needed file paths
-CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
+CLOUD_INFO_FILE = "/etc/sysconfig/cloud-info"
# Shell command lists
-CMD_PROBE_FLOPPY = ['modprobe', 'floppy']
+CMD_PROBE_FLOPPY = ["modprobe", "floppy"]
META_DATA_NOT_SUPPORTED = {
- 'block-device-mapping': {},
- 'instance-id': 455,
- 'local-hostname': 'localhost',
- 'placement': {},
+ "block-device-mapping": {},
+ "instance-id": 455,
+ "local-hostname": "localhost",
+ "placement": {},
}
def read_user_data_callback(mount_dir):
- '''
+ """
Description:
This callback will be applied by util.mount_cb() on the mounted
file.
@@ -55,10 +52,10 @@ def read_user_data_callback(mount_dir):
Returns:
User Data
- '''
+ """
- deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
- user_data_file = mount_dir + '/user-data.txt'
+ deltacloud_user_data_file = mount_dir + "/deltacloud-user-data.txt"
+ user_data_file = mount_dir + "/user-data.txt"
# First try deltacloud_user_data_file. On failure try user_data_file.
try:
@@ -67,7 +64,7 @@ def read_user_data_callback(mount_dir):
try:
user_data = util.load_file(user_data_file).strip()
except IOError:
- util.logexc(LOG, 'Failed accessing user data file.')
+ util.logexc(LOG, "Failed accessing user data file.")
return None
return user_data
@@ -75,7 +72,7 @@ def read_user_data_callback(mount_dir):
class DataSourceAltCloud(sources.DataSource):
- dsname = 'AltCloud'
+ dsname = "AltCloud"
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -87,7 +84,7 @@ class DataSourceAltCloud(sources.DataSource):
return "%s [seed=%s]" % (root, self.seed)
def get_cloud_type(self):
- '''
+ """
Description:
Get the type for the cloud back end this instance is running on
by examining the string returned by reading either:
@@ -101,31 +98,34 @@ class DataSourceAltCloud(sources.DataSource):
One of the following strings:
'RHEV', 'VSPHERE' or 'UNKNOWN'
- '''
+ """
if os.path.exists(CLOUD_INFO_FILE):
try:
cloud_type = util.load_file(CLOUD_INFO_FILE).strip().upper()
except IOError:
- util.logexc(LOG, 'Unable to access cloud info file at %s.',
- CLOUD_INFO_FILE)
- return 'UNKNOWN'
+ util.logexc(
+ LOG,
+ "Unable to access cloud info file at %s.",
+ CLOUD_INFO_FILE,
+ )
+ return "UNKNOWN"
return cloud_type
system_name = dmi.read_dmi_data("system-product-name")
if not system_name:
- return 'UNKNOWN'
+ return "UNKNOWN"
sys_name = system_name.upper()
- if sys_name.startswith('RHEV'):
- return 'RHEV'
+ if sys_name.startswith("RHEV"):
+ return "RHEV"
- if sys_name.startswith('VMWARE'):
- return 'VSPHERE'
+ if sys_name.startswith("VMWARE"):
+ return "VSPHERE"
- return 'UNKNOWN'
+ return "UNKNOWN"
def _get_data(self):
- '''
+ """
Description:
User Data is passed to the launching instance which
is used to perform instance configuration.
@@ -140,18 +140,18 @@ class DataSourceAltCloud(sources.DataSource):
Images not built with Imagefactory will try to
determine what the cloud provider is based on system
information.
- '''
+ """
- LOG.debug('Invoked get_data()')
+ LOG.debug("Invoked get_data()")
cloud_type = self.get_cloud_type()
- LOG.debug('cloud_type: %s', str(cloud_type))
+ LOG.debug("cloud_type: %s", str(cloud_type))
- if 'RHEV' in cloud_type:
+ if "RHEV" in cloud_type:
if self.user_data_rhevm():
return True
- elif 'VSPHERE' in cloud_type:
+ elif "VSPHERE" in cloud_type:
if self.user_data_vsphere():
return True
else:
@@ -160,20 +160,20 @@ class DataSourceAltCloud(sources.DataSource):
return False
# No user data found
- util.logexc(LOG, 'Failed accessing user data.')
+ util.logexc(LOG, "Failed accessing user data.")
return False
def _get_subplatform(self):
"""Return the subplatform metadata details."""
cloud_type = self.get_cloud_type()
- if not hasattr(self, 'source'):
+ if not hasattr(self, "source"):
self.source = sources.METADATA_UNKNOWN
- if cloud_type == 'RHEV':
- self.source = '/dev/fd0'
- return '%s (%s)' % (cloud_type.lower(), self.source)
+ if cloud_type == "RHEV":
+ self.source = "/dev/fd0"
+ return "%s (%s)" % (cloud_type.lower(), self.source)
def user_data_rhevm(self):
- '''
+ """
RHEVM specific userdata read
If on RHEV-M the user data will be contained on the
@@ -186,7 +186,7 @@ class DataSourceAltCloud(sources.DataSource):
mount /dev/fd0 <tmp mount dir>
The call back passed to util.mount_cb will do:
read <tmp mount dir>/<user_data_file>
- '''
+ """
return_str = None
@@ -194,16 +194,16 @@ class DataSourceAltCloud(sources.DataSource):
try:
modprobe_floppy()
except subp.ProcessExecutionError as e:
- util.logexc(LOG, 'Failed modprobe: %s', e)
+ util.logexc(LOG, "Failed modprobe: %s", e)
return False
- floppy_dev = '/dev/fd0'
+ floppy_dev = "/dev/fd0"
# udevadm settle for floppy device
try:
util.udevadm_settle(exists=floppy_dev, timeout=5)
except (subp.ProcessExecutionError, OSError) as e:
- util.logexc(LOG, 'Failed udevadm_settle: %s\n', e)
+ util.logexc(LOG, "Failed udevadm_settle: %s\n", e)
return False
try:
@@ -212,8 +212,11 @@ class DataSourceAltCloud(sources.DataSource):
if err.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for user data",
- floppy_dev)
+ util.logexc(
+ LOG,
+ "Failed to mount %s when looking for user data",
+ floppy_dev,
+ )
self.userdata_raw = return_str
self.metadata = META_DATA_NOT_SUPPORTED
@@ -224,7 +227,7 @@ class DataSourceAltCloud(sources.DataSource):
return False
def user_data_vsphere(self):
- '''
+ """
vSphere specific userdata read
If on vSphere the user data will be contained on the
@@ -235,10 +238,10 @@ class DataSourceAltCloud(sources.DataSource):
mount /dev/fd0 <tmp mount dir>
The call back passed to util.mount_cb will do:
read <tmp mount dir>/<user_data_file>
- '''
+ """
return_str = None
- cdrom_list = util.find_devs_with('LABEL=CDROM')
+ cdrom_list = util.find_devs_with("LABEL=CDROM")
for cdrom_dev in cdrom_list:
try:
return_str = util.mount_cb(cdrom_dev, read_user_data_callback)
@@ -249,8 +252,11 @@ class DataSourceAltCloud(sources.DataSource):
if err.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for user "
- "data", cdrom_dev)
+ util.logexc(
+ LOG,
+ "Failed to mount %s when looking for user data",
+ cdrom_dev,
+ )
self.userdata_raw = return_str
self.metadata = META_DATA_NOT_SUPPORTED
@@ -263,7 +269,7 @@ class DataSourceAltCloud(sources.DataSource):
def modprobe_floppy():
out, _err = subp.subp(CMD_PROBE_FLOPPY)
- LOG.debug('Command: %s\nOutput%s', ' '.join(CMD_PROBE_FLOPPY), out)
+ LOG.debug("Command: %s\nOutput%s", " ".join(CMD_PROBE_FLOPPY), out)
# Used to match classes to dependencies
@@ -279,4 +285,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index eee98fa8..a8b403e8 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -5,66 +5,62 @@
# This file is part of cloud-init. See LICENSE file for license information.
import base64
-from collections import namedtuple
import crypt
-from functools import partial
import os
import os.path
import re
-from time import time
-from time import sleep
-from xml.dom import minidom
import xml.etree.ElementTree as ET
+from collections import namedtuple
from enum import Enum
+from functools import partial
+from time import sleep, time
+from xml.dom import minidom
+
import requests
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import net
+from cloudinit import net, sources, ssh_util, subp, util
from cloudinit.event import EventScope, EventType
from cloudinit.net import device_driver
from cloudinit.net.dhcp import EphemeralDHCPv4
-from cloudinit import sources
-from cloudinit.sources.helpers import netlink
-from cloudinit import ssh_util
-from cloudinit import subp
-from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
-from cloudinit import util
from cloudinit.reporting import events
-
+from cloudinit.sources.helpers import netlink
from cloudinit.sources.helpers.azure import (
DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE,
+ EphemeralDHCPv4WithReporting,
azure_ds_reporter,
azure_ds_telemetry_reporter,
- get_metadata_from_fabric,
+ build_minimal_ovf,
+ dhcp_log_cb,
get_boot_telemetry,
+ get_metadata_from_fabric,
get_system_info,
- report_diagnostic_event,
- EphemeralDHCPv4WithReporting,
is_byte_swapped,
- dhcp_log_cb,
push_log_to_kvp,
+ report_diagnostic_event,
report_failure_to_fabric,
- build_minimal_ovf)
+)
+from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
LOG = logging.getLogger(__name__)
-DS_NAME = 'Azure'
+DS_NAME = "Azure"
DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
# azure systems will always have a resource disk, and 66-azure-ephemeral.rules
# ensures that it gets linked to this path.
-RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource'
-LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases'
-DEFAULT_FS = 'ext4'
+RESOURCE_DISK_PATH = "/dev/disk/cloud/azure_resource"
+LEASE_FILE = "/var/lib/dhcp/dhclient.eth0.leases"
+DEFAULT_FS = "ext4"
# DMI chassis-asset-tag is set static for all azure instances
-AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
+AZURE_CHASSIS_ASSET_TAG = "7783-7084-3265-9085-8269-3286-77"
REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
REPROVISION_NIC_ATTACH_MARKER_FILE = "/var/lib/cloud/data/wait_for_nic_attach"
REPROVISION_NIC_DETACHED_MARKER_FILE = "/var/lib/cloud/data/nic_detached"
REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
-AGENT_SEED_DIR = '/var/lib/waagent'
-DEFAULT_PROVISIONING_ISO_DEV = '/dev/sr0'
+AGENT_SEED_DIR = "/var/lib/waagent"
+DEFAULT_PROVISIONING_ISO_DEV = "/dev/sr0"
# In the event where the IMDS primary server is not
# available, it takes 1s to fallback to the secondary one
@@ -90,10 +86,10 @@ PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0"
# List of static scripts and network config artifacts created by
# stock ubuntu suported images.
UBUNTU_EXTENDED_NETWORK_SCRIPTS = [
- '/etc/netplan/90-hotplug-azure.yaml',
- '/usr/local/sbin/ephemeral_eth.sh',
- '/etc/udev/rules.d/10-net-device-added.rules',
- '/run/network/interfaces.ephemeral.d',
+ "/etc/netplan/90-hotplug-azure.yaml",
+ "/usr/local/sbin/ephemeral_eth.sh",
+ "/etc/udev/rules.d/10-net-device-added.rules",
+ "/run/network/interfaces.ephemeral.d",
]
# This list is used to blacklist devices that will be considered
@@ -113,7 +109,7 @@ UBUNTU_EXTENDED_NETWORK_SCRIPTS = [
# https://docs.microsoft.com/en-us/azure/virtual-machines/dv2-dsv2-series
# https://docs.microsoft.com/en-us/azure/virtual-machines/dv3-dsv3-series
# https://docs.microsoft.com/en-us/azure/virtual-machines/ev3-esv3-series
-BLACKLIST_DRIVERS = ['mlx4_core', 'mlx5_core']
+BLACKLIST_DRIVERS = ["mlx4_core", "mlx5_core"]
def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid):
@@ -127,11 +123,13 @@ def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid):
if re.search(r"pnpinfo", line):
fields = line.split()
if len(fields) >= 3:
- columns = fields[2].split('=')
- if (len(columns) >= 2 and
- columns[0] == "deviceid" and
- columns[1].startswith(deviceid)):
- comps = fields[0].split('.')
+ columns = fields[2].split("=")
+ if (
+ len(columns) >= 2
+ and columns[0] == "deviceid"
+ and columns[1].startswith(deviceid)
+ ):
+ comps = fields[0].split(".")
return comps[2]
return None
@@ -165,9 +163,9 @@ def find_dev_from_busdev(camcontrol_out, busdev):
"""
for line in camcontrol_out.splitlines():
if re.search(busdev, line):
- items = line.split('(')
+ items = line.split("(")
if len(items) == 2:
- dev_pass = items[1].split(',')
+ dev_pass = items[1].split(",")
return dev_pass[0]
return None
@@ -176,7 +174,7 @@ def execute_or_debug(cmd, fail_ret=None):
try:
return subp.subp(cmd)[0]
except subp.ProcessExecutionError:
- LOG.debug("Failed to execute: %s", ' '.join(cmd))
+ LOG.debug("Failed to execute: %s", " ".join(cmd))
return fail_ret
@@ -185,11 +183,11 @@ def get_dev_storvsc_sysctl():
def get_camcontrol_dev_bus():
- return execute_or_debug(['camcontrol', 'devlist', '-b'])
+ return execute_or_debug(["camcontrol", "devlist", "-b"])
def get_camcontrol_dev():
- return execute_or_debug(['camcontrol', 'devlist'])
+ return execute_or_debug(["camcontrol", "devlist"])
def get_resource_disk_on_freebsd(port_id):
@@ -236,8 +234,8 @@ def get_resource_disk_on_freebsd(port_id):
# update the FreeBSD specific information
if util.is_FreeBSD():
- LEASE_FILE = '/var/db/dhclient.leases.hn0'
- DEFAULT_FS = 'freebsd-ufs'
+ LEASE_FILE = "/var/db/dhclient.leases.hn0"
+ DEFAULT_FS = "freebsd-ufs"
res_disk = get_resource_disk_on_freebsd(1)
if res_disk is not None:
LOG.debug("resource disk is not None")
@@ -248,52 +246,55 @@ if util.is_FreeBSD():
PLATFORM_ENTROPY_SOURCE = None
BUILTIN_DS_CONFIG = {
- 'data_dir': AGENT_SEED_DIR,
- 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH},
- 'dhclient_lease_file': LEASE_FILE,
- 'apply_network_config': True, # Use IMDS published network configuration
+ "data_dir": AGENT_SEED_DIR,
+ "disk_aliases": {"ephemeral0": RESOURCE_DISK_PATH},
+ "dhclient_lease_file": LEASE_FILE,
+ "apply_network_config": True, # Use IMDS published network configuration
}
# RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False
BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG = {
- 'disk_setup': {
- 'ephemeral0': {'table_type': 'gpt',
- 'layout': [100],
- 'overwrite': True},
+ "disk_setup": {
+ "ephemeral0": {
+ "table_type": "gpt",
+ "layout": [100],
+ "overwrite": True,
+ },
},
- 'fs_setup': [{'filesystem': DEFAULT_FS,
- 'device': 'ephemeral0.1'}],
+ "fs_setup": [{"filesystem": DEFAULT_FS, "device": "ephemeral0.1"}],
}
-DS_CFG_PATH = ['datasource', DS_NAME]
-DS_CFG_KEY_PRESERVE_NTFS = 'never_destroy_ntfs'
-DEF_EPHEMERAL_LABEL = 'Temporary Storage'
+DS_CFG_PATH = ["datasource", DS_NAME]
+DS_CFG_KEY_PRESERVE_NTFS = "never_destroy_ntfs"
+DEF_EPHEMERAL_LABEL = "Temporary Storage"
# The redacted password fails to meet password complexity requirements
# so we can safely use this to mask/redact the password in the ovf-env.xml
-DEF_PASSWD_REDACTION = 'REDACTED'
+DEF_PASSWD_REDACTION = "REDACTED"
class DataSourceAzure(sources.DataSource):
- dsname = 'Azure'
- default_update_events = {EventScope.NETWORK: {
- EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT,
- }}
+ dsname = "Azure"
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ }
+ }
_negotiated = False
_metadata_imds = sources.UNSET
_ci_pkl_version = 1
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'azure')
+ self.seed_dir = os.path.join(paths.seed_dir, "azure")
self.cfg = {}
self.seed = None
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
- BUILTIN_DS_CONFIG])
- self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file')
+ self.ds_cfg = util.mergemanydict(
+ [util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}), BUILTIN_DS_CONFIG]
+ )
+ self.dhclient_lease_file = self.ds_cfg.get("dhclient_lease_file")
self._network_config = None
self._ephemeral_dhcp_ctx = None
self.failed_desired_api_version = False
@@ -312,13 +313,13 @@ class DataSourceAzure(sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- if self.seed.startswith('/dev'):
- subplatform_type = 'config-disk'
- elif self.seed.lower() == 'imds':
- subplatform_type = 'imds'
+ if self.seed.startswith("/dev"):
+ subplatform_type = "config-disk"
+ elif self.seed.lower() == "imds":
+ subplatform_type = "imds"
else:
- subplatform_type = 'seed-dir'
- return '%s (%s)' % (subplatform_type, self.seed)
+ subplatform_type = "seed-dir"
+ return "%s (%s)" % (subplatform_type, self.seed)
@azure_ds_telemetry_reporter
def crawl_metadata(self):
@@ -332,7 +333,7 @@ class DataSourceAzure(sources.DataSource):
# azure removes/ejects the cdrom containing the ovf-env.xml
# file on reboot. So, in order to successfully reboot we
# need to look in the datadir and consider that valid
- ddir = self.ds_cfg['data_dir']
+ ddir = self.ds_cfg["data_dir"]
# The order in which the candidates are inserted matters here, because
# it determines the value of ret. More specifically, the first one in
@@ -346,25 +347,28 @@ class DataSourceAzure(sources.DataSource):
if os.path.isfile(REPROVISION_MARKER_FILE):
reprovision = True
metadata_source = "IMDS"
- report_diagnostic_event("Reprovision marker file already present "
- "before crawling Azure metadata: %s" %
- REPROVISION_MARKER_FILE,
- logger_func=LOG.debug)
+ report_diagnostic_event(
+ "Reprovision marker file already present "
+ "before crawling Azure metadata: %s" % REPROVISION_MARKER_FILE,
+ logger_func=LOG.debug,
+ )
elif os.path.isfile(REPROVISION_NIC_ATTACH_MARKER_FILE):
reprovision_after_nic_attach = True
metadata_source = "NIC_ATTACH_MARKER_PRESENT"
- report_diagnostic_event("Reprovision nic attach marker file "
- "already present before crawling Azure "
- "metadata: %s" %
- REPROVISION_NIC_ATTACH_MARKER_FILE,
- logger_func=LOG.debug)
+ report_diagnostic_event(
+ "Reprovision nic attach marker file "
+ "already present before crawling Azure "
+ "metadata: %s" % REPROVISION_NIC_ATTACH_MARKER_FILE,
+ logger_func=LOG.debug,
+ )
else:
for src in list_possible_azure_ds(self.seed_dir, ddir):
try:
if src.startswith("/dev/"):
if util.is_FreeBSD():
- ret = util.mount_cb(src, load_azure_ds_dir,
- mtype="udf")
+ ret = util.mount_cb(
+ src, load_azure_ds_dir, mtype="udf"
+ )
else:
ret = util.mount_cb(src, load_azure_ds_dir)
# save the device for ejection later
@@ -377,36 +381,33 @@ class DataSourceAzure(sources.DataSource):
except NonAzureDataSource:
report_diagnostic_event(
"Did not find Azure data source in %s" % src,
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
continue
except util.MountFailedError:
report_diagnostic_event(
- '%s was not mountable' % src,
- logger_func=LOG.debug)
+ "%s was not mountable" % src, logger_func=LOG.debug
+ )
ovf_is_accessible = False
- empty_md = {'local-hostname': ''}
+ empty_md = {"local-hostname": ""}
empty_cfg = dict(
- system_info=dict(
- default_user=dict(
- name=''
- )
- )
+ system_info=dict(default_user=dict(name=""))
)
- ret = (empty_md, '', empty_cfg, {})
- metadata_source = 'IMDS'
+ ret = (empty_md, "", empty_cfg, {})
+ metadata_source = "IMDS"
continue
except BrokenAzureDataSource as exc:
- msg = 'BrokenAzureDataSource: %s' % exc
+ msg = "BrokenAzureDataSource: %s" % exc
report_diagnostic_event(msg, logger_func=LOG.error)
raise sources.InvalidMetaDataException(msg)
report_diagnostic_event(
"Found provisioning metadata in %s" % metadata_source,
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
imds_md = self.get_imds_data_with_api_fallback(
- self.fallback_interface,
- retries=10
+ self.fallback_interface, retries=10
)
# reset _fallback_interface so that if the code enters reprovisioning
@@ -414,16 +415,17 @@ class DataSourceAzure(sources.DataSource):
self._fallback_interface = None
if not imds_md and not ovf_is_accessible:
- msg = 'No OVF or IMDS available'
+ msg = "No OVF or IMDS available"
report_diagnostic_event(msg)
raise sources.InvalidMetaDataException(msg)
- perform_reprovision = (
- reprovision or
- self._should_reprovision(ret, imds_md))
+ perform_reprovision = reprovision or self._should_reprovision(
+ ret, imds_md
+ )
perform_reprovision_after_nic_attach = (
- reprovision_after_nic_attach or
- self._should_reprovision_after_nic_attach(ret, imds_md))
+ reprovision_after_nic_attach
+ or self._should_reprovision_after_nic_attach(ret, imds_md)
+ )
if perform_reprovision or perform_reprovision_after_nic_attach:
if util.is_FreeBSD():
@@ -435,45 +437,50 @@ class DataSourceAzure(sources.DataSource):
ret = self._reprovision()
# fetch metadata again as it has changed after reprovisioning
imds_md = self.get_imds_data_with_api_fallback(
- self.fallback_interface,
- retries=10
+ self.fallback_interface, retries=10
)
(md, userdata_raw, cfg, files) = ret
self.seed = metadata_source
- crawled_data.update({
- 'cfg': cfg,
- 'files': files,
- 'metadata': util.mergemanydict(
- [md, {'imds': imds_md}]),
- 'userdata_raw': userdata_raw})
+ crawled_data.update(
+ {
+ "cfg": cfg,
+ "files": files,
+ "metadata": util.mergemanydict([md, {"imds": imds_md}]),
+ "userdata_raw": userdata_raw,
+ }
+ )
imds_username = _username_from_imds(imds_md)
imds_hostname = _hostname_from_imds(imds_md)
imds_disable_password = _disable_password_from_imds(imds_md)
if imds_username:
- LOG.debug('Username retrieved from IMDS: %s', imds_username)
- cfg['system_info']['default_user']['name'] = imds_username
+ LOG.debug("Username retrieved from IMDS: %s", imds_username)
+ cfg["system_info"]["default_user"]["name"] = imds_username
if imds_hostname:
- LOG.debug('Hostname retrieved from IMDS: %s', imds_hostname)
- crawled_data['metadata']['local-hostname'] = imds_hostname
+ LOG.debug("Hostname retrieved from IMDS: %s", imds_hostname)
+ crawled_data["metadata"]["local-hostname"] = imds_hostname
if imds_disable_password:
LOG.debug(
- 'Disable password retrieved from IMDS: %s',
- imds_disable_password
+ "Disable password retrieved from IMDS: %s",
+ imds_disable_password,
)
- crawled_data['metadata']['disable_password'] = imds_disable_password # noqa: E501
+ crawled_data["metadata"][
+ "disable_password"
+ ] = imds_disable_password
- if metadata_source == 'IMDS' and not crawled_data['files']:
+ if metadata_source == "IMDS" and not crawled_data["files"]:
try:
contents = build_minimal_ovf(
username=imds_username,
hostname=imds_hostname,
- disableSshPwd=imds_disable_password)
- crawled_data['files'] = {'ovf-env.xml': contents}
+ disableSshPwd=imds_disable_password,
+ )
+ crawled_data["files"] = {"ovf-env.xml": contents}
except Exception as e:
report_diagnostic_event(
"Failed to construct OVF from IMDS data %s" % e,
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
# only use userdata from imds if OVF did not provide custom data
# userdata provided by IMDS is always base64 encoded
@@ -482,48 +489,53 @@ class DataSourceAzure(sources.DataSource):
if imds_userdata:
LOG.debug("Retrieved userdata from IMDS")
try:
- crawled_data['userdata_raw'] = base64.b64decode(
- ''.join(imds_userdata.split()))
+ crawled_data["userdata_raw"] = base64.b64decode(
+ "".join(imds_userdata.split())
+ )
except Exception:
report_diagnostic_event(
- "Bad userdata in IMDS",
- logger_func=LOG.warning)
+ "Bad userdata in IMDS", logger_func=LOG.warning
+ )
if not metadata_source:
- msg = 'No Azure metadata found'
+ msg = "No Azure metadata found"
report_diagnostic_event(msg, logger_func=LOG.error)
raise sources.InvalidMetaDataException(msg)
else:
report_diagnostic_event(
- 'found datasource in %s' % metadata_source,
- logger_func=LOG.debug)
+ "found datasource in %s" % metadata_source,
+ logger_func=LOG.debug,
+ )
if metadata_source == ddir:
report_diagnostic_event(
- "using files cached in %s" % ddir, logger_func=LOG.debug)
+ "using files cached in %s" % ddir, logger_func=LOG.debug
+ )
seed = _get_random_seed()
if seed:
- crawled_data['metadata']['random_seed'] = seed
- crawled_data['metadata']['instance-id'] = self._iid()
+ crawled_data["metadata"]["random_seed"] = seed
+ crawled_data["metadata"]["instance-id"] = self._iid()
if perform_reprovision or perform_reprovision_after_nic_attach:
LOG.info("Reporting ready to Azure after getting ReprovisionData")
- use_cached_ephemeral = (
- self.distro.networking.is_up(self.fallback_interface) and
- getattr(self, '_ephemeral_dhcp_ctx', None))
+ use_cached_ephemeral = self.distro.networking.is_up(
+ self.fallback_interface
+ ) and getattr(self, "_ephemeral_dhcp_ctx", None)
if use_cached_ephemeral:
self._report_ready(lease=self._ephemeral_dhcp_ctx.lease)
self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral
else:
try:
with EphemeralDHCPv4WithReporting(
- azure_ds_reporter) as lease:
+ azure_ds_reporter
+ ) as lease:
self._report_ready(lease=lease)
except Exception as e:
report_diagnostic_event(
"exception while reporting ready: %s" % e,
- logger_func=LOG.error)
+ logger_func=LOG.error,
+ )
raise
return crawled_data
@@ -559,19 +571,24 @@ class DataSourceAzure(sources.DataSource):
try:
crawled_data = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self.crawl_metadata
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self.crawl_metadata,
)
except Exception as e:
report_diagnostic_event(
- 'Could not crawl Azure metadata: %s' % e,
- logger_func=LOG.error)
+ "Could not crawl Azure metadata: %s" % e, logger_func=LOG.error
+ )
self._report_failure(
- description=DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE)
+ description=DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
+ )
return False
- if (self.distro and self.distro.name == 'ubuntu' and
- self.ds_cfg.get('apply_network_config')):
+ if (
+ self.distro
+ and self.distro.name == "ubuntu"
+ and self.ds_cfg.get("apply_network_config")
+ ):
maybe_remove_ubuntu_network_config_scripts()
# Process crawled data and augment with various config defaults
@@ -584,21 +601,25 @@ class DataSourceAzure(sources.DataSource):
"Ephemeral resource disk '%s' exists. "
"Merging default Azure cloud ephemeral disk configs."
% devpath,
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
self.cfg = util.mergemanydict(
- [crawled_data['cfg'], BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG])
+ [crawled_data["cfg"], BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG]
+ )
else:
report_diagnostic_event(
"Ephemeral resource disk '%s' does not exist. "
"Not merging default Azure cloud ephemeral disk configs."
% devpath,
- logger_func=LOG.debug)
- self.cfg = crawled_data['cfg']
+ logger_func=LOG.debug,
+ )
+ self.cfg = crawled_data["cfg"]
- self._metadata_imds = crawled_data['metadata']['imds']
+ self._metadata_imds = crawled_data["metadata"]["imds"]
self.metadata = util.mergemanydict(
- [crawled_data['metadata'], DEFAULT_METADATA])
- self.userdata_raw = crawled_data['userdata_raw']
+ [crawled_data["metadata"], DEFAULT_METADATA]
+ )
+ self.userdata_raw = crawled_data["userdata_raw"]
user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
@@ -606,17 +627,19 @@ class DataSourceAzure(sources.DataSource):
# walinux agent writes files world readable, but expects
# the directory to be protected.
write_files(
- self.ds_cfg['data_dir'], crawled_data['files'], dirmode=0o700)
+ self.ds_cfg["data_dir"], crawled_data["files"], dirmode=0o700
+ )
return True
@azure_ds_telemetry_reporter
def get_imds_data_with_api_fallback(
- self,
- fallback_nic,
- retries,
- md_type=metadata_type.all,
- exc_cb=retry_on_url_exc,
- infinite=False):
+ self,
+ fallback_nic,
+ retries,
+ md_type=metadata_type.all,
+ exc_cb=retry_on_url_exc,
+ infinite=False,
+ ):
"""
Wrapper for get_metadata_from_imds so that we can have flexibility
in which IMDS api-version we use. If a particular instance of IMDS
@@ -628,30 +651,23 @@ class DataSourceAzure(sources.DataSource):
if not self.failed_desired_api_version:
for _ in range(retries):
try:
- LOG.info(
- "Attempting IMDS api-version: %s",
- IMDS_VER_WANT
- )
+ LOG.info("Attempting IMDS api-version: %s", IMDS_VER_WANT)
return get_metadata_from_imds(
fallback_nic=fallback_nic,
retries=0,
md_type=md_type,
api_version=IMDS_VER_WANT,
- exc_cb=exc_cb
+ exc_cb=exc_cb,
)
except UrlError as err:
LOG.info(
- "UrlError with IMDS api-version: %s",
- IMDS_VER_WANT
+ "UrlError with IMDS api-version: %s", IMDS_VER_WANT
)
if err.code == 400:
log_msg = "Fall back to IMDS api-version: {}".format(
IMDS_VER_MIN
)
- report_diagnostic_event(
- log_msg,
- logger_func=LOG.info
- )
+ report_diagnostic_event(log_msg, logger_func=LOG.info)
self.failed_desired_api_version = True
break
@@ -662,11 +678,11 @@ class DataSourceAzure(sources.DataSource):
md_type=md_type,
api_version=IMDS_VER_MIN,
exc_cb=exc_cb,
- infinite=infinite
+ infinite=infinite,
)
def device_name_to_device(self, name):
- return self.ds_cfg['disk_aliases'].get(name)
+ return self.ds_cfg["disk_aliases"].get(name)
@azure_ds_telemetry_reporter
def get_public_ssh_keys(self):
@@ -687,15 +703,16 @@ class DataSourceAzure(sources.DataSource):
OVF as a second option for environments that don't have IMDS.
"""
- LOG.debug('Retrieving public SSH keys')
+ LOG.debug("Retrieving public SSH keys")
ssh_keys = []
keys_from_imds = True
- LOG.debug('Attempting to get SSH keys from IMDS')
+ LOG.debug("Attempting to get SSH keys from IMDS")
try:
ssh_keys = [
- public_key['keyData']
- for public_key
- in self.metadata['imds']['compute']['publicKeys']
+ public_key["keyData"]
+ for public_key in self.metadata["imds"]["compute"][
+ "publicKeys"
+ ]
]
for key in ssh_keys:
if not _key_is_openssh_formatted(key=key):
@@ -703,33 +720,28 @@ class DataSourceAzure(sources.DataSource):
break
if not keys_from_imds:
- log_msg = 'Keys not in OpenSSH format, using OVF'
+ log_msg = "Keys not in OpenSSH format, using OVF"
else:
- log_msg = 'Retrieved {} keys from IMDS'.format(
- len(ssh_keys)
- if ssh_keys is not None
- else 0
+ log_msg = "Retrieved {} keys from IMDS".format(
+ len(ssh_keys) if ssh_keys is not None else 0
)
except KeyError:
- log_msg = 'Unable to get keys from IMDS, falling back to OVF'
+ log_msg = "Unable to get keys from IMDS, falling back to OVF"
keys_from_imds = False
finally:
report_diagnostic_event(log_msg, logger_func=LOG.debug)
if not keys_from_imds:
- LOG.debug('Attempting to get SSH keys from OVF')
+ LOG.debug("Attempting to get SSH keys from OVF")
try:
- ssh_keys = self.metadata['public-keys']
- log_msg = 'Retrieved {} keys from OVF'.format(len(ssh_keys))
+ ssh_keys = self.metadata["public-keys"]
+ log_msg = "Retrieved {} keys from OVF".format(len(ssh_keys))
except KeyError:
- log_msg = 'No keys available from OVF'
+ log_msg = "No keys available from OVF"
finally:
report_diagnostic_event(log_msg, logger_func=LOG.debug)
- return SSHKeys(
- keys_from_imds=keys_from_imds,
- ssh_keys=ssh_keys
- )
+ return SSHKeys(keys_from_imds=keys_from_imds, ssh_keys=ssh_keys)
def get_config_obj(self):
return self.cfg
@@ -740,12 +752,13 @@ class DataSourceAzure(sources.DataSource):
def _iid(self, previous=None):
prev_iid_path = os.path.join(
- self.paths.get_cpath('data'), 'instance-id')
+ self.paths.get_cpath("data"), "instance-id"
+ )
# Older kernels than 4.15 will have UPPERCASE product_uuid.
# We don't want Azure to react to an UPPER/lower difference as a new
# instance id as it rewrites SSH host keys.
# LP: #1835584
- iid = dmi.read_dmi_data('system-uuid').lower()
+ iid = dmi.read_dmi_data("system-uuid").lower()
if os.path.exists(prev_iid_path):
previous = util.load_file(prev_iid_path).strip()
if previous.lower() == iid:
@@ -759,22 +772,26 @@ class DataSourceAzure(sources.DataSource):
@azure_ds_telemetry_reporter
def setup(self, is_new_instance):
if self._negotiated is False:
- LOG.debug("negotiating for %s (new_instance=%s)",
- self.get_instance_id(), is_new_instance)
+ LOG.debug(
+ "negotiating for %s (new_instance=%s)",
+ self.get_instance_id(),
+ is_new_instance,
+ )
fabric_data = self._negotiate()
LOG.debug("negotiating returned %s", fabric_data)
if fabric_data:
self.metadata.update(fabric_data)
self._negotiated = True
else:
- LOG.debug("negotiating already done for %s",
- self.get_instance_id())
+ LOG.debug(
+ "negotiating already done for %s", self.get_instance_id()
+ )
@azure_ds_telemetry_reporter
def _wait_for_nic_detach(self, nl_sock):
"""Use the netlink socket provided to wait for nic detach event.
- NOTE: The function doesn't close the socket. The caller owns closing
- the socket and disposing it safely.
+ NOTE: The function doesn't close the socket. The caller owns closing
+ the socket and disposing it safely.
"""
try:
ifname = None
@@ -782,21 +799,27 @@ class DataSourceAzure(sources.DataSource):
# Preprovisioned VM will only have one NIC, and it gets
# detached immediately after deployment.
with events.ReportEventStack(
- name="wait-for-nic-detach",
- description=("wait for nic detach"),
- parent=azure_ds_reporter):
+ name="wait-for-nic-detach",
+ description="wait for nic detach",
+ parent=azure_ds_reporter,
+ ):
ifname = netlink.wait_for_nic_detach_event(nl_sock)
if ifname is None:
- msg = ("Preprovisioned nic not detached as expected. "
- "Proceeding without failing.")
+ msg = (
+ "Preprovisioned nic not detached as expected. "
+ "Proceeding without failing."
+ )
report_diagnostic_event(msg, logger_func=LOG.warning)
else:
- report_diagnostic_event("The preprovisioned nic %s is detached"
- % ifname, logger_func=LOG.warning)
+ report_diagnostic_event(
+ "The preprovisioned nic %s is detached" % ifname,
+ logger_func=LOG.warning,
+ )
path = REPROVISION_NIC_DETACHED_MARKER_FILE
LOG.info("Creating a marker file for nic detached: %s", path)
- util.write_file(path, "{pid}: {time}\n".format(
- pid=os.getpid(), time=time()))
+ util.write_file(
+ path, "{pid}: {time}\n".format(pid=os.getpid(), time=time())
+ )
except AssertionError as error:
report_diagnostic_event(error, logger_func=LOG.error)
raise
@@ -804,14 +827,15 @@ class DataSourceAzure(sources.DataSource):
@azure_ds_telemetry_reporter
def wait_for_link_up(self, ifname):
"""In cases where the link state is still showing down after a nic is
- hot-attached, we can attempt to bring it up by forcing the hv_netvsc
- drivers to query the link state by unbinding and then binding the
- device. This function attempts infinitely until the link is up,
- because we cannot proceed further until we have a stable link."""
+ hot-attached, we can attempt to bring it up by forcing the hv_netvsc
+ drivers to query the link state by unbinding and then binding the
+ device. This function attempts infinitely until the link is up,
+ because we cannot proceed further until we have a stable link."""
if self.distro.networking.try_set_link_up(ifname):
- report_diagnostic_event("The link %s is already up." % ifname,
- logger_func=LOG.info)
+ report_diagnostic_event(
+ "The link %s is already up." % ifname, logger_func=LOG.info
+ )
return
LOG.debug("Attempting to bring %s up", ifname)
@@ -820,22 +844,27 @@ class DataSourceAzure(sources.DataSource):
LOG.info("Unbinding and binding the interface %s", ifname)
while True:
- devicename = net.read_sys_net(ifname,
- 'device/device_id').strip('{}')
- util.write_file('/sys/bus/vmbus/drivers/hv_netvsc/unbind',
- devicename)
- util.write_file('/sys/bus/vmbus/drivers/hv_netvsc/bind',
- devicename)
+ devicename = net.read_sys_net(ifname, "device/device_id").strip(
+ "{}"
+ )
+ util.write_file(
+ "/sys/bus/vmbus/drivers/hv_netvsc/unbind", devicename
+ )
+ util.write_file(
+ "/sys/bus/vmbus/drivers/hv_netvsc/bind", devicename
+ )
attempts = attempts + 1
if self.distro.networking.try_set_link_up(ifname):
- msg = "The link %s is up after %s attempts" % (ifname,
- attempts)
+ msg = "The link %s is up after %s attempts" % (
+ ifname,
+ attempts,
+ )
report_diagnostic_event(msg, logger_func=LOG.info)
return
if attempts % 10 == 0:
- msg = ("Link is not up after %d attempts to rebind" % attempts)
+ msg = "Link is not up after %d attempts to rebind" % attempts
report_diagnostic_event(msg, logger_func=LOG.info)
LOG.info(msg)
@@ -844,13 +873,17 @@ class DataSourceAzure(sources.DataSource):
# again.
sleep_duration = 0.5
max_status_polls = 20
- LOG.debug("Polling %d seconds for primary NIC link up after "
- "rebind.", sleep_duration * max_status_polls)
+ LOG.debug(
+ "Polling %d seconds for primary NIC link up after rebind.",
+ sleep_duration * max_status_polls,
+ )
for i in range(0, max_status_polls):
if self.distro.networking.is_up(ifname):
- msg = ("After %d attempts to rebind, link is up after "
- "polling the link status %d times" % (attempts, i))
+ msg = (
+ "After %d attempts to rebind, link is up after "
+ "polling the link status %d times" % (attempts, i)
+ )
report_diagnostic_event(msg, logger_func=LOG.info)
LOG.debug(msg)
return
@@ -860,40 +893,47 @@ class DataSourceAzure(sources.DataSource):
@azure_ds_telemetry_reporter
def _create_report_ready_marker(self):
path = REPORTED_READY_MARKER_FILE
- LOG.info(
- "Creating a marker file to report ready: %s", path)
- util.write_file(path, "{pid}: {time}\n".format(
- pid=os.getpid(), time=time()))
+ LOG.info("Creating a marker file to report ready: %s", path)
+ util.write_file(
+ path, "{pid}: {time}\n".format(pid=os.getpid(), time=time())
+ )
report_diagnostic_event(
- 'Successfully created reported ready marker file '
- 'while in the preprovisioning pool.',
- logger_func=LOG.debug)
+ "Successfully created reported ready marker file "
+ "while in the preprovisioning pool.",
+ logger_func=LOG.debug,
+ )
@azure_ds_telemetry_reporter
def _report_ready_if_needed(self):
"""Report ready to the platform if the marker file is not present,
and create the marker file.
"""
- have_not_reported_ready = (
- not os.path.isfile(REPORTED_READY_MARKER_FILE))
+ have_not_reported_ready = not os.path.isfile(
+ REPORTED_READY_MARKER_FILE
+ )
if have_not_reported_ready:
- report_diagnostic_event("Reporting ready before nic detach",
- logger_func=LOG.info)
+ report_diagnostic_event(
+ "Reporting ready before nic detach", logger_func=LOG.info
+ )
try:
with EphemeralDHCPv4WithReporting(azure_ds_reporter) as lease:
self._report_ready(lease=lease)
except Exception as e:
- report_diagnostic_event("Exception reporting ready during "
- "preprovisioning before nic detach: %s"
- % e, logger_func=LOG.error)
+ report_diagnostic_event(
+ "Exception reporting ready during "
+ "preprovisioning before nic detach: %s" % e,
+ logger_func=LOG.error,
+ )
raise
self._create_report_ready_marker()
else:
- report_diagnostic_event("Already reported ready before nic detach."
- " The marker file already exists: %s" %
- REPORTED_READY_MARKER_FILE,
- logger_func=LOG.error)
+ report_diagnostic_event(
+ "Already reported ready before nic detach."
+ " The marker file already exists: %s"
+ % REPORTED_READY_MARKER_FILE,
+ logger_func=LOG.error,
+ )
@azure_ds_telemetry_reporter
def _check_if_nic_is_primary(self, ifname):
@@ -915,20 +955,26 @@ class DataSourceAzure(sources.DataSource):
# the primary NIC.
try:
with events.ReportEventStack(
- name="obtain-dhcp-lease",
- description=("obtain dhcp lease for %s when attempting to "
- "determine primary NIC during reprovision of "
- "a pre-provisioned VM" % ifname),
- parent=azure_ds_reporter):
+ name="obtain-dhcp-lease",
+ description=(
+ "obtain dhcp lease for %s when attempting to "
+ "determine primary NIC during reprovision of "
+ "a pre-provisioned VM"
+ )
+ % ifname,
+ parent=azure_ds_reporter,
+ ):
dhcp_ctx = EphemeralDHCPv4(
- iface=ifname,
- dhcp_log_func=dhcp_log_cb)
+ iface=ifname, dhcp_log_func=dhcp_log_cb
+ )
dhcp_ctx.obtain_lease()
except Exception as e:
- report_diagnostic_event("Giving up. Failed to obtain dhcp lease "
- "for %s when attempting to determine "
- "primary NIC during reprovision due to %s"
- % (ifname, e), logger_func=LOG.error)
+ report_diagnostic_event(
+ "Giving up. Failed to obtain dhcp lease "
+ "for %s when attempting to determine "
+ "primary NIC during reprovision due to %s" % (ifname, e),
+ logger_func=LOG.error,
+ )
raise
# Retry polling network metadata for a limited duration only when the
@@ -953,13 +999,15 @@ class DataSourceAzure(sources.DataSource):
report_diagnostic_event(
"Ran into exception when attempting to reach %s "
"after %d polls." % (msg, metadata_poll_count),
- logger_func=LOG.error)
+ logger_func=LOG.error,
+ )
if isinstance(exc, UrlError):
- report_diagnostic_event("poll IMDS with %s failed. "
- "Exception: %s and code: %s" %
- (msg, exc.cause, exc.code),
- logger_func=LOG.error)
+ report_diagnostic_event(
+ "poll IMDS with %s failed. Exception: %s and code: %s"
+ % (msg, exc.cause, exc.code),
+ logger_func=LOG.error,
+ )
# Retry up to a certain limit for both timeout and network
# unreachable errors.
@@ -967,7 +1015,7 @@ class DataSourceAzure(sources.DataSource):
exc.cause, (requests.Timeout, requests.ConnectionError)
):
expected_errors_count = expected_errors_count + 1
- return (expected_errors_count <= 10)
+ return expected_errors_count <= 10
return True
# Primary nic detection will be optimized in the future. The fact that
@@ -975,17 +1023,16 @@ class DataSourceAzure(sources.DataSource):
# could add several seconds of delay.
try:
imds_md = self.get_imds_data_with_api_fallback(
- ifname,
- 0,
- metadata_type.network,
- network_metadata_exc_cb,
- True
+ ifname, 0, metadata_type.network, network_metadata_exc_cb, True
)
except Exception as e:
LOG.warning(
"Failed to get network metadata using nic %s. Attempt to "
"contact IMDS failed with error %s. Assuming this is not the "
- "primary nic.", ifname, e)
+ "primary nic.",
+ ifname,
+ e,
+ )
finally:
# If we are not the primary nic, then clean the dhcp context.
if imds_md is None:
@@ -1000,10 +1047,11 @@ class DataSourceAzure(sources.DataSource):
self._ephemeral_dhcp_ctx = dhcp_ctx
# Set the expected nic count based on the response received.
- expected_nic_count = len(
- imds_md['interface'])
- report_diagnostic_event("Expected nic count: %d" %
- expected_nic_count, logger_func=LOG.info)
+ expected_nic_count = len(imds_md["interface"])
+ report_diagnostic_event(
+ "Expected nic count: %d" % expected_nic_count,
+ logger_func=LOG.info,
+ )
return is_primary, expected_nic_count
@@ -1028,17 +1076,22 @@ class DataSourceAzure(sources.DataSource):
while True:
ifname = None
with events.ReportEventStack(
- name="wait-for-nic-attach",
- description=("wait for nic attach after %d nics have "
- "been attached" % len(nics_found)),
- parent=azure_ds_reporter):
- ifname = netlink.wait_for_nic_attach_event(nl_sock,
- nics_found)
+ name="wait-for-nic-attach",
+ description=(
+ "wait for nic attach after %d nics have been attached"
+ % len(nics_found)
+ ),
+ parent=azure_ds_reporter,
+ ):
+ ifname = netlink.wait_for_nic_attach_event(
+ nl_sock, nics_found
+ )
# wait_for_nic_attach_event guarantees that ifname it not None
nics_found.append(ifname)
- report_diagnostic_event("Detected nic %s attached." % ifname,
- logger_func=LOG.info)
+ report_diagnostic_event(
+ "Detected nic %s attached." % ifname, logger_func=LOG.info
+ )
# Attempt to bring the interface's operating state to
# UP in case it is not already.
@@ -1048,14 +1101,17 @@ class DataSourceAzure(sources.DataSource):
# platform will attach the primary nic first so we
# won't be in primary_nic_found = false state for long.
if not primary_nic_found:
- LOG.info("Checking if %s is the primary nic",
- ifname)
- (primary_nic_found, expected_nic_count) = (
- self._check_if_nic_is_primary(ifname))
+ LOG.info("Checking if %s is the primary nic", ifname)
+ (
+ primary_nic_found,
+ expected_nic_count,
+ ) = self._check_if_nic_is_primary(ifname)
# Exit criteria: check if we've discovered all nics
- if (expected_nic_count != -1
- and len(nics_found) >= expected_nic_count):
+ if (
+ expected_nic_count != -1
+ and len(nics_found) >= expected_nic_count
+ ):
LOG.info("Found all the nics for this VM.")
break
@@ -1065,9 +1121,9 @@ class DataSourceAzure(sources.DataSource):
@azure_ds_telemetry_reporter
def _wait_for_all_nics_ready(self):
"""Wait for nic(s) to be hot-attached. There may be multiple nics
- depending on the customer request.
- But only primary nic would be able to communicate with wireserver
- and IMDS. So we detect and save the primary nic to be used later.
+ depending on the customer request.
+ But only primary nic would be able to communicate with wireserver
+ and IMDS. So we detect and save the primary nic to be used later.
"""
nl_sock = None
@@ -1075,7 +1131,8 @@ class DataSourceAzure(sources.DataSource):
nl_sock = netlink.create_bound_netlink_socket()
report_ready_marker_present = bool(
- os.path.isfile(REPORTED_READY_MARKER_FILE))
+ os.path.isfile(REPORTED_READY_MARKER_FILE)
+ )
# Report ready if the marker file is not already present.
# The nic of the preprovisioned vm gets hot-detached as soon as
@@ -1083,7 +1140,8 @@ class DataSourceAzure(sources.DataSource):
self._report_ready_if_needed()
has_nic_been_detached = bool(
- os.path.isfile(REPROVISION_NIC_DETACHED_MARKER_FILE))
+ os.path.isfile(REPROVISION_NIC_DETACHED_MARKER_FILE)
+ )
if not has_nic_been_detached:
LOG.info("NIC has not been detached yet.")
@@ -1097,12 +1155,14 @@ class DataSourceAzure(sources.DataSource):
if not self.fallback_interface:
self._wait_for_hot_attached_nics(nl_sock)
else:
- report_diagnostic_event("Skipping waiting for nic attach "
- "because we already have a fallback "
- "interface. Report Ready marker "
- "present before detaching nics: %s" %
- report_ready_marker_present,
- logger_func=LOG.info)
+ report_diagnostic_event(
+ "Skipping waiting for nic attach "
+ "because we already have a fallback "
+ "interface. Report Ready marker "
+ "present before detaching nics: %s"
+ % report_ready_marker_present,
+ logger_func=LOG.info,
+ )
except netlink.NetlinkCreateSocketError as e:
report_diagnostic_event(e, logger_func=LOG.warning)
raise
@@ -1115,8 +1175,7 @@ class DataSourceAzure(sources.DataSource):
"""Poll IMDS for the new provisioning data until we get a valid
response. Then return the returned JSON object."""
url = "{}?api-version={}".format(
- metadata_type.reprovisiondata.value,
- IMDS_VER_MIN
+ metadata_type.reprovisiondata.value, IMDS_VER_MIN
)
headers = {"Metadata": "true"}
nl_sock = None
@@ -1133,38 +1192,44 @@ class DataSourceAzure(sources.DataSource):
if self.imds_poll_counter == self.imds_logging_threshold:
# Reducing the logging frequency as we are polling IMDS
self.imds_logging_threshold *= 2
- LOG.debug("Backing off logging threshold for the same "
- "exception to %d",
- self.imds_logging_threshold)
- report_diagnostic_event("poll IMDS with %s failed. "
- "Exception: %s and code: %s" %
- (msg, exception.cause,
- exception.code),
- logger_func=LOG.debug)
+ LOG.debug(
+ "Backing off logging threshold for the same "
+ "exception to %d",
+ self.imds_logging_threshold,
+ )
+ report_diagnostic_event(
+ "poll IMDS with %s failed. "
+ "Exception: %s and code: %s"
+ % (msg, exception.cause, exception.code),
+ logger_func=LOG.debug,
+ )
self.imds_poll_counter += 1
return True
else:
# If we get an exception while trying to call IMDS, we call
# DHCP and setup the ephemeral network to acquire a new IP.
- report_diagnostic_event("poll IMDS with %s failed. "
- "Exception: %s and code: %s" %
- (msg, exception.cause,
- exception.code),
- logger_func=LOG.warning)
+ report_diagnostic_event(
+ "poll IMDS with %s failed. Exception: %s and code: %s"
+ % (msg, exception.cause, exception.code),
+ logger_func=LOG.warning,
+ )
return False
report_diagnostic_event(
- "poll IMDS failed with an "
- "unexpected exception: %s" % exception,
- logger_func=LOG.warning)
+ "poll IMDS failed with an unexpected exception: %s"
+ % exception,
+ logger_func=LOG.warning,
+ )
return False
# When the interface is hot-attached, we would have already
# done dhcp and set the dhcp context. In that case, skip
# the attempt to do dhcp.
is_ephemeral_ctx_present = self._ephemeral_dhcp_ctx is not None
- msg = ("Unexpected error. Dhcp context is not expected to be already "
- "set when we need to wait for vnet switch")
+ msg = (
+ "Unexpected error. Dhcp context is not expected to be already "
+ "set when we need to wait for vnet switch"
+ )
if is_ephemeral_ctx_present and report_ready:
report_diagnostic_event(msg, logger_func=LOG.error)
raise RuntimeError(msg)
@@ -1178,11 +1243,13 @@ class DataSourceAzure(sources.DataSource):
# Save our EphemeralDHCPv4 context to avoid repeated dhcp
# later when we report ready
with events.ReportEventStack(
- name="obtain-dhcp-lease",
- description="obtain dhcp lease",
- parent=azure_ds_reporter):
+ name="obtain-dhcp-lease",
+ description="obtain dhcp lease",
+ parent=azure_ds_reporter,
+ ):
self._ephemeral_dhcp_ctx = EphemeralDHCPv4(
- dhcp_log_func=dhcp_log_cb)
+ dhcp_log_func=dhcp_log_cb
+ )
lease = self._ephemeral_dhcp_ctx.obtain_lease()
if vnet_switched:
@@ -1192,15 +1259,18 @@ class DataSourceAzure(sources.DataSource):
nl_sock = netlink.create_bound_netlink_socket()
except netlink.NetlinkCreateSocketError as e:
report_diagnostic_event(
- 'Failed to create bound netlink socket: %s' % e,
- logger_func=LOG.warning)
+ "Failed to create bound netlink socket: %s" % e,
+ logger_func=LOG.warning,
+ )
self._ephemeral_dhcp_ctx.clean_network()
break
report_ready_succeeded = self._report_ready(lease=lease)
if not report_ready_succeeded:
- msg = ('Failed reporting ready while in '
- 'the preprovisioning pool.')
+ msg = (
+ "Failed reporting ready while in "
+ "the preprovisioning pool."
+ )
report_diagnostic_event(msg, logger_func=LOG.error)
self._ephemeral_dhcp_ctx.clean_network()
raise sources.InvalidMetaDataException(msg)
@@ -1210,31 +1280,37 @@ class DataSourceAzure(sources.DataSource):
LOG.debug("Wait for vnetswitch to happen")
with events.ReportEventStack(
- name="wait-for-media-disconnect-connect",
- description="wait for vnet switch",
- parent=azure_ds_reporter):
+ name="wait-for-media-disconnect-connect",
+ description="wait for vnet switch",
+ parent=azure_ds_reporter,
+ ):
try:
netlink.wait_for_media_disconnect_connect(
- nl_sock, lease['interface'])
+ nl_sock, lease["interface"]
+ )
except AssertionError as e:
report_diagnostic_event(
- 'Error while waiting for vnet switch: %s' % e,
- logger_func=LOG.error)
+ "Error while waiting for vnet switch: %s" % e,
+ logger_func=LOG.error,
+ )
break
vnet_switched = True
self._ephemeral_dhcp_ctx.clean_network()
else:
with events.ReportEventStack(
- name="get-reprovision-data-from-imds",
- description="get reprovision data from imds",
- parent=azure_ds_reporter):
- return_val = readurl(url,
- timeout=IMDS_TIMEOUT_IN_SECONDS,
- headers=headers,
- exception_cb=exc_cb,
- infinite=True,
- log_req_resp=False).contents
+ name="get-reprovision-data-from-imds",
+ description="get reprovision data from imds",
+ parent=azure_ds_reporter,
+ ):
+ return_val = readurl(
+ url,
+ timeout=IMDS_TIMEOUT_IN_SECONDS,
+ headers=headers,
+ exception_cb=exc_cb,
+ infinite=True,
+ log_req_resp=False,
+ ).contents
break
except UrlError:
# Teardown our EphemeralDHCPv4 context on failure as we retry
@@ -1248,12 +1324,14 @@ class DataSourceAzure(sources.DataSource):
nl_sock.close()
if vnet_switched:
- report_diagnostic_event("attempted dhcp %d times after reuse" %
- dhcp_attempts,
- logger_func=LOG.debug)
- report_diagnostic_event("polled imds %d times after reuse" %
- self.imds_poll_counter,
- logger_func=LOG.debug)
+ report_diagnostic_event(
+ "attempted dhcp %d times after reuse" % dhcp_attempts,
+ logger_func=LOG.debug,
+ )
+ report_diagnostic_event(
+ "polled imds %d times after reuse" % self.imds_poll_counter,
+ logger_func=LOG.debug,
+ )
return return_val
@@ -1264,52 +1342,63 @@ class DataSourceAzure(sources.DataSource):
@param description: A description of the error encountered.
@return: The success status of sending the failure signal.
"""
- unknown_245_key = 'unknown-245'
+ unknown_245_key = "unknown-245"
try:
- if (self.distro.networking.is_up(self.fallback_interface) and
- getattr(self, '_ephemeral_dhcp_ctx', None) and
- getattr(self._ephemeral_dhcp_ctx, 'lease', None) and
- unknown_245_key in self._ephemeral_dhcp_ctx.lease):
+ if (
+ self.distro.networking.is_up(self.fallback_interface)
+ and getattr(self, "_ephemeral_dhcp_ctx", None)
+ and getattr(self._ephemeral_dhcp_ctx, "lease", None)
+ and unknown_245_key in self._ephemeral_dhcp_ctx.lease
+ ):
report_diagnostic_event(
- 'Using cached ephemeral dhcp context '
- 'to report failure to Azure', logger_func=LOG.debug)
+ "Using cached ephemeral dhcp context "
+ "to report failure to Azure",
+ logger_func=LOG.debug,
+ )
report_failure_to_fabric(
dhcp_opts=self._ephemeral_dhcp_ctx.lease[unknown_245_key],
- description=description)
+ description=description,
+ )
self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral
return True
except Exception as e:
report_diagnostic_event(
- 'Failed to report failure using '
- 'cached ephemeral dhcp context: %s' % e,
- logger_func=LOG.error)
+ "Failed to report failure using "
+ "cached ephemeral dhcp context: %s" % e,
+ logger_func=LOG.error,
+ )
try:
report_diagnostic_event(
- 'Using new ephemeral dhcp to report failure to Azure',
- logger_func=LOG.debug)
+ "Using new ephemeral dhcp to report failure to Azure",
+ logger_func=LOG.debug,
+ )
with EphemeralDHCPv4WithReporting(azure_ds_reporter) as lease:
report_failure_to_fabric(
- dhcp_opts=lease[unknown_245_key],
- description=description)
+ dhcp_opts=lease[unknown_245_key], description=description
+ )
return True
except Exception as e:
report_diagnostic_event(
- 'Failed to report failure using new ephemeral dhcp: %s' % e,
- logger_func=LOG.debug)
+ "Failed to report failure using new ephemeral dhcp: %s" % e,
+ logger_func=LOG.debug,
+ )
try:
report_diagnostic_event(
- 'Using fallback lease to report failure to Azure')
+ "Using fallback lease to report failure to Azure"
+ )
report_failure_to_fabric(
fallback_lease_file=self.dhclient_lease_file,
- description=description)
+ description=description,
+ )
return True
except Exception as e:
report_diagnostic_event(
- 'Failed to report failure using fallback lease: %s' % e,
- logger_func=LOG.debug)
+ "Failed to report failure using fallback lease: %s" % e,
+ logger_func=LOG.debug,
+ )
return False
@@ -1320,27 +1409,33 @@ class DataSourceAzure(sources.DataSource):
@return: The success status of sending the ready signal.
"""
try:
- get_metadata_from_fabric(fallback_lease_file=None,
- dhcp_opts=lease['unknown-245'],
- iso_dev=self.iso_dev)
+ get_metadata_from_fabric(
+ fallback_lease_file=None,
+ dhcp_opts=lease["unknown-245"],
+ iso_dev=self.iso_dev,
+ )
return True
except Exception as e:
report_diagnostic_event(
"Error communicating with Azure fabric; You may experience "
- "connectivity issues: %s" % e, logger_func=LOG.warning)
+ "connectivity issues: %s" % e,
+ logger_func=LOG.warning,
+ )
return False
def _ppstype_from_imds(self, imds_md: dict = None) -> str:
try:
- return imds_md['extended']['compute']['ppsType']
+ return imds_md["extended"]["compute"]["ppsType"]
except Exception as e:
report_diagnostic_event(
- "Could not retrieve pps configuration from IMDS: %s" %
- e, logger_func=LOG.debug)
+ "Could not retrieve pps configuration from IMDS: %s" % e,
+ logger_func=LOG.debug,
+ )
return None
def _should_reprovision_after_nic_attach(
- self, ovf_md, imds_md=None) -> bool:
+ self, ovf_md, imds_md=None
+ ) -> bool:
"""Whether or not we should wait for nic attach and then poll
IMDS for reprovisioning data. Also sets a marker file to poll IMDS.
@@ -1360,14 +1455,19 @@ class DataSourceAzure(sources.DataSource):
return False
(_md, _userdata_raw, cfg, _files) = ovf_md
path = REPROVISION_NIC_ATTACH_MARKER_FILE
- if (cfg.get('PreprovisionedVMType', None) == "Savable" or
- self._ppstype_from_imds(imds_md) == "Savable" or
- os.path.isfile(path)):
+ if (
+ cfg.get("PreprovisionedVMType", None) == "Savable"
+ or self._ppstype_from_imds(imds_md) == "Savable"
+ or os.path.isfile(path)
+ ):
if not os.path.isfile(path):
- LOG.info("Creating a marker file to wait for nic attach: %s",
- path)
- util.write_file(path, "{pid}: {time}\n".format(
- pid=os.getpid(), time=time()))
+ LOG.info(
+ "Creating a marker file to wait for nic attach: %s", path
+ )
+ util.write_file(
+ path,
+ "{pid}: {time}\n".format(pid=os.getpid(), time=time()),
+ )
return True
return False
@@ -1386,15 +1486,18 @@ class DataSourceAzure(sources.DataSource):
return False
(_md, _userdata_raw, cfg, _files) = ovf_md
path = REPROVISION_MARKER_FILE
- if (cfg.get('PreprovisionedVm') is True or
- cfg.get('PreprovisionedVMType', None) == 'Running' or
- self._ppstype_from_imds(imds_md) == "Running" or
- os.path.isfile(path)):
+ if (
+ cfg.get("PreprovisionedVm") is True
+ or cfg.get("PreprovisionedVMType", None) == "Running"
+ or self._ppstype_from_imds(imds_md) == "Running"
+ or os.path.isfile(path)
+ ):
if not os.path.isfile(path):
- LOG.info("Creating a marker file to poll imds: %s",
- path)
- util.write_file(path, "{pid}: {time}\n".format(
- pid=os.getpid(), time=time()))
+ LOG.info("Creating a marker file to poll imds: %s", path)
+ util.write_file(
+ path,
+ "{pid}: {time}\n".format(pid=os.getpid(), time=time()),
+ )
return True
return False
@@ -1402,35 +1505,35 @@ class DataSourceAzure(sources.DataSource):
"""Initiate the reprovisioning workflow."""
contents = self._poll_imds()
with events.ReportEventStack(
- name="reprovisioning-read-azure-ovf",
- description="read azure ovf during reprovisioning",
- parent=azure_ds_reporter):
+ name="reprovisioning-read-azure-ovf",
+ description="read azure ovf during reprovisioning",
+ parent=azure_ds_reporter,
+ ):
md, ud, cfg = read_azure_ovf(contents)
- return (md, ud, cfg, {'ovf-env.xml': contents})
+ return (md, ud, cfg, {"ovf-env.xml": contents})
@azure_ds_telemetry_reporter
def _negotiate(self):
"""Negotiate with fabric and return data from it.
- On success, returns a dictionary including 'public_keys'.
- On failure, returns False.
+ On success, returns a dictionary including 'public_keys'.
+ On failure, returns False.
"""
pubkey_info = None
ssh_keys_and_source = self._get_public_ssh_keys_and_source()
if not ssh_keys_and_source.keys_from_imds:
- pubkey_info = self.cfg.get('_pubkeys', None)
- log_msg = 'Retrieved {} fingerprints from OVF'.format(
- len(pubkey_info)
- if pubkey_info is not None
- else 0
+ pubkey_info = self.cfg.get("_pubkeys", None)
+ log_msg = "Retrieved {} fingerprints from OVF".format(
+ len(pubkey_info) if pubkey_info is not None else 0
)
report_diagnostic_event(log_msg, logger_func=LOG.debug)
- metadata_func = partial(get_metadata_from_fabric,
- fallback_lease_file=self.
- dhclient_lease_file,
- pubkey_info=pubkey_info)
+ metadata_func = partial(
+ get_metadata_from_fabric,
+ fallback_lease_file=self.dhclient_lease_file,
+ pubkey_info=pubkey_info,
+ )
LOG.debug("negotiating with fabric")
try:
@@ -1438,7 +1541,9 @@ class DataSourceAzure(sources.DataSource):
except Exception as e:
report_diagnostic_event(
"Error communicating with Azure fabric; You may experience "
- "connectivity issues: %s" % e, logger_func=LOG.warning)
+ "connectivity issues: %s" % e,
+ logger_func=LOG.warning,
+ )
return False
util.del_file(REPORTED_READY_MARKER_FILE)
@@ -1450,30 +1555,34 @@ class DataSourceAzure(sources.DataSource):
@azure_ds_telemetry_reporter
def activate(self, cfg, is_new_instance):
try:
- address_ephemeral_resize(is_new_instance=is_new_instance,
- preserve_ntfs=self.ds_cfg.get(
- DS_CFG_KEY_PRESERVE_NTFS, False))
+ address_ephemeral_resize(
+ is_new_instance=is_new_instance,
+ preserve_ntfs=self.ds_cfg.get(DS_CFG_KEY_PRESERVE_NTFS, False),
+ )
finally:
- push_log_to_kvp(self.sys_cfg['def_log_file'])
+ push_log_to_kvp(self.sys_cfg["def_log_file"])
return
@property
def availability_zone(self):
- return self.metadata.get(
- 'imds', {}).get('compute', {}).get('platformFaultDomain')
+ return (
+ self.metadata.get("imds", {})
+ .get("compute", {})
+ .get("platformFaultDomain")
+ )
@property
def network_config(self):
"""Generate a network config like net.generate_fallback_network() with
- the following exceptions.
+ the following exceptions.
- 1. Probe the drivers of the net-devices present and inject them in
- the network configuration under params: driver: <driver> value
- 2. Generate a fallback network config that does not include any of
- the blacklisted devices.
+ 1. Probe the drivers of the net-devices present and inject them in
+ the network configuration under params: driver: <driver> value
+ 2. Generate a fallback network config that does not include any of
+ the blacklisted devices.
"""
if not self._network_config or self._network_config == sources.UNSET:
- if self.ds_cfg.get('apply_network_config'):
+ if self.ds_cfg.get("apply_network_config"):
nc_src = self._metadata_imds
else:
nc_src = None
@@ -1482,33 +1591,36 @@ class DataSourceAzure(sources.DataSource):
@property
def region(self):
- return self.metadata.get('imds', {}).get('compute', {}).get('location')
+ return self.metadata.get("imds", {}).get("compute", {}).get("location")
def _username_from_imds(imds_data):
try:
- return imds_data['compute']['osProfile']['adminUsername']
+ return imds_data["compute"]["osProfile"]["adminUsername"]
except KeyError:
return None
def _userdata_from_imds(imds_data):
try:
- return imds_data['compute']['userData']
+ return imds_data["compute"]["userData"]
except KeyError:
return None
def _hostname_from_imds(imds_data):
try:
- return imds_data['compute']['osProfile']['computerName']
+ return imds_data["compute"]["osProfile"]["computerName"]
except KeyError:
return None
def _disable_password_from_imds(imds_data):
try:
- return imds_data['compute']['osProfile']['disablePasswordAuthentication'] == 'true' # noqa: E501
+ return (
+ imds_data["compute"]["osProfile"]["disablePasswordAuthentication"]
+ == "true"
+ )
except KeyError:
return None
@@ -1518,7 +1630,7 @@ def _key_is_openssh_formatted(key):
Validate whether or not the key is OpenSSH-formatted.
"""
# See https://bugs.launchpad.net/cloud-init/+bug/1910835
- if '\r\n' in key.strip():
+ if "\r\n" in key.strip():
return False
parser = ssh_util.AuthKeyLineParser()
@@ -1546,7 +1658,7 @@ def _partitions_on_device(devpath, maxnum=16):
@azure_ds_telemetry_reporter
def _has_ntfs_filesystem(devpath):
ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True)
- LOG.debug('ntfs_devices found = %s', ntfs_devices)
+ LOG.debug("ntfs_devices found = %s", ntfs_devices)
return os.path.realpath(devpath) in ntfs_devices
@@ -1570,24 +1682,29 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
If cloud-init cannot mount the disk to check for data, destruction
will be allowed, unless the dscfg key is set."""
if preserve_ntfs:
- msg = ('config says to never destroy NTFS (%s.%s), skipping checks' %
- (".".join(DS_CFG_PATH), DS_CFG_KEY_PRESERVE_NTFS))
+ msg = "config says to never destroy NTFS (%s.%s), skipping checks" % (
+ ".".join(DS_CFG_PATH),
+ DS_CFG_KEY_PRESERVE_NTFS,
+ )
return False, msg
if not os.path.exists(devpath):
- return False, 'device %s does not exist' % devpath
+ return False, "device %s does not exist" % devpath
- LOG.debug('Resolving realpath of %s -> %s', devpath,
- os.path.realpath(devpath))
+ LOG.debug(
+ "Resolving realpath of %s -> %s", devpath, os.path.realpath(devpath)
+ )
# devpath of /dev/sd[a-z] or /dev/disk/cloud/azure_resource
# where partitions are "<devpath>1" or "<devpath>-part1" or "<devpath>p1"
partitions = _partitions_on_device(devpath)
if len(partitions) == 0:
- return False, 'device %s was not partitioned' % devpath
+ return False, "device %s was not partitioned" % devpath
elif len(partitions) > 2:
- msg = ('device %s had 3 or more partitions: %s' %
- (devpath, ' '.join([p[1] for p in partitions])))
+ msg = "device %s had 3 or more partitions: %s" % (
+ devpath,
+ " ".join([p[1] for p in partitions]),
+ )
return False, msg
elif len(partitions) == 2:
cand_part, cand_path = partitions[1]
@@ -1595,57 +1712,78 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
cand_part, cand_path = partitions[0]
if not _has_ntfs_filesystem(cand_path):
- msg = ('partition %s (%s) on device %s was not ntfs formatted' %
- (cand_part, cand_path, devpath))
+ msg = "partition %s (%s) on device %s was not ntfs formatted" % (
+ cand_part,
+ cand_path,
+ devpath,
+ )
return False, msg
@azure_ds_telemetry_reporter
def count_files(mp):
- ignored = set(['dataloss_warning_readme.txt'])
+ ignored = set(["dataloss_warning_readme.txt"])
return len([f for f in os.listdir(mp) if f.lower() not in ignored])
- bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
- (cand_part, cand_path, devpath))
+ bmsg = "partition %s (%s) on device %s was ntfs formatted" % (
+ cand_part,
+ cand_path,
+ devpath,
+ )
with events.ReportEventStack(
name="mount-ntfs-and-count",
description="mount-ntfs-and-count",
- parent=azure_ds_reporter
+ parent=azure_ds_reporter,
) as evt:
try:
- file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
- update_env_for_mount={'LANG': 'C'})
+ file_count = util.mount_cb(
+ cand_path,
+ count_files,
+ mtype="ntfs",
+ update_env_for_mount={"LANG": "C"},
+ )
except util.MountFailedError as e:
evt.description = "cannot mount ntfs"
if "unknown filesystem type 'ntfs'" in str(e):
- return True, (bmsg + ' but this system cannot mount NTFS,'
- ' assuming there are no important files.'
- ' Formatting allowed.')
- return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
+ return (
+ True,
+ (
+ bmsg + " but this system cannot mount NTFS,"
+ " assuming there are no important files."
+ " Formatting allowed."
+ ),
+ )
+ return False, bmsg + " but mount of %s failed: %s" % (cand_part, e)
if file_count != 0:
evt.description = "mounted and counted %d files" % file_count
- LOG.warning("it looks like you're using NTFS on the ephemeral"
- " disk, to ensure that filesystem does not get wiped,"
- " set %s.%s in config", '.'.join(DS_CFG_PATH),
- DS_CFG_KEY_PRESERVE_NTFS)
- return False, bmsg + ' but had %d files on it.' % file_count
+ LOG.warning(
+ "it looks like you're using NTFS on the ephemeral"
+ " disk, to ensure that filesystem does not get wiped,"
+ " set %s.%s in config",
+ ".".join(DS_CFG_PATH),
+ DS_CFG_KEY_PRESERVE_NTFS,
+ )
+ return False, bmsg + " but had %d files on it." % file_count
- return True, bmsg + ' and had no important files. Safe for reformatting.'
+ return True, bmsg + " and had no important files. Safe for reformatting."
@azure_ds_telemetry_reporter
-def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH,
- is_new_instance=False, preserve_ntfs=False):
+def address_ephemeral_resize(
+ devpath=RESOURCE_DISK_PATH, is_new_instance=False, preserve_ntfs=False
+):
if not os.path.exists(devpath):
report_diagnostic_event(
"Ephemeral resource disk '%s' does not exist." % devpath,
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
return
else:
report_diagnostic_event(
"Ephemeral resource disk '%s' exists." % devpath,
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
result = False
msg = None
@@ -1658,31 +1796,32 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH,
if not result:
return
- for mod in ['disk_setup', 'mounts']:
- sempath = '/var/lib/cloud/instance/sem/config_' + mod
+ for mod in ["disk_setup", "mounts"]:
+ sempath = "/var/lib/cloud/instance/sem/config_" + mod
bmsg = 'Marker "%s" for module "%s"' % (sempath, mod)
if os.path.exists(sempath):
try:
os.unlink(sempath)
- LOG.debug('%s removed.', bmsg)
+ LOG.debug("%s removed.", bmsg)
except Exception as e:
# python3 throws FileNotFoundError, python2 throws OSError
- LOG.warning('%s: remove failed! (%s)', bmsg, e)
+ LOG.warning("%s: remove failed! (%s)", bmsg, e)
else:
- LOG.debug('%s did not exist.', bmsg)
+ LOG.debug("%s did not exist.", bmsg)
return
@azure_ds_telemetry_reporter
def write_files(datadir, files, dirmode=None):
-
def _redact_password(cnt, fname):
"""Azure provides the UserPassword in plain text. So we redact it"""
try:
root = ET.fromstring(cnt)
for elem in root.iter():
- if ('UserPassword' in elem.tag and
- elem.text != DEF_PASSWD_REDACTION):
+ if (
+ "UserPassword" in elem.tag
+ and elem.text != DEF_PASSWD_REDACTION
+ ):
elem.text = DEF_PASSWD_REDACTION
return ET.tostring(root)
except Exception:
@@ -1696,7 +1835,7 @@ def write_files(datadir, files, dirmode=None):
util.ensure_dir(datadir, dirmode)
for (name, content) in files.items():
fname = os.path.join(datadir, name)
- if 'ovf-env.xml' in name:
+ if "ovf-env.xml" in name:
content = _redact_password(content, fname)
util.write_file(filename=fname, content=content, mode=0o600)
@@ -1728,8 +1867,9 @@ def load_azure_ovf_pubkeys(sshnode):
if len(results) == 0:
return []
if len(results) > 1:
- raise BrokenAzureDataSource("Multiple 'PublicKeys'(%s) in SSH node" %
- len(results))
+ raise BrokenAzureDataSource(
+ "Multiple 'PublicKeys'(%s) in SSH node" % len(results)
+ )
pubkeys_node = results[0]
pubkeys = find_child(pubkeys_node, lambda n: n.localName == "PublicKey")
@@ -1744,7 +1884,7 @@ def load_azure_ovf_pubkeys(sshnode):
if not pk_node.hasChildNodes():
continue
- cur = {'fingerprint': "", 'path': "", 'value': ""}
+ cur = {"fingerprint": "", "path": "", "value": ""}
for child in pk_node.childNodes:
if child.nodeType == text_node or not child.localName:
continue
@@ -1754,8 +1894,10 @@ def load_azure_ovf_pubkeys(sshnode):
if name not in cur.keys():
continue
- if (len(child.childNodes) != 1 or
- child.childNodes[0].nodeType != text_node):
+ if (
+ len(child.childNodes) != 1
+ or child.childNodes[0].nodeType != text_node
+ ):
continue
cur[name] = child.childNodes[0].wholeText.strip()
@@ -1773,33 +1915,37 @@ def read_azure_ovf(contents):
report_diagnostic_event(error_str, logger_func=LOG.warning)
raise BrokenAzureDataSource(error_str) from e
- results = find_child(dom.documentElement,
- lambda n: n.localName == "ProvisioningSection")
+ results = find_child(
+ dom.documentElement, lambda n: n.localName == "ProvisioningSection"
+ )
if len(results) == 0:
raise NonAzureDataSource("No ProvisioningSection")
if len(results) > 1:
- raise BrokenAzureDataSource("found '%d' ProvisioningSection items" %
- len(results))
+ raise BrokenAzureDataSource(
+ "found '%d' ProvisioningSection items" % len(results)
+ )
provSection = results[0]
- lpcs_nodes = find_child(provSection,
- lambda n:
- n.localName == "LinuxProvisioningConfigurationSet")
+ lpcs_nodes = find_child(
+ provSection,
+ lambda n: n.localName == "LinuxProvisioningConfigurationSet",
+ )
if len(lpcs_nodes) == 0:
raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
if len(lpcs_nodes) > 1:
- raise BrokenAzureDataSource("found '%d' %ss" %
- (len(lpcs_nodes),
- "LinuxProvisioningConfigurationSet"))
+ raise BrokenAzureDataSource(
+ "found '%d' %ss"
+ % (len(lpcs_nodes), "LinuxProvisioningConfigurationSet")
+ )
lpcs = lpcs_nodes[0]
if not lpcs.hasChildNodes():
raise BrokenAzureDataSource("no child nodes of configuration set")
- md_props = 'seedfrom'
- md = {'azure_data': {}}
+ md_props = "seedfrom"
+ md = {"azure_data": {}}
cfg = {}
ud = ""
password = None
@@ -1813,8 +1959,10 @@ def read_azure_ovf(contents):
simple = False
value = ""
- if (len(child.childNodes) == 1 and
- child.childNodes[0].nodeType == dom.TEXT_NODE):
+ if (
+ len(child.childNodes) == 1
+ and child.childNodes[0].nodeType == dom.TEXT_NODE
+ ):
simple = True
value = child.childNodes[0].wholeText
@@ -1823,8 +1971,8 @@ def read_azure_ovf(contents):
# we accept either UserData or CustomData. If both are present
# then behavior is undefined.
if name == "userdata" or name == "customdata":
- if attrs.get('encoding') in (None, "base64"):
- ud = base64.b64decode(''.join(value.split()))
+ if attrs.get("encoding") in (None, "base64"):
+ ud = base64.b64decode("".join(value.split()))
else:
ud = value
elif name == "username":
@@ -1832,36 +1980,36 @@ def read_azure_ovf(contents):
elif name == "userpassword":
password = value
elif name == "hostname":
- md['local-hostname'] = value
+ md["local-hostname"] = value
elif name == "dscfg":
- if attrs.get('encoding') in (None, "base64"):
- dscfg = base64.b64decode(''.join(value.split()))
+ if attrs.get("encoding") in (None, "base64"):
+ dscfg = base64.b64decode("".join(value.split()))
else:
dscfg = value
- cfg['datasource'] = {DS_NAME: util.load_yaml(dscfg, default={})}
+ cfg["datasource"] = {DS_NAME: util.load_yaml(dscfg, default={})}
elif name == "ssh":
- cfg['_pubkeys'] = load_azure_ovf_pubkeys(child)
+ cfg["_pubkeys"] = load_azure_ovf_pubkeys(child)
elif name == "disablesshpasswordauthentication":
- cfg['ssh_pwauth'] = util.is_false(value)
+ cfg["ssh_pwauth"] = util.is_false(value)
elif simple:
if name in md_props:
md[name] = value
else:
- md['azure_data'][name] = value
+ md["azure_data"][name] = value
defuser = {}
if username:
- defuser['name'] = username
+ defuser["name"] = username
if password:
- defuser['lock_passwd'] = False
+ defuser["lock_passwd"] = False
if DEF_PASSWD_REDACTION != password:
- defuser['passwd'] = cfg['password'] = encrypt_pass(password)
+ defuser["passwd"] = cfg["password"] = encrypt_pass(password)
if defuser:
- cfg['system_info'] = {'default_user': defuser}
+ cfg["system_info"] = {"default_user": defuser}
- if 'ssh_pwauth' not in cfg and password:
- cfg['ssh_pwauth'] = True
+ if "ssh_pwauth" not in cfg and password:
+ cfg["ssh_pwauth"] = True
preprovisioning_cfg = _get_preprovisioning_cfgs(dom)
cfg = util.mergemanydict([cfg, preprovisioning_cfg])
@@ -1887,20 +2035,18 @@ def _get_preprovisioning_cfgs(dom):
More specifically, this will never happen:
- PreprovisionedVm=True and PreprovisionedVMType=Savable
"""
- cfg = {
- "PreprovisionedVm": False,
- "PreprovisionedVMType": None
- }
+ cfg = {"PreprovisionedVm": False, "PreprovisionedVMType": None}
platform_settings_section = find_child(
- dom.documentElement,
- lambda n: n.localName == "PlatformSettingsSection")
+ dom.documentElement, lambda n: n.localName == "PlatformSettingsSection"
+ )
if not platform_settings_section or len(platform_settings_section) == 0:
LOG.debug("PlatformSettingsSection not found")
return cfg
platform_settings = find_child(
platform_settings_section[0],
- lambda n: n.localName == "PlatformSettings")
+ lambda n: n.localName == "PlatformSettings",
+ )
if not platform_settings or len(platform_settings) == 0:
LOG.debug("PlatformSettings not found")
return cfg
@@ -1909,10 +2055,12 @@ def _get_preprovisioning_cfgs(dom):
# platform has removed PreprovisionedVm and only surfaces
# PreprovisionedVMType.
cfg["PreprovisionedVm"] = _get_preprovisionedvm_cfg_value(
- platform_settings)
+ platform_settings
+ )
cfg["PreprovisionedVMType"] = _get_preprovisionedvmtype_cfg_value(
- platform_settings)
+ platform_settings
+ )
return cfg
@@ -1924,16 +2072,18 @@ def _get_preprovisionedvm_cfg_value(platform_settings):
# platform has removed PreprovisionedVm and only surfaces
# PreprovisionedVMType.
preprovisionedVmVal = find_child(
- platform_settings[0],
- lambda n: n.localName == "PreprovisionedVm")
+ platform_settings[0], lambda n: n.localName == "PreprovisionedVm"
+ )
if not preprovisionedVmVal or len(preprovisionedVmVal) == 0:
LOG.debug("PreprovisionedVm not found")
return preprovisionedVm
preprovisionedVm = util.translate_bool(
- preprovisionedVmVal[0].firstChild.nodeValue)
+ preprovisionedVmVal[0].firstChild.nodeValue
+ )
report_diagnostic_event(
- "PreprovisionedVm: %s" % preprovisionedVm, logger_func=LOG.info)
+ "PreprovisionedVm: %s" % preprovisionedVm, logger_func=LOG.info
+ )
return preprovisionedVm
@@ -1952,18 +2102,21 @@ def _get_preprovisionedvmtype_cfg_value(platform_settings):
# Once assigned to customer, the customer-requested nics are
# hot-attached to it and reprovision happens like today.
preprovisionedVMTypeVal = find_child(
- platform_settings[0],
- lambda n: n.localName == "PreprovisionedVMType")
- if (not preprovisionedVMTypeVal or len(preprovisionedVMTypeVal) == 0 or
- preprovisionedVMTypeVal[0].firstChild is None):
+ platform_settings[0], lambda n: n.localName == "PreprovisionedVMType"
+ )
+ if (
+ not preprovisionedVMTypeVal
+ or len(preprovisionedVMTypeVal) == 0
+ or preprovisionedVMTypeVal[0].firstChild is None
+ ):
LOG.debug("PreprovisionedVMType not found")
return preprovisionedVMType
preprovisionedVMType = preprovisionedVMTypeVal[0].firstChild.nodeValue
report_diagnostic_event(
- "PreprovisionedVMType: %s" % preprovisionedVMType,
- logger_func=LOG.info)
+ "PreprovisionedVMType: %s" % preprovisionedVMType, logger_func=LOG.info
+ )
return preprovisionedVMType
@@ -1987,7 +2140,7 @@ def _check_freebsd_cdrom(cdrom_dev):
@azure_ds_telemetry_reporter
def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE):
"""Return content random seed file if available, otherwise,
- return None."""
+ return None."""
# azure / hyper-v provides random data here
# now update ds_cfg to reflect contents pass in config
if source is None:
@@ -2034,7 +2187,7 @@ def load_azure_ds_dir(source_dir):
contents = fp.read()
md, ud, cfg = read_azure_ovf(contents)
- return (md, ud, cfg, {'ovf-env.xml': contents})
+ return (md, ud, cfg, {"ovf-env.xml": contents})
@azure_ds_telemetry_reporter
@@ -2051,12 +2204,14 @@ def parse_network_config(imds_metadata) -> dict:
return _generate_network_config_from_imds_metadata(imds_metadata)
except Exception as e:
LOG.error(
- 'Failed generating network config '
- 'from IMDS network metadata: %s', str(e))
+ "Failed generating network config "
+ "from IMDS network metadata: %s",
+ str(e),
+ )
try:
return _generate_network_config_from_fallback_config()
except Exception as e:
- LOG.error('Failed generating fallback network config: %s', str(e))
+ LOG.error("Failed generating fallback network config: %s", str(e))
return {}
@@ -2068,57 +2223,60 @@ def _generate_network_config_from_imds_metadata(imds_metadata) -> dict:
@param: imds_metadata: Dict of content read from IMDS network service.
@return: Dictionary containing network version 2 standard configuration.
"""
- netconfig = {'version': 2, 'ethernets': {}}
- network_metadata = imds_metadata['network']
- for idx, intf in enumerate(network_metadata['interface']):
+ netconfig = {"version": 2, "ethernets": {}}
+ network_metadata = imds_metadata["network"]
+ for idx, intf in enumerate(network_metadata["interface"]):
has_ip_address = False
# First IPv4 and/or IPv6 address will be obtained via DHCP.
# Any additional IPs of each type will be set as static
# addresses.
- nicname = 'eth{idx}'.format(idx=idx)
- dhcp_override = {'route-metric': (idx + 1) * 100}
- dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override,
- 'dhcp6': False}
- for addr_type in ('ipv4', 'ipv6'):
- addresses = intf.get(addr_type, {}).get('ipAddress', [])
+ nicname = "eth{idx}".format(idx=idx)
+ dhcp_override = {"route-metric": (idx + 1) * 100}
+ dev_config = {
+ "dhcp4": True,
+ "dhcp4-overrides": dhcp_override,
+ "dhcp6": False,
+ }
+ for addr_type in ("ipv4", "ipv6"):
+ addresses = intf.get(addr_type, {}).get("ipAddress", [])
# If there are no available IP addresses, then we don't
# want to add this interface to the generated config.
if not addresses:
continue
has_ip_address = True
- if addr_type == 'ipv4':
- default_prefix = '24'
+ if addr_type == "ipv4":
+ default_prefix = "24"
else:
- default_prefix = '128'
+ default_prefix = "128"
if addresses:
- dev_config['dhcp6'] = True
+ dev_config["dhcp6"] = True
# non-primary interfaces should have a higher
# route-metric (cost) so default routes prefer
# primary nic due to lower route-metric value
- dev_config['dhcp6-overrides'] = dhcp_override
+ dev_config["dhcp6-overrides"] = dhcp_override
for addr in addresses[1:]:
# Append static address config for ip > 1
- netPrefix = intf[addr_type]['subnet'][0].get(
- 'prefix', default_prefix)
- privateIp = addr['privateIpAddress']
- if not dev_config.get('addresses'):
- dev_config['addresses'] = []
- dev_config['addresses'].append(
- '{ip}/{prefix}'.format(
- ip=privateIp, prefix=netPrefix))
+ netPrefix = intf[addr_type]["subnet"][0].get(
+ "prefix", default_prefix
+ )
+ privateIp = addr["privateIpAddress"]
+ if not dev_config.get("addresses"):
+ dev_config["addresses"] = []
+ dev_config["addresses"].append(
+ "{ip}/{prefix}".format(ip=privateIp, prefix=netPrefix)
+ )
if dev_config and has_ip_address:
- mac = ':'.join(re.findall(r'..', intf['macAddress']))
- dev_config.update({
- 'match': {'macaddress': mac.lower()},
- 'set-name': nicname
- })
+ mac = ":".join(re.findall(r"..", intf["macAddress"]))
+ dev_config.update(
+ {"match": {"macaddress": mac.lower()}, "set-name": nicname}
+ )
# With netvsc, we can get two interfaces that
# share the same MAC, so we need to make sure
# our match condition also contains the driver
driver = device_driver(nicname)
- if driver and driver == 'hv_netvsc':
- dev_config['match']['driver'] = driver
- netconfig['ethernets'][nicname] = dev_config
+ if driver and driver == "hv_netvsc":
+ dev_config["match"]["driver"] = driver
+ netconfig["ethernets"][nicname] = dev_config
return netconfig
@@ -2129,16 +2287,19 @@ def _generate_network_config_from_fallback_config() -> dict:
@return: Dictionary containing network version 2 standard configuration.
"""
return net.generate_fallback_config(
- blacklist_drivers=BLACKLIST_DRIVERS, config_driver=True)
+ blacklist_drivers=BLACKLIST_DRIVERS, config_driver=True
+ )
@azure_ds_telemetry_reporter
-def get_metadata_from_imds(fallback_nic,
- retries,
- md_type=metadata_type.all,
- api_version=IMDS_VER_MIN,
- exc_cb=retry_on_url_exc,
- infinite=False):
+def get_metadata_from_imds(
+ fallback_nic,
+ retries,
+ md_type=metadata_type.all,
+ api_version=IMDS_VER_MIN,
+ exc_cb=retry_on_url_exc,
+ infinite=False,
+):
"""Query Azure's instance metadata service, returning a dictionary.
If network is not up, setup ephemeral dhcp on fallback_nic to talk to the
@@ -2154,31 +2315,34 @@ def get_metadata_from_imds(fallback_nic,
@return: A dict of instance metadata containing compute and network
info.
"""
- kwargs = {'logfunc': LOG.debug,
- 'msg': 'Crawl of Azure Instance Metadata Service (IMDS)',
- 'func': _get_metadata_from_imds,
- 'args': (retries, exc_cb, md_type, api_version, infinite)}
+ kwargs = {
+ "logfunc": LOG.debug,
+ "msg": "Crawl of Azure Instance Metadata Service (IMDS)",
+ "func": _get_metadata_from_imds,
+ "args": (retries, exc_cb, md_type, api_version, infinite),
+ }
if net.is_up(fallback_nic):
return util.log_time(**kwargs)
else:
try:
- with EphemeralDHCPv4WithReporting(
- azure_ds_reporter, fallback_nic):
+ with EphemeralDHCPv4WithReporting(azure_ds_reporter, fallback_nic):
return util.log_time(**kwargs)
except Exception as e:
report_diagnostic_event(
"exception while getting metadata: %s" % e,
- logger_func=LOG.warning)
+ logger_func=LOG.warning,
+ )
raise
@azure_ds_telemetry_reporter
def _get_metadata_from_imds(
- retries,
- exc_cb,
- md_type=metadata_type.all,
- api_version=IMDS_VER_MIN,
- infinite=False):
+ retries,
+ exc_cb,
+ md_type=metadata_type.all,
+ api_version=IMDS_VER_MIN,
+ infinite=False,
+):
url = "{}?api-version={}".format(md_type.value, api_version)
headers = {"Metadata": "true"}
@@ -2188,20 +2352,27 @@ def _get_metadata_from_imds(
try:
response = readurl(
- url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers,
- retries=retries, exception_cb=exc_cb, infinite=infinite)
+ url,
+ timeout=IMDS_TIMEOUT_IN_SECONDS,
+ headers=headers,
+ retries=retries,
+ exception_cb=exc_cb,
+ infinite=infinite,
+ )
except Exception as e:
# pylint:disable=no-member
if isinstance(e, UrlError) and e.code == 400:
raise
else:
report_diagnostic_event(
- 'Ignoring IMDS instance metadata. '
- 'Get metadata from IMDS failed: %s' % e,
- logger_func=LOG.warning)
+ "Ignoring IMDS instance metadata. "
+ "Get metadata from IMDS failed: %s" % e,
+ logger_func=LOG.warning,
+ )
return {}
try:
from json.decoder import JSONDecodeError
+
json_decode_error = JSONDecodeError
except ImportError:
json_decode_error = ValueError
@@ -2210,9 +2381,10 @@ def _get_metadata_from_imds(
return util.load_json(str(response))
except json_decode_error as e:
report_diagnostic_event(
- 'Ignoring non-json IMDS instance metadata response: %s. '
- 'Loading non-json IMDS response failed: %s' % (str(response), e),
- logger_func=LOG.warning)
+ "Ignoring non-json IMDS instance metadata response: %s. "
+ "Loading non-json IMDS response failed: %s" % (str(response), e),
+ logger_func=LOG.warning,
+ )
return {}
@@ -2242,10 +2414,11 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
if os.path.exists(path):
if not logged:
LOG.info(
- 'Removing Ubuntu extended network scripts because'
- ' cloud-init updates Azure network configuration on the'
- ' following events: %s.',
- [EventType.BOOT.value, EventType.BOOT_LEGACY.value])
+ "Removing Ubuntu extended network scripts because"
+ " cloud-init updates Azure network configuration on the"
+ " following events: %s.",
+ [EventType.BOOT.value, EventType.BOOT_LEGACY.value],
+ )
logged = True
if os.path.isdir(path):
util.del_dir(path)
@@ -2258,15 +2431,15 @@ def _is_platform_viable(seed_dir):
with events.ReportEventStack(
name="check-platform-viability",
description="found azure asset tag",
- parent=azure_ds_reporter
+ parent=azure_ds_reporter,
) as evt:
- asset_tag = dmi.read_dmi_data('chassis-asset-tag')
+ asset_tag = dmi.read_dmi_data("chassis-asset-tag")
if asset_tag == AZURE_CHASSIS_ASSET_TAG:
return True
msg = "Non-Azure DMI asset tag '%s' discovered." % asset_tag
evt.description = msg
report_diagnostic_event(msg, logger_func=LOG.debug)
- if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
+ if os.path.exists(os.path.join(seed_dir, "ovf-env.xml")):
return True
return False
@@ -2284,7 +2457,7 @@ DataSourceAzureNet = DataSourceAzure
# Used to match classes to dependencies
datasources = [
- (DataSourceAzure, (sources.DEP_FILESYSTEM, )),
+ (DataSourceAzure, (sources.DEP_FILESYSTEM,)),
]
@@ -2292,4 +2465,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
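
A minimal standalone sketch of the mapping performed by _generate_network_config_from_imds_metadata in the hunks above: each IMDS interface becomes a netplan v2 ethernet stanza with dhcp4 enabled, a per-index route-metric override so the primary nic keeps the cheapest default route, any additional address of a type appended as a static entry, and a MAC-based match. The IMDS payload fragment below is hypothetical sample data, not taken from the patch.

import re

# Hypothetical IMDS interface entry; real data comes from the IMDS network
# endpoint queried by get_metadata_from_imds().
imds_interface = {
    "macAddress": "000D3A123456",
    "ipv4": {
        "subnet": [{"prefix": "24", "address": "10.0.0.0"}],
        "ipAddress": [
            {"privateIpAddress": "10.0.0.4"},  # first address: via DHCP
            {"privateIpAddress": "10.0.0.5"},  # extra address: static entry
        ],
    },
}


def netplan_ethernet(idx, intf):
    """Build one netplan v2 'ethernets' stanza from an IMDS interface dict."""
    nicname = "eth{idx}".format(idx=idx)
    dev = {
        "dhcp4": True,
        # non-primary nics get a higher route-metric so the primary nic's
        # default route stays preferred
        "dhcp4-overrides": {"route-metric": (idx + 1) * 100},
        "dhcp6": False,
    }
    prefix = intf["ipv4"]["subnet"][0].get("prefix", "24")
    for addr in intf["ipv4"].get("ipAddress", [])[1:]:
        dev.setdefault("addresses", []).append(
            "{ip}/{prefix}".format(ip=addr["privateIpAddress"], prefix=prefix)
        )
    mac = ":".join(re.findall(r"..", intf["macAddress"])).lower()
    dev.update({"match": {"macaddress": mac}, "set-name": nicname})
    return {nicname: dev}


print({"version": 2, "ethernets": netplan_ethernet(0, imds_interface)})
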
diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py
index 63435279..426a762e 100644
--- a/cloudinit/sources/DataSourceBigstep.py
+++ b/cloudinit/sources/DataSourceBigstep.py
@@ -7,14 +7,12 @@
import errno
import json
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import sources, url_helper, util
class DataSourceBigstep(sources.DataSource):
- dsname = 'Bigstep'
+ dsname = "Bigstep"
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -35,7 +33,7 @@ class DataSourceBigstep(sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return 'metadata (%s)' % get_url_from_file()
+ return "metadata (%s)" % get_url_from_file()
def get_url_from_file():
@@ -61,4 +59,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index f63baf74..de71c3e9 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -4,14 +4,13 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from base64 import b64decode
import re
-
-from cloudinit.cs_utils import Cepko, SERIAL_PORT
+from base64 import b64decode
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit.cs_utils import SERIAL_PORT, Cepko
LOG = logging.getLogger(__name__)
@@ -24,11 +23,11 @@ class DataSourceCloudSigma(sources.DataSource):
http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
"""
- dsname = 'CloudSigma'
+ dsname = "CloudSigma"
def __init__(self, sys_cfg, distro, paths):
self.cepko = Cepko()
- self.ssh_public_key = ''
+ self.ssh_public_key = ""
sources.DataSource.__init__(self, sys_cfg, distro, paths)
def is_running_in_cloudsigma(self):
@@ -43,7 +42,7 @@ class DataSourceCloudSigma(sources.DataSource):
LOG.debug("system-product-name not available in dmi data")
return False
LOG.debug("detected hypervisor as %s", sys_product_name)
- return 'cloudsigma' in sys_product_name.lower()
+ return "cloudsigma" in sys_product_name.lower()
def _get_data(self):
"""
@@ -56,7 +55,7 @@ class DataSourceCloudSigma(sources.DataSource):
try:
server_context = self.cepko.all().result
- server_meta = server_context['meta']
+ server_meta = server_context["meta"]
except Exception:
# TODO: check for explicit "config on", and then warn
# but since no explicit config is available now, just debug.
@@ -64,41 +63,42 @@ class DataSourceCloudSigma(sources.DataSource):
return False
self.dsmode = self._determine_dsmode(
- [server_meta.get('cloudinit-dsmode')])
+ [server_meta.get("cloudinit-dsmode")]
+ )
if dsmode == sources.DSMODE_DISABLED:
return False
- base64_fields = server_meta.get('base64_fields', '').split(',')
- self.userdata_raw = server_meta.get('cloudinit-user-data', "")
- if 'cloudinit-user-data' in base64_fields:
+ base64_fields = server_meta.get("base64_fields", "").split(",")
+ self.userdata_raw = server_meta.get("cloudinit-user-data", "")
+ if "cloudinit-user-data" in base64_fields:
self.userdata_raw = b64decode(self.userdata_raw)
- if 'cloudinit' in server_context.get('vendor_data', {}):
+ if "cloudinit" in server_context.get("vendor_data", {}):
self.vendordata_raw = server_context["vendor_data"]["cloudinit"]
self.metadata = server_context
- self.ssh_public_key = server_meta['ssh_public_key']
+ self.ssh_public_key = server_meta["ssh_public_key"]
return True
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return 'cepko (%s)' % SERIAL_PORT
+ return "cepko (%s)" % SERIAL_PORT
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
"""
Cleans up and uses the server's name if the latter is set. Otherwise
the first part from uuid is being used.
"""
- if re.match(r'^[A-Za-z0-9 -_\.]+$', self.metadata['name']):
- return self.metadata['name'][:61]
+ if re.match(r"^[A-Za-z0-9 -_\.]+$", self.metadata["name"]):
+ return self.metadata["name"][:61]
else:
- return self.metadata['uuid'].split('-')[0]
+ return self.metadata["uuid"].split("-")[0]
def get_public_ssh_keys(self):
return [self.ssh_public_key]
def get_instance_id(self):
- return self.metadata['uuid']
+ return self.metadata["uuid"]
# Legacy: Must be present in case we load an old pkl object
@@ -107,7 +107,7 @@ DataSourceCloudSigmaNet = DataSourceCloudSigma
# Used to match classes to dependencies. Since this datasource uses the serial
# port network is not really required, so it's okay to load without it, too.
datasources = [
- (DataSourceCloudSigma, (sources.DEP_FILESYSTEM, )),
+ (DataSourceCloudSigma, (sources.DEP_FILESYSTEM,)),
]
@@ -117,4 +117,5 @@ def get_datasource_list(depends):
"""
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
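
The get_hostname hunk above prefers the server's name when it only contains characters from a small allow-list and falls back to the first uuid segment otherwise. A standalone sketch of that selection, using made-up metadata values:

import re


def cloudsigma_hostname(metadata):
    """Return the server name if it passes the allow-list, else the uuid prefix."""
    if re.match(r"^[A-Za-z0-9 -_\.]+$", metadata["name"]):
        return metadata["name"][:61]
    return metadata["uuid"].split("-")[0]


print(cloudsigma_hostname({"name": "web-01", "uuid": "a1b2c3d4-e5f6"}))  # web-01
print(cloudsigma_hostname({"name": "node|7", "uuid": "a1b2c3d4-e5f6"}))  # a1b2c3d4
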
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 8cb0d5a7..a742a5e6 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -13,17 +13,16 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
-from socket import inet_ntoa, getaddrinfo, gaierror
-from struct import pack
import time
+from socket import gaierror, getaddrinfo, inet_ntoa
+from struct import pack
from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
-from cloudinit.net import dhcp
-from cloudinit import sources
+from cloudinit import sources, subp
from cloudinit import url_helper as uhelp
-from cloudinit import subp
from cloudinit import util
+from cloudinit.net import dhcp
LOG = logging.getLogger(__name__)
@@ -47,27 +46,36 @@ class CloudStackPasswordServerClient(object):
# The password server was in the past, a broken HTTP server, but is now
# fixed. wget handles this seamlessly, so it's easier to shell out to
# that rather than write our own handling code.
- output, _ = subp.subp([
- 'wget', '--quiet', '--tries', '3', '--timeout', '20',
- '--output-document', '-', '--header',
- 'DomU_Request: {0}'.format(domu_request),
- '{0}:8080'.format(self.virtual_router_address)
- ])
+ output, _ = subp.subp(
+ [
+ "wget",
+ "--quiet",
+ "--tries",
+ "3",
+ "--timeout",
+ "20",
+ "--output-document",
+ "-",
+ "--header",
+ "DomU_Request: {0}".format(domu_request),
+ "{0}:8080".format(self.virtual_router_address),
+ ]
+ )
return output.strip()
def get_password(self):
- password = self._do_request('send_my_password')
- if password in ['', 'saved_password']:
+ password = self._do_request("send_my_password")
+ if password in ["", "saved_password"]:
return None
- if password == 'bad_request':
- raise RuntimeError('Error when attempting to fetch root password.')
- self._do_request('saved_password')
+ if password == "bad_request":
+ raise RuntimeError("Error when attempting to fetch root password.")
+ self._do_request("saved_password")
return password
class DataSourceCloudStack(sources.DataSource):
- dsname = 'CloudStack'
+ dsname = "CloudStack"
# Setup read_url parameters per get_url_params.
url_max_wait = 120
@@ -75,10 +83,10 @@ class DataSourceCloudStack(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'cs')
+ self.seed_dir = os.path.join(paths.seed_dir, "cs")
# Cloudstack has its metadata/userdata URLs located at
# http://<virtual-router-ip>/latest/
- self.api_ver = 'latest'
+ self.api_ver = "latest"
self.vr_addr = get_vr_address()
if not self.vr_addr:
raise RuntimeError("No virtual router found!")
@@ -91,19 +99,28 @@ class DataSourceCloudStack(sources.DataSource):
if url_params.max_wait_seconds <= 0:
return False
- urls = [uhelp.combine_url(self.metadata_address,
- 'latest/meta-data/instance-id')]
+ urls = [
+ uhelp.combine_url(
+ self.metadata_address, "latest/meta-data/instance-id"
+ )
+ ]
start_time = time.time()
url, _response = uhelp.wait_for_url(
- urls=urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds, status_cb=LOG.warning)
+ urls=urls,
+ max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds,
+ status_cb=LOG.warning,
+ )
if url:
LOG.debug("Using metadata source: '%s'", url)
else:
- LOG.critical(("Giving up on waiting for the metadata from %s"
- " after %s seconds"),
- urls, int(time.time() - start_time))
+ LOG.critical(
+ "Giving up on waiting for the metadata from %s"
+ " after %s seconds",
+ urls,
+ int(time.time() - start_time),
+ )
return bool(url)
@@ -113,8 +130,8 @@ class DataSourceCloudStack(sources.DataSource):
def _get_data(self):
seed_ret = {}
if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
- self.userdata_raw = seed_ret['user-data']
- self.metadata = seed_ret['meta-data']
+ self.userdata_raw = seed_ret["user-data"]
+ self.metadata = seed_ret["meta-data"]
LOG.debug("Using seeded cloudstack data from: %s", self.seed_dir)
return True
try:
@@ -122,39 +139,48 @@ class DataSourceCloudStack(sources.DataSource):
return False
start_time = time.time()
self.userdata_raw = ec2.get_instance_userdata(
- self.api_ver, self.metadata_address)
- self.metadata = ec2.get_instance_metadata(self.api_ver,
- self.metadata_address)
- LOG.debug("Crawl of metadata service took %s seconds",
- int(time.time() - start_time))
+ self.api_ver, self.metadata_address
+ )
+ self.metadata = ec2.get_instance_metadata(
+ self.api_ver, self.metadata_address
+ )
+ LOG.debug(
+ "Crawl of metadata service took %s seconds",
+ int(time.time() - start_time),
+ )
password_client = CloudStackPasswordServerClient(self.vr_addr)
try:
set_password = password_client.get_password()
except Exception:
- util.logexc(LOG,
- 'Failed to fetch password from virtual router %s',
- self.vr_addr)
+ util.logexc(
+ LOG,
+ "Failed to fetch password from virtual router %s",
+ self.vr_addr,
+ )
else:
if set_password:
self.cfg = {
- 'ssh_pwauth': True,
- 'password': set_password,
- 'chpasswd': {
- 'expire': False,
+ "ssh_pwauth": True,
+ "password": set_password,
+ "chpasswd": {
+ "expire": False,
},
}
return True
except Exception:
- util.logexc(LOG, 'Failed fetching from metadata service %s',
- self.metadata_address)
+ util.logexc(
+ LOG,
+ "Failed fetching from metadata service %s",
+ self.metadata_address,
+ )
return False
def get_instance_id(self):
- return self.metadata['instance-id']
+ return self.metadata["instance-id"]
@property
def availability_zone(self):
- return self.metadata['availability-zone']
+ return self.metadata["availability-zone"]
def get_data_server():
@@ -183,8 +209,11 @@ def get_default_gateway():
def get_dhclient_d():
# find lease files directory
- supported_dirs = ["/var/lib/dhclient", "/var/lib/dhcp",
- "/var/lib/NetworkManager"]
+ supported_dirs = [
+ "/var/lib/dhclient",
+ "/var/lib/dhcp",
+ "/var/lib/NetworkManager",
+ ]
for d in supported_dirs:
if os.path.exists(d) and len(os.listdir(d)) > 0:
LOG.debug("Using %s lease directory", d)
@@ -233,15 +262,18 @@ def get_vr_address():
# Try data-server DNS entry first
latest_address = get_data_server()
if latest_address:
- LOG.debug("Found metadata server '%s' via data-server DNS entry",
- latest_address)
+ LOG.debug(
+ "Found metadata server '%s' via data-server DNS entry",
+ latest_address,
+ )
return latest_address
# Try networkd second...
- latest_address = dhcp.networkd_get_option_from_leases('SERVER_ADDRESS')
+ latest_address = dhcp.networkd_get_option_from_leases("SERVER_ADDRESS")
if latest_address:
- LOG.debug("Found SERVER_ADDRESS '%s' via networkd_leases",
- latest_address)
+ LOG.debug(
+ "Found SERVER_ADDRESS '%s' via networkd_leases", latest_address
+ )
return latest_address
# Try dhcp lease files next...
@@ -275,4 +307,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
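
The CloudStackPasswordServerClient hunk above keeps shelling out to wget because the virtual router's password server historically spoke slightly broken HTTP; the request is a GET to port 8080 on the router carrying a DomU_Request header. A small sketch of the argv it assembles, shown standalone (the router address below is a placeholder):

def password_server_cmd(virtual_router_address, domu_request):
    """Return the wget argv used against the CloudStack password server."""
    return [
        "wget",
        "--quiet",
        "--tries",
        "3",
        "--timeout",
        "20",
        "--output-document",
        "-",
        "--header",
        "DomU_Request: {0}".format(domu_request),
        "{0}:8080".format(virtual_router_address),
    ]


# e.g. subp.subp(password_server_cmd("10.1.1.1", "send_my_password"))
print(" ".join(password_server_cmd("10.1.1.1", "send_my_password")))
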
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 19c8d126..f7c58b12 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -9,9 +9,7 @@
import os
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import sources, subp, util
from cloudinit.event import EventScope, EventType
from cloudinit.net import eni
from cloudinit.sources.DataSourceIBMCloud import get_ibm_platform
@@ -21,32 +19,35 @@ LOG = logging.getLogger(__name__)
# Various defaults/constants...
DEFAULT_IID = "iid-dsconfigdrive"
-DEFAULT_MODE = 'pass'
+DEFAULT_MODE = "pass"
DEFAULT_METADATA = {
"instance-id": DEFAULT_IID,
}
-FS_TYPES = ('vfat', 'iso9660')
-LABEL_TYPES = ('config-2', 'CONFIG-2')
-POSSIBLE_MOUNTS = ('sr', 'cd')
-OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS
- for i in range(0, 2)))
+FS_TYPES = ("vfat", "iso9660")
+LABEL_TYPES = ("config-2", "CONFIG-2")
+POSSIBLE_MOUNTS = ("sr", "cd")
+OPTICAL_DEVICES = tuple(
+ ("/dev/%s%s" % (z, i) for z in POSSIBLE_MOUNTS for i in range(0, 2))
+)
class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
- dsname = 'ConfigDrive'
+ dsname = "ConfigDrive"
- supported_update_events = {EventScope.NETWORK: {
- EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT,
- EventType.BOOT_LEGACY,
- EventType.HOTPLUG,
- }}
+ supported_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)
self.source = None
- self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
+ self.seed_dir = os.path.join(paths.seed_dir, "config_drive")
self.version = None
self.ec2_metadata = None
self._network_config = None
@@ -76,15 +77,16 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
util.logexc(LOG, "Failed reading config drive from %s", sdir)
if not found:
- dslist = self.sys_cfg.get('datasource_list')
+ dslist = self.sys_cfg.get("datasource_list")
for dev in find_candidate_devs(dslist=dslist):
mtype = None
if util.is_BSD():
if dev.startswith("/dev/cd"):
mtype = "cd9660"
try:
- results = util.mount_cb(dev, read_config_drive,
- mtype=mtype)
+ results = util.mount_cb(
+ dev, read_config_drive, mtype=mtype
+ )
found = dev
except openstack.NonReadable:
pass
@@ -97,41 +99,49 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
if not found:
return False
- md = results.get('metadata', {})
+ md = results.get("metadata", {})
md = util.mergemanydict([md, DEFAULT_METADATA])
self.dsmode = self._determine_dsmode(
- [results.get('dsmode'), self.ds_cfg.get('dsmode'),
- sources.DSMODE_PASS if results['version'] == 1 else None])
+ [
+ results.get("dsmode"),
+ self.ds_cfg.get("dsmode"),
+ sources.DSMODE_PASS if results["version"] == 1 else None,
+ ]
+ )
if self.dsmode == sources.DSMODE_DISABLED:
return False
prev_iid = get_previous_iid(self.paths)
- cur_iid = md['instance-id']
+ cur_iid = md["instance-id"]
if prev_iid != cur_iid:
# better would be to handle this centrally, allowing
# the datasource to do something on new instance id
# note, networking is only rendered here if dsmode is DSMODE_PASS
# which means "DISABLED, but render files and networking"
- on_first_boot(results, distro=self.distro,
- network=self.dsmode == sources.DSMODE_PASS)
+ on_first_boot(
+ results,
+ distro=self.distro,
+ network=self.dsmode == sources.DSMODE_PASS,
+ )
# This is legacy and sneaky. If dsmode is 'pass' then do not claim
# the datasource was used, even though we did run on_first_boot above.
if self.dsmode == sources.DSMODE_PASS:
- LOG.debug("%s: not claiming datasource, dsmode=%s", self,
- self.dsmode)
+ LOG.debug(
+ "%s: not claiming datasource, dsmode=%s", self, self.dsmode
+ )
return False
self.source = found
self.metadata = md
- self.ec2_metadata = results.get('ec2-metadata')
- self.userdata_raw = results.get('userdata')
- self.version = results['version']
- self.files.update(results.get('files', {}))
+ self.ec2_metadata = results.get("ec2-metadata")
+ self.userdata_raw = results.get("userdata")
+ self.version = results["version"]
+ self.files.update(results.get("files", {}))
- vd = results.get('vendordata')
+ vd = results.get("vendordata")
self.vendordata_pure = vd
try:
self.vendordata_raw = sources.convert_vendordata(vd)
@@ -143,7 +153,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
# obsolete compared to networkdata (from network_data.json) but both
# might be present.
self.network_eni = results.get("network_config")
- self.network_json = results.get('networkdata')
+ self.network_json = results.get("networkdata")
return True
def check_instance_id(self, sys_cfg):
@@ -156,7 +166,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
if self.network_json not in (None, sources.UNSET):
LOG.debug("network config provided via network_json")
self._network_config = openstack.convert_net_json(
- self.network_json, known_macs=self.known_macs)
+ self.network_json, known_macs=self.known_macs
+ )
elif self.network_eni is not None:
self._network_config = eni.convert_eni_data(self.network_eni)
LOG.debug("network config provided via converted eni data")
@@ -166,15 +177,15 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
@property
def platform(self):
- return 'openstack'
+ return "openstack"
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- if self.source.startswith('/dev'):
- subplatform_type = 'config-disk'
+ if self.source.startswith("/dev"):
+ subplatform_type = "config-disk"
else:
- subplatform_type = 'seed-dir'
- return '%s (%s)' % (subplatform_type, self.source)
+ subplatform_type = "seed-dir"
+ return "%s (%s)" % (subplatform_type, self.source)
def read_config_drive(source_dir):
@@ -196,7 +207,7 @@ def get_previous_iid(paths):
# interestingly, for this purpose the "previous" instance-id is the current
# instance-id. cloud-init hasn't moved them over yet as this datasource
# hasn't declared itself found.
- fname = os.path.join(paths.get_cpath('data'), 'instance-id')
+ fname = os.path.join(paths.get_cpath("data"), "instance-id")
try:
return util.load_file(fname).rstrip("\n")
except IOError:
@@ -206,14 +217,15 @@ def get_previous_iid(paths):
def on_first_boot(data, distro=None, network=True):
"""Performs any first-boot actions using data read from a config-drive."""
if not isinstance(data, dict):
- raise TypeError("Config-drive data expected to be a dict; not %s"
- % (type(data)))
+ raise TypeError(
+ "Config-drive data expected to be a dict; not %s" % (type(data))
+ )
if network:
- net_conf = data.get("network_config", '')
+ net_conf = data.get("network_config", "")
if net_conf and distro:
LOG.warning("Updating network interfaces from config drive")
distro.apply_network_config(eni.convert_eni_data(net_conf))
- write_injected_files(data.get('files'))
+ write_injected_files(data.get("files"))
def write_injected_files(files):
@@ -270,12 +282,13 @@ def find_candidate_devs(probe_optical=True, dslist=None):
# combine list of items by putting by-label items first
# followed by fstype items, but with dupes removed
- candidates = (by_label + [d for d in by_fstype if d not in by_label])
+ candidates = by_label + [d for d in by_fstype if d not in by_label]
# We are looking for a block device or partition with necessary label or
# an unpartitioned block device (ex sda, not sda1)
- devices = [d for d in candidates
- if d in by_label or not util.is_partition(d)]
+ devices = [
+ d for d in candidates if d in by_label or not util.is_partition(d)
+ ]
LOG.debug("devices=%s dslist=%s", devices, dslist)
if devices and "IBMCloud" in dslist:
@@ -283,8 +296,11 @@ def find_candidate_devs(probe_optical=True, dslist=None):
ibm_platform, ibm_path = get_ibm_platform()
if ibm_path in devices:
devices.remove(ibm_path)
- LOG.debug("IBMCloud device '%s' (%s) removed from candidate list",
- ibm_path, ibm_platform)
+ LOG.debug(
+ "IBMCloud device '%s' (%s) removed from candidate list",
+ ibm_path,
+ ibm_platform,
+ )
return devices
@@ -302,4 +318,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
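
The find_candidate_devs hunk above orders by-label matches ahead of by-fstype matches, drops duplicates, and then keeps only labelled devices or unpartitioned block devices. A toy standalone walk-through of that filtering, with hypothetical device lists and a simplified stand-in for util.is_partition():

# Hypothetical results of the LABEL=config-2 and TYPE=vfat/iso9660 lookups.
by_label = ["/dev/sr0", "/dev/vdb1"]
by_fstype = ["/dev/sr0", "/dev/vdc"]


def is_partition(dev):
    """Simplified stand-in for util.is_partition(): trailing digit on a disk."""
    return dev[-1].isdigit() and not dev.startswith("/dev/sr")


# by-label items first, then fstype items, with duplicates removed
candidates = by_label + [d for d in by_fstype if d not in by_label]
# keep devices with the right label or unpartitioned devices (sda, not sda1)
devices = [d for d in candidates if d in by_label or not is_partition(d)]
print(devices)  # ['/dev/sr0', '/dev/vdb1', '/dev/vdc']
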
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 08805d99..52d3ad26 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -6,16 +6,14 @@
# DigitalOcean Droplet API:
# https://developers.digitalocean.com/documentation/metadata/
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
import cloudinit.sources.helpers.digitalocean as do_helper
+from cloudinit import log as logging
+from cloudinit import sources, util
LOG = logging.getLogger(__name__)
BUILTIN_DS_CONFIG = {
- 'metadata_url': 'http://169.254.169.254/metadata/v1.json',
+ "metadata_url": "http://169.254.169.254/metadata/v1.json",
}
# Wait for a up to a minute, retrying the meta-data server
@@ -28,20 +26,25 @@ MD_USE_IPV4LL = True
class DataSourceDigitalOcean(sources.DataSource):
- dsname = 'DigitalOcean'
+ dsname = "DigitalOcean"
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.distro = distro
self.metadata = dict()
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}),
- BUILTIN_DS_CONFIG])
- self.metadata_address = self.ds_cfg['metadata_url']
- self.retries = self.ds_cfg.get('retries', MD_RETRIES)
- self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
- self.use_ip4LL = self.ds_cfg.get('use_ip4LL', MD_USE_IPV4LL)
- self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(
+ sys_cfg, ["datasource", "DigitalOcean"], {}
+ ),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+ self.metadata_address = self.ds_cfg["metadata_url"]
+ self.retries = self.ds_cfg.get("retries", MD_RETRIES)
+ self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT)
+ self.use_ip4LL = self.ds_cfg.get("use_ip4LL", MD_USE_IPV4LL)
+ self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY)
self._network_config = None
def _get_sysinfo(self):
@@ -61,15 +64,18 @@ class DataSourceDigitalOcean(sources.DataSource):
ipv4LL_nic = do_helper.assign_ipv4_link_local(self.distro)
md = do_helper.read_metadata(
- self.metadata_address, timeout=self.timeout,
- sec_between=self.wait_retry, retries=self.retries)
+ self.metadata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries,
+ )
self.metadata_full = md
- self.metadata['instance-id'] = md.get('droplet_id', droplet_id)
- self.metadata['local-hostname'] = md.get('hostname', droplet_id)
- self.metadata['interfaces'] = md.get('interfaces')
- self.metadata['public-keys'] = md.get('public_keys')
- self.metadata['availability_zone'] = md.get('region', 'default')
+ self.metadata["instance-id"] = md.get("droplet_id", droplet_id)
+ self.metadata["local-hostname"] = md.get("hostname", droplet_id)
+ self.metadata["interfaces"] = md.get("interfaces")
+ self.metadata["public-keys"] = md.get("public_keys")
+ self.metadata["availability_zone"] = md.get("region", "default")
self.vendordata_raw = md.get("vendor_data", None)
self.userdata_raw = md.get("user_data", None)
@@ -80,32 +86,34 @@ class DataSourceDigitalOcean(sources.DataSource):
def check_instance_id(self, sys_cfg):
return sources.instance_id_matches_system_uuid(
- self.get_instance_id(), 'system-serial-number')
+ self.get_instance_id(), "system-serial-number"
+ )
@property
def network_config(self):
"""Configure the networking. This needs to be done each boot, since
- the IP information may have changed due to snapshot and/or
- migration.
+ the IP information may have changed due to snapshot and/or
+ migration.
"""
if self._network_config:
return self._network_config
- interfaces = self.metadata.get('interfaces')
+ interfaces = self.metadata.get("interfaces")
LOG.debug(interfaces)
if not interfaces:
raise Exception("Unable to get meta-data from server....")
- nameservers = self.metadata_full['dns']['nameservers']
+ nameservers = self.metadata_full["dns"]["nameservers"]
self._network_config = do_helper.convert_network_configuration(
- interfaces, nameservers)
+ interfaces, nameservers
+ )
return self._network_config
# Used to match classes to dependencies
datasources = [
- (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, )),
+ (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM,)),
]
@@ -113,4 +121,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
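The DigitalOcean datasource above (like the GCE and Hetzner ones later in this patch) builds its effective settings by merging the operator's datasource config over BUILTIN_DS_CONFIG. A minimal sketch of that precedence, using a plain dict merge as a stand-in for cloud-init's mergemanydict and an invented operator override:

    BUILTIN_DS_CONFIG = {
        "metadata_url": "http://169.254.169.254/metadata/v1.json",
    }
    operator_cfg = {"retries": 10}  # hypothetical datasource: DigitalOcean: retries

    # Operator-supplied values take precedence; anything missing falls back
    # to the builtin defaults.
    ds_cfg = {**BUILTIN_DS_CONFIG, **operator_cfg}
    assert ds_cfg["metadata_url"].endswith("/metadata/v1.json")
    assert ds_cfg["retries"] == 10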
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 700437b0..03b3870c 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -15,13 +15,11 @@ import time
from cloudinit import dmi
from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-from cloudinit import sources
+from cloudinit import net, sources
from cloudinit import url_helper as uhelp
-from cloudinit import util
-from cloudinit import warnings
+from cloudinit import util, warnings
from cloudinit.event import EventScope, EventType
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
LOG = logging.getLogger(__name__)
@@ -30,10 +28,10 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND])
STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
STRICT_ID_DEFAULT = "warn"
-API_TOKEN_ROUTE = 'latest/api/token'
-AWS_TOKEN_TTL_SECONDS = '21600'
-AWS_TOKEN_PUT_HEADER = 'X-aws-ec2-metadata-token'
-AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + '-ttl-seconds'
+API_TOKEN_ROUTE = "latest/api/token"
+AWS_TOKEN_TTL_SECONDS = "21600"
+AWS_TOKEN_PUT_HEADER = "X-aws-ec2-metadata-token"
+AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + "-ttl-seconds"
AWS_TOKEN_REDACT = [AWS_TOKEN_PUT_HEADER, AWS_TOKEN_REQ_HEADER]
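These constants encode the IMDSv2 session-token handshake that the rest of this file drives through url_helper: first PUT to latest/api/token with the TTL request header, then echo the returned token on every metadata read. A stdlib-only sketch of the same exchange (it only succeeds on an EC2 instance with IMDS reachable):

    import urllib.request

    IMDS = "http://169.254.169.254"

    # Step 1: obtain a session token (PUT with the TTL request header).
    req = urllib.request.Request(
        IMDS + "/latest/api/token",
        method="PUT",
        headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"},
    )
    token = urllib.request.urlopen(req, timeout=5).read().decode()

    # Step 2: present the token on subsequent reads.
    req = urllib.request.Request(
        IMDS + "/latest/meta-data/instance-id",
        headers={"X-aws-ec2-metadata-token": token},
    )
    print(urllib.request.urlopen(req, timeout=5).read().decode())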
@@ -53,18 +51,18 @@ class CloudNames(object):
class DataSourceEc2(sources.DataSource):
- dsname = 'Ec2'
+ dsname = "Ec2"
# Default metadata urls that will be used if none are provided
    # They will be checked for 'resolvability' and some of the
# following may be discarded if they do not resolve
metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"]
# The minimum supported metadata_version from the ec2 metadata apis
- min_metadata_version = '2009-04-04'
+ min_metadata_version = "2009-04-04"
# Priority ordered list of additional metadata versions which will be tried
# for extended metadata content. IPv6 support comes in 2016-09-02
- extended_metadata_versions = ['2018-09-24', '2016-09-02']
+ extended_metadata_versions = ["2018-09-24", "2016-09-02"]
# Setup read_url parameters per get_url_params.
url_max_wait = 120
@@ -76,12 +74,14 @@ class DataSourceEc2(sources.DataSource):
# Whether we want to get network configuration from the metadata service.
perform_dhcp_setup = False
- supported_update_events = {EventScope.NETWORK: {
- EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT,
- EventType.BOOT_LEGACY,
- EventType.HOTPLUG,
- }}
+ supported_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
super(DataSourceEc2, self).__init__(sys_cfg, distro, paths)
@@ -93,11 +93,18 @@ class DataSourceEc2(sources.DataSource):
def _get_data(self):
strict_mode, _sleep = read_strict_mode(
- util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH,
- STRICT_ID_DEFAULT), ("warn", None))
-
- LOG.debug("strict_mode: %s, cloud_name=%s cloud_platform=%s",
- strict_mode, self.cloud_name, self.platform)
+ util.get_cfg_by_path(
+ self.sys_cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT
+ ),
+ ("warn", None),
+ )
+
+ LOG.debug(
+ "strict_mode: %s, cloud_name=%s cloud_platform=%s",
+ strict_mode,
+ self.cloud_name,
+ self.platform,
+ )
if strict_mode == "true" and self.cloud_name == CloudNames.UNKNOWN:
return False
elif self.cloud_name == CloudNames.NO_EC2_METADATA:
@@ -110,20 +117,27 @@ class DataSourceEc2(sources.DataSource):
try:
with EphemeralDHCPv4(self.fallback_interface):
self._crawled_metadata = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self.crawl_metadata)
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self.crawl_metadata,
+ )
except NoDHCPLeaseError:
return False
else:
self._crawled_metadata = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self.crawl_metadata)
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self.crawl_metadata,
+ )
if not self._crawled_metadata:
return False
- self.metadata = self._crawled_metadata.get('meta-data', None)
- self.userdata_raw = self._crawled_metadata.get('user-data', None)
- self.identity = self._crawled_metadata.get(
- 'dynamic', {}).get('instance-identity', {}).get('document', {})
+ self.metadata = self._crawled_metadata.get("meta-data", None)
+ self.userdata_raw = self._crawled_metadata.get("user-data", None)
+ self.identity = (
+ self._crawled_metadata.get("dynamic", {})
+ .get("instance-identity", {})
+ .get("document", {})
+ )
return True
def is_classic_instance(self):
@@ -133,9 +147,9 @@ class DataSourceEc2(sources.DataSource):
# network_config where metadata will be present.
# Secondary call site is in packaging postinst script.
return False
- ifaces_md = self.metadata.get('network', {}).get('interfaces', {})
- for _mac, mac_data in ifaces_md.get('macs', {}).items():
- if 'vpc-id' in mac_data:
+ ifaces_md = self.metadata.get("network", {}).get("interfaces", {})
+ for _mac, mac_data in ifaces_md.get("macs", {}).items():
+ if "vpc-id" in mac_data:
return False
return True
@@ -143,12 +157,12 @@ class DataSourceEc2(sources.DataSource):
def launch_index(self):
if not self.metadata:
return None
- return self.metadata.get('ami-launch-index')
+ return self.metadata.get("ami-launch-index")
@property
def platform(self):
# Handle upgrade path of pickled ds
- if not hasattr(self, '_platform_type'):
+ if not hasattr(self, "_platform_type"):
self._platform_type = DataSourceEc2.dsname.lower()
if not self._platform_type:
self._platform_type = DataSourceEc2.dsname.lower()
@@ -164,44 +178,47 @@ class DataSourceEc2(sources.DataSource):
min_metadata_version.
"""
# Assumes metadata service is already up
- url_tmpl = '{0}/{1}/meta-data/instance-id'
+ url_tmpl = "{0}/{1}/meta-data/instance-id"
headers = self._get_headers()
for api_ver in self.extended_metadata_versions:
url = url_tmpl.format(self.metadata_address, api_ver)
try:
- resp = uhelp.readurl(url=url, headers=headers,
- headers_redact=AWS_TOKEN_REDACT)
+ resp = uhelp.readurl(
+ url=url, headers=headers, headers_redact=AWS_TOKEN_REDACT
+ )
except uhelp.UrlError as e:
- LOG.debug('url %s raised exception %s', url, e)
+ LOG.debug("url %s raised exception %s", url, e)
else:
if resp.code == 200:
- LOG.debug('Found preferred metadata version %s', api_ver)
+ LOG.debug("Found preferred metadata version %s", api_ver)
return api_ver
elif resp.code == 404:
- msg = 'Metadata api version %s not present. Headers: %s'
+ msg = "Metadata api version %s not present. Headers: %s"
LOG.debug(msg, api_ver, resp.headers)
return self.min_metadata_version
def get_instance_id(self):
if self.cloud_name == CloudNames.AWS:
# Prefer the ID from the instance identity document, but fall back
- if not getattr(self, 'identity', None):
+ if not getattr(self, "identity", None):
            # If re-using cached datasource, its get_data run didn't
# setup self.identity. So we need to do that now.
api_version = self.get_metadata_api_version()
self.identity = ec2.get_instance_identity(
- api_version, self.metadata_address,
+ api_version,
+ self.metadata_address,
headers_cb=self._get_headers,
headers_redact=AWS_TOKEN_REDACT,
- exception_cb=self._refresh_stale_aws_token_cb).get(
- 'document', {})
+ exception_cb=self._refresh_stale_aws_token_cb,
+ ).get("document", {})
return self.identity.get(
- 'instanceId', self.metadata['instance-id'])
+ "instanceId", self.metadata["instance-id"]
+ )
else:
- return self.metadata['instance-id']
+ return self.metadata["instance-id"]
def _maybe_fetch_api_token(self, mdurls, timeout=None, max_wait=None):
- """ Get an API token for EC2 Instance Metadata Service.
+ """Get an API token for EC2 Instance Metadata Service.
        On EC2, IMDS will always answer an API token, unless
the instance owner has disabled the IMDS HTTP endpoint or
@@ -213,26 +230,29 @@ class DataSourceEc2(sources.DataSource):
urls = []
url2base = {}
url_path = API_TOKEN_ROUTE
- request_method = 'PUT'
+ request_method = "PUT"
for url in mdurls:
- cur = '{0}/{1}'.format(url, url_path)
+ cur = "{0}/{1}".format(url, url_path)
urls.append(cur)
url2base[cur] = url
# use the self._imds_exception_cb to check for Read errors
- LOG.debug('Fetching Ec2 IMDSv2 API Token')
+ LOG.debug("Fetching Ec2 IMDSv2 API Token")
response = None
url = None
url_params = self.get_url_params()
try:
url, response = uhelp.wait_for_url(
- urls=urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds, status_cb=LOG.warning,
+ urls=urls,
+ max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds,
+ status_cb=LOG.warning,
headers_cb=self._get_headers,
exception_cb=self._imds_exception_cb,
request_method=request_method,
- headers_redact=AWS_TOKEN_REDACT)
+ headers_redact=AWS_TOKEN_REDACT,
+ )
except uhelp.UrlError:
            # We use the raised exception to interrupt the retry loop.
# Nothing else to do here.
@@ -258,8 +278,10 @@ class DataSourceEc2(sources.DataSource):
filtered = [x for x in mdurls if util.is_resolvable_url(x)]
if set(filtered) != set(mdurls):
- LOG.debug("Removed the following from metadata urls: %s",
- list((set(mdurls) - set(filtered))))
+ LOG.debug(
+ "Removed the following from metadata urls: %s",
+ list((set(mdurls) - set(filtered))),
+ )
if len(filtered):
mdurls = filtered
@@ -277,20 +299,25 @@ class DataSourceEc2(sources.DataSource):
# if we can't get a token, use instance-id path
urls = []
url2base = {}
- url_path = '{ver}/meta-data/instance-id'.format(
- ver=self.min_metadata_version)
- request_method = 'GET'
+ url_path = "{ver}/meta-data/instance-id".format(
+ ver=self.min_metadata_version
+ )
+ request_method = "GET"
for url in mdurls:
- cur = '{0}/{1}'.format(url, url_path)
+ cur = "{0}/{1}".format(url, url_path)
urls.append(cur)
url2base[cur] = url
start_time = time.time()
url, _ = uhelp.wait_for_url(
- urls=urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds, status_cb=LOG.warning,
- headers_redact=AWS_TOKEN_REDACT, headers_cb=self._get_headers,
- request_method=request_method)
+ urls=urls,
+ max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds,
+ status_cb=LOG.warning,
+ headers_redact=AWS_TOKEN_REDACT,
+ headers_cb=self._get_headers,
+ request_method=request_method,
+ )
if url:
metadata_address = url2base[url]
@@ -301,8 +328,11 @@ class DataSourceEc2(sources.DataSource):
elif self.cloud_name == CloudNames.AWS:
LOG.warning("IMDS's HTTP endpoint is probably disabled")
else:
- LOG.critical("Giving up on md from %s after %s seconds",
- urls, int(time.time() - start_time))
+ LOG.critical(
+ "Giving up on md from %s after %s seconds",
+ urls,
+ int(time.time() - start_time),
+ )
return bool(metadata_address)
@@ -310,7 +340,7 @@ class DataSourceEc2(sources.DataSource):
# Consult metadata service, that has
# ephemeral0: sdb
# and return 'sdb' for input 'ephemeral0'
- if 'block-device-mapping' not in self.metadata:
+ if "block-device-mapping" not in self.metadata:
return None
# Example:
@@ -319,7 +349,7 @@ class DataSourceEc2(sources.DataSource):
# 'ephemeral0': '/dev/sdb',
# 'root': '/dev/sda1'}
found = None
- bdm = self.metadata['block-device-mapping']
+ bdm = self.metadata["block-device-mapping"]
if not isinstance(bdm, dict):
LOG.debug("block-device-mapping not a dictionary: '%s'", bdm)
return None
@@ -362,17 +392,18 @@ class DataSourceEc2(sources.DataSource):
try:
if self.cloud_name == CloudNames.AWS:
return self.identity.get(
- 'availabilityZone',
- self.metadata['placement']['availability-zone'])
+ "availabilityZone",
+ self.metadata["placement"]["availability-zone"],
+ )
else:
- return self.metadata['placement']['availability-zone']
+ return self.metadata["placement"]["availability-zone"]
except KeyError:
return None
@property
def region(self):
if self.cloud_name == CloudNames.AWS:
- region = self.identity.get('region')
+ region = self.identity.get("region")
# Fallback to trimming the availability zone if region is missing
if self.availability_zone and not region:
region = self.availability_zone[:-1]
@@ -389,7 +420,8 @@ class DataSourceEc2(sources.DataSource):
if self.cloud_name == CloudNames.UNKNOWN:
warn_if_necessary(
util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT),
- cfg)
+ cfg,
+ )
@property
def network_config(self):
@@ -400,30 +432,39 @@ class DataSourceEc2(sources.DataSource):
if self.metadata is None:
# this would happen if get_data hadn't been called. leave as UNSET
LOG.warning(
- "Unexpected call to network_config when metadata is None.")
+ "Unexpected call to network_config when metadata is None."
+ )
return None
result = None
no_network_metadata_on_aws = bool(
- 'network' not in self.metadata and
- self.cloud_name == CloudNames.AWS)
+ "network" not in self.metadata
+ and self.cloud_name == CloudNames.AWS
+ )
if no_network_metadata_on_aws:
- LOG.debug("Metadata 'network' not present:"
- " Refreshing stale metadata from prior to upgrade.")
+ LOG.debug(
+ "Metadata 'network' not present:"
+ " Refreshing stale metadata from prior to upgrade."
+ )
util.log_time(
- logfunc=LOG.debug, msg='Re-crawl of metadata service',
- func=self.get_data)
+ logfunc=LOG.debug,
+ msg="Re-crawl of metadata service",
+ func=self.get_data,
+ )
iface = self.fallback_interface
- net_md = self.metadata.get('network')
+ net_md = self.metadata.get("network")
if isinstance(net_md, dict):
# SRU_BLOCKER: xenial, bionic and eoan should default
# apply_full_imds_network_config to False to retain original
# behavior on those releases.
result = convert_ec2_metadata_network_config(
- net_md, fallback_nic=iface,
+ net_md,
+ fallback_nic=iface,
full_network_config=util.get_cfg_option_bool(
- self.ds_cfg, 'apply_full_imds_network_config', True))
+ self.ds_cfg, "apply_full_imds_network_config", True
+ ),
+ )
# RELEASE_BLOCKER: xenial should drop the below if statement,
# because the issue being addressed doesn't exist pre-netplan.
@@ -435,11 +476,14 @@ class DataSourceEc2(sources.DataSource):
# network config file every boot due to MAC address change.
if self.is_classic_instance():
self.default_update_events = copy.deepcopy(
- self.default_update_events)
+ self.default_update_events
+ )
self.default_update_events[EventScope.NETWORK].add(
- EventType.BOOT)
+ EventType.BOOT
+ )
self.default_update_events[EventScope.NETWORK].add(
- EventType.BOOT_LEGACY)
+ EventType.BOOT_LEGACY
+ )
else:
LOG.warning("Metadata 'network' key not valid: %s.", net_md)
self._network_config = result
@@ -451,7 +495,7 @@ class DataSourceEc2(sources.DataSource):
if self._fallback_interface is None:
# fallback_nic was used at one point, so restored objects may
# have an attribute there. respect that if found.
- _legacy_fbnic = getattr(self, 'fallback_nic', None)
+ _legacy_fbnic = getattr(self, "fallback_nic", None)
if _legacy_fbnic:
self._fallback_interface = _legacy_fbnic
self.fallback_nic = None
@@ -476,26 +520,37 @@ class DataSourceEc2(sources.DataSource):
else:
exc_cb = exc_cb_ud = None
try:
- crawled_metadata['user-data'] = ec2.get_instance_userdata(
- api_version, self.metadata_address,
- headers_cb=self._get_headers, headers_redact=redact,
- exception_cb=exc_cb_ud)
- crawled_metadata['meta-data'] = ec2.get_instance_metadata(
- api_version, self.metadata_address,
- headers_cb=self._get_headers, headers_redact=redact,
- exception_cb=exc_cb)
+ crawled_metadata["user-data"] = ec2.get_instance_userdata(
+ api_version,
+ self.metadata_address,
+ headers_cb=self._get_headers,
+ headers_redact=redact,
+ exception_cb=exc_cb_ud,
+ )
+ crawled_metadata["meta-data"] = ec2.get_instance_metadata(
+ api_version,
+ self.metadata_address,
+ headers_cb=self._get_headers,
+ headers_redact=redact,
+ exception_cb=exc_cb,
+ )
if self.cloud_name == CloudNames.AWS:
identity = ec2.get_instance_identity(
- api_version, self.metadata_address,
- headers_cb=self._get_headers, headers_redact=redact,
- exception_cb=exc_cb)
- crawled_metadata['dynamic'] = {'instance-identity': identity}
+ api_version,
+ self.metadata_address,
+ headers_cb=self._get_headers,
+ headers_redact=redact,
+ exception_cb=exc_cb,
+ )
+ crawled_metadata["dynamic"] = {"instance-identity": identity}
except Exception:
util.logexc(
- LOG, "Failed reading from metadata address %s",
- self.metadata_address)
+ LOG,
+ "Failed reading from metadata address %s",
+ self.metadata_address,
+ )
return {}
- crawled_metadata['_metadata_api_version'] = api_version
+ crawled_metadata["_metadata_api_version"] = api_version
return crawled_metadata
def _refresh_api_token(self, seconds=AWS_TOKEN_TTL_SECONDS):
@@ -508,23 +563,27 @@ class DataSourceEc2(sources.DataSource):
return None
LOG.debug("Refreshing Ec2 metadata API token")
request_header = {AWS_TOKEN_REQ_HEADER: seconds}
- token_url = '{}/{}'.format(self.metadata_address, API_TOKEN_ROUTE)
+ token_url = "{}/{}".format(self.metadata_address, API_TOKEN_ROUTE)
try:
- response = uhelp.readurl(token_url, headers=request_header,
- headers_redact=AWS_TOKEN_REDACT,
- request_method="PUT")
+ response = uhelp.readurl(
+ token_url,
+ headers=request_header,
+ headers_redact=AWS_TOKEN_REDACT,
+ request_method="PUT",
+ )
except uhelp.UrlError as e:
LOG.warning(
- 'Unable to get API token: %s raised exception %s',
- token_url, e)
+ "Unable to get API token: %s raised exception %s", token_url, e
+ )
return None
return response.contents
def _skip_or_refresh_stale_aws_token_cb(self, msg, exception):
"""Callback will not retry on SKIP_USERDATA_CODES or if no token
- is available."""
+ is available."""
retry = ec2.skip_retry_on_codes(
- ec2.SKIP_USERDATA_CODES, msg, exception)
+ ec2.SKIP_USERDATA_CODES, msg, exception
+ )
if not retry:
return False # False raises exception
return self._refresh_stale_aws_token_cb(msg, exception)
@@ -554,14 +613,17 @@ class DataSourceEc2(sources.DataSource):
# requests.ConnectionError will have exception.code == None
if exception.code and exception.code >= 400:
if exception.code == 403:
- LOG.warning('Ec2 IMDS endpoint returned a 403 error. '
- 'HTTP endpoint is disabled. Aborting.')
+ LOG.warning(
+ "Ec2 IMDS endpoint returned a 403 error. "
+ "HTTP endpoint is disabled. Aborting."
+ )
else:
- LOG.warning('Fatal error while requesting '
- 'Ec2 IMDSv2 API tokens')
+ LOG.warning(
+ "Fatal error while requesting Ec2 IMDSv2 API tokens"
+ )
raise exception
- def _get_headers(self, url=''):
+ def _get_headers(self, url=""):
"""Return a dict of headers for accessing a url.
If _api_token is unset on AWS, attempt to refresh the token via a PUT
@@ -591,13 +653,17 @@ class DataSourceEc2Local(DataSourceEc2):
metadata service. If the metadata service provides network configuration
then render the network configuration for that instance based on metadata.
"""
+
perform_dhcp_setup = True # Use dhcp before querying metadata
def get_data(self):
supported_platforms = (CloudNames.AWS,)
if self.cloud_name not in supported_platforms:
- LOG.debug("Local Ec2 mode only supported on %s, not %s",
- supported_platforms, self.cloud_name)
+ LOG.debug(
+ "Local Ec2 mode only supported on %s, not %s",
+ supported_platforms,
+ self.cloud_name,
+ )
return False
return super(DataSourceEc2Local, self).get_data()
@@ -615,18 +681,19 @@ def parse_strict_mode(cfgval):
# true, false, warn,[sleep]
# return tuple with string mode (true|false|warn) and sleep.
if cfgval is True:
- return 'true', None
+ return "true", None
if cfgval is False:
- return 'false', None
+ return "false", None
if not cfgval:
- return 'warn', 0
+ return "warn", 0
mode, _, sleep = cfgval.partition(",")
- if mode not in ('true', 'false', 'warn'):
+ if mode not in ("true", "false", "warn"):
raise ValueError(
"Invalid mode '%s' in strict_id setting '%s': "
- "Expected one of 'true', 'false', 'warn'." % (mode, cfgval))
+ "Expected one of 'true', 'false', 'warn'." % (mode, cfgval)
+ )
if sleep:
try:
@@ -652,47 +719,53 @@ def warn_if_necessary(cfgval, cfg):
if mode == "false":
return
- warnings.show_warning('non_ec2_md', cfg, mode=True, sleep=sleep)
+ warnings.show_warning("non_ec2_md", cfg, mode=True, sleep=sleep)
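parse_strict_mode above normalizes the datasource/Ec2/strict_id value, which may be a bool or a "mode[,sleep]" string. A couple of illustrative results (assuming the int() conversion of the sleep part, which falls outside this hunk, is unchanged):

    from cloudinit.sources.DataSourceEc2 import parse_strict_mode

    assert parse_strict_mode(True) == ("true", None)        # refuse unknown platforms
    assert parse_strict_mode(None) == ("warn", 0)           # warn immediately, continue
    assert parse_strict_mode("warn,300") == ("warn", 300)   # warn, sleep 300s, continue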
def identify_aws(data):
# data is a dictionary returned by _collect_platform_data.
- if (data['uuid'].startswith('ec2') and
- (data['uuid_source'] == 'hypervisor' or
- data['uuid'] == data['serial'])):
+ if data["uuid"].startswith("ec2") and (
+ data["uuid_source"] == "hypervisor" or data["uuid"] == data["serial"]
+ ):
return CloudNames.AWS
return None
def identify_brightbox(data):
- if data['serial'].endswith('.brightbox.com'):
+ if data["serial"].endswith(".brightbox.com"):
return CloudNames.BRIGHTBOX
def identify_zstack(data):
- if data['asset_tag'].endswith('.zstack.io'):
+ if data["asset_tag"].endswith(".zstack.io"):
return CloudNames.ZSTACK
def identify_e24cloud(data):
- if data['vendor'] == 'e24cloud':
+ if data["vendor"] == "e24cloud":
return CloudNames.E24CLOUD
def identify_platform():
# identify the platform and return an entry in CloudNames.
data = _collect_platform_data()
- checks = (identify_aws, identify_brightbox, identify_zstack,
- identify_e24cloud, lambda x: CloudNames.UNKNOWN)
+ checks = (
+ identify_aws,
+ identify_brightbox,
+ identify_zstack,
+ identify_e24cloud,
+ lambda x: CloudNames.UNKNOWN,
+ )
for checker in checks:
try:
result = checker(data)
if result:
return result
except Exception as e:
- LOG.warning("calling %s with %s raised exception: %s",
- checker, data, e)
+ LOG.warning(
+ "calling %s with %s raised exception: %s", checker, data, e
+ )
def _collect_platform_data():
@@ -711,36 +784,36 @@ def _collect_platform_data():
data = {}
try:
uuid = util.load_file("/sys/hypervisor/uuid").strip()
- data['uuid_source'] = 'hypervisor'
+ data["uuid_source"] = "hypervisor"
except Exception:
- uuid = dmi.read_dmi_data('system-uuid')
- data['uuid_source'] = 'dmi'
+ uuid = dmi.read_dmi_data("system-uuid")
+ data["uuid_source"] = "dmi"
if uuid is None:
- uuid = ''
- data['uuid'] = uuid.lower()
+ uuid = ""
+ data["uuid"] = uuid.lower()
- serial = dmi.read_dmi_data('system-serial-number')
+ serial = dmi.read_dmi_data("system-serial-number")
if serial is None:
- serial = ''
+ serial = ""
- data['serial'] = serial.lower()
+ data["serial"] = serial.lower()
- asset_tag = dmi.read_dmi_data('chassis-asset-tag')
+ asset_tag = dmi.read_dmi_data("chassis-asset-tag")
if asset_tag is None:
- asset_tag = ''
+ asset_tag = ""
- data['asset_tag'] = asset_tag.lower()
+ data["asset_tag"] = asset_tag.lower()
- vendor = dmi.read_dmi_data('system-manufacturer')
- data['vendor'] = (vendor if vendor else '').lower()
+ vendor = dmi.read_dmi_data("system-manufacturer")
+ data["vendor"] = (vendor if vendor else "").lower()
return data
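identify_aws, together with the DMI fields collected here, is the whole platform check: a system UUID that starts with "ec2" and either came from the hypervisor or matches the serial number is treated as AWS. A small illustration with invented values (assumes cloud-init is importable):

    from cloudinit.sources.DataSourceEc2 import CloudNames, identify_aws

    sample = {
        "uuid": "ec2e1916-9099-7caf-fd21-0123456789ab",    # invented EC2-style UUID
        "uuid_source": "dmi",
        "serial": "ec2e1916-9099-7caf-fd21-0123456789ab",  # equals uuid here
        "asset_tag": "",
        "vendor": "amazon ec2",
    }
    assert identify_aws(sample) == CloudNames.AWS

    sample["uuid"] = "12345678-0000-0000-0000-000000000000"
    assert identify_aws(sample) is None  # falls through to the other checks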
def convert_ec2_metadata_network_config(
- network_md, macs_to_nics=None, fallback_nic=None,
- full_network_config=True):
+ network_md, macs_to_nics=None, fallback_nic=None, full_network_config=True
+):
"""Convert ec2 metadata to network config version 2 data dict.
@param: network_md: 'network' portion of EC2 metadata.
@@ -759,23 +832,25 @@ def convert_ec2_metadata_network_config(
@return A dict of network config version 2 based on the metadata and macs.
"""
- netcfg = {'version': 2, 'ethernets': {}}
+ netcfg = {"version": 2, "ethernets": {}}
if not macs_to_nics:
macs_to_nics = net.get_interfaces_by_mac()
- macs_metadata = network_md['interfaces']['macs']
+ macs_metadata = network_md["interfaces"]["macs"]
if not full_network_config:
for mac, nic_name in macs_to_nics.items():
if nic_name == fallback_nic:
break
- dev_config = {'dhcp4': True,
- 'dhcp6': False,
- 'match': {'macaddress': mac.lower()},
- 'set-name': nic_name}
+ dev_config = {
+ "dhcp4": True,
+ "dhcp6": False,
+ "match": {"macaddress": mac.lower()},
+ "set-name": nic_name,
+ }
nic_metadata = macs_metadata.get(mac)
- if nic_metadata.get('ipv6s'): # Any IPv6 addresses configured
- dev_config['dhcp6'] = True
- netcfg['ethernets'][nic_name] = dev_config
+ if nic_metadata.get("ipv6s"): # Any IPv6 addresses configured
+ dev_config["dhcp6"] = True
+ netcfg["ethernets"][nic_name] = dev_config
return netcfg
# Apply network config for all nics and any secondary IPv4/v6 addresses
nic_idx = 0
@@ -785,24 +860,27 @@ def convert_ec2_metadata_network_config(
continue # Not a physical nic represented in metadata
# device-number is zero-indexed, we want it 1-indexed for the
# multiplication on the following line
- nic_idx = int(nic_metadata.get('device-number', nic_idx)) + 1
- dhcp_override = {'route-metric': nic_idx * 100}
- dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override,
- 'dhcp6': False,
- 'match': {'macaddress': mac.lower()},
- 'set-name': nic_name}
- if nic_metadata.get('ipv6s'): # Any IPv6 addresses configured
- dev_config['dhcp6'] = True
- dev_config['dhcp6-overrides'] = dhcp_override
- dev_config['addresses'] = get_secondary_addresses(nic_metadata, mac)
- if not dev_config['addresses']:
- dev_config.pop('addresses') # Since we found none configured
- netcfg['ethernets'][nic_name] = dev_config
+ nic_idx = int(nic_metadata.get("device-number", nic_idx)) + 1
+ dhcp_override = {"route-metric": nic_idx * 100}
+ dev_config = {
+ "dhcp4": True,
+ "dhcp4-overrides": dhcp_override,
+ "dhcp6": False,
+ "match": {"macaddress": mac.lower()},
+ "set-name": nic_name,
+ }
+ if nic_metadata.get("ipv6s"): # Any IPv6 addresses configured
+ dev_config["dhcp6"] = True
+ dev_config["dhcp6-overrides"] = dhcp_override
+ dev_config["addresses"] = get_secondary_addresses(nic_metadata, mac)
+ if not dev_config["addresses"]:
+ dev_config.pop("addresses") # Since we found none configured
+ netcfg["ethernets"][nic_name] = dev_config
# Remove route-metric dhcp overrides if only one nic configured
- if len(netcfg['ethernets']) == 1:
- for nic_name in netcfg['ethernets'].keys():
- netcfg['ethernets'][nic_name].pop('dhcp4-overrides')
- netcfg['ethernets'][nic_name].pop('dhcp6-overrides', None)
+ if len(netcfg["ethernets"]) == 1:
+ for nic_name in netcfg["ethernets"].keys():
+ netcfg["ethernets"][nic_name].pop("dhcp4-overrides")
+ netcfg["ethernets"][nic_name].pop("dhcp6-overrides", None)
return netcfg
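For a single-NIC instance the function above reduces to a plain DHCP stanza, because the per-NIC route-metric overrides are stripped again at the end. A hedged sketch of the expected output (the MAC and interface name are invented; assumes cloud-init is importable):

    from cloudinit.sources.DataSourceEc2 import convert_ec2_metadata_network_config

    network_md = {
        "interfaces": {
            "macs": {"0a:00:00:00:00:01": {"device-number": "0", "ipv6s": ""}}
        }
    }
    cfg = convert_ec2_metadata_network_config(
        network_md, macs_to_nics={"0a:00:00:00:00:01": "eth0"}
    )
    # Expected shape:
    # {'version': 2, 'ethernets': {'eth0': {'dhcp4': True, 'dhcp6': False,
    #   'match': {'macaddress': '0a:00:00:00:00:01'}, 'set-name': 'eth0'}}}
    print(cfg)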
@@ -812,18 +890,22 @@ def get_secondary_addresses(nic_metadata, mac):
:return: List of secondary IPv4 or IPv6 addresses to configure on the
interface
"""
- ipv4s = nic_metadata.get('local-ipv4s')
- ipv6s = nic_metadata.get('ipv6s')
+ ipv4s = nic_metadata.get("local-ipv4s")
+ ipv6s = nic_metadata.get("ipv6s")
addresses = []
# In version < 2018-09-24 local_ipv4s or ipv6s is a str with one IP
if bool(isinstance(ipv4s, list) and len(ipv4s) > 1):
addresses.extend(
_get_secondary_addresses(
- nic_metadata, 'subnet-ipv4-cidr-block', mac, ipv4s, '24'))
+ nic_metadata, "subnet-ipv4-cidr-block", mac, ipv4s, "24"
+ )
+ )
if bool(isinstance(ipv6s, list) and len(ipv6s) > 1):
addresses.extend(
_get_secondary_addresses(
- nic_metadata, 'subnet-ipv6-cidr-block', mac, ipv6s, '128'))
+ nic_metadata, "subnet-ipv6-cidr-block", mac, ipv6s, "128"
+ )
+ )
return sorted(addresses)
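get_secondary_addresses only reports addresses beyond the primary one, suffixed with the subnet prefix length (or the /24 and /128 defaults when the CIDR block is unparseable). A short example with invented values:

    from cloudinit.sources.DataSourceEc2 import get_secondary_addresses

    nic_md = {
        "local-ipv4s": ["10.0.0.10", "10.0.0.11"],   # first entry is the primary
        "subnet-ipv4-cidr-block": "10.0.0.0/24",
    }
    print(get_secondary_addresses(nic_md, "0a:00:00:00:00:01"))  # ['10.0.0.11/24']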
@@ -836,18 +918,22 @@ def _get_secondary_addresses(nic_metadata, cidr_key, mac, ips, default_prefix):
addresses = []
cidr = nic_metadata.get(cidr_key)
prefix = default_prefix
- if not cidr or len(cidr.split('/')) != 2:
- ip_type = 'ipv4' if 'ipv4' in cidr_key else 'ipv6'
+ if not cidr or len(cidr.split("/")) != 2:
+ ip_type = "ipv4" if "ipv4" in cidr_key else "ipv6"
LOG.warning(
- 'Could not parse %s %s for mac %s. %s network'
- ' config prefix defaults to /%s',
- cidr_key, cidr, mac, ip_type, prefix)
+ "Could not parse %s %s for mac %s. %s network"
+ " config prefix defaults to /%s",
+ cidr_key,
+ cidr,
+ mac,
+ ip_type,
+ prefix,
+ )
else:
- prefix = cidr.split('/')[1]
+ prefix = cidr.split("/")[1]
    # We know we have > 1 ips in metadata for this IP type
for ip in ips[1:]:
- addresses.append(
- '{ip}/{prefix}'.format(ip=ip, prefix=prefix))
+ addresses.append("{ip}/{prefix}".format(ip=ip, prefix=prefix))
return addresses
@@ -862,4 +948,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py
index adee6d79..cc5136d7 100644
--- a/cloudinit/sources/DataSourceExoscale.py
+++ b/cloudinit/sources/DataSourceExoscale.py
@@ -5,11 +5,9 @@
from cloudinit import dmi
from cloudinit import ec2_utils as ec2
-from cloudinit import log as logging
-from cloudinit import sources
from cloudinit import helpers
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import log as logging
+from cloudinit import sources, url_helper, util
LOG = logging.getLogger(__name__)
@@ -25,7 +23,7 @@ EXOSCALE_DMI_NAME = "Exoscale"
class DataSourceExoscale(sources.DataSource):
- dsname = 'Exoscale'
+ dsname = "Exoscale"
url_max_wait = 120
@@ -33,12 +31,13 @@ class DataSourceExoscale(sources.DataSource):
super(DataSourceExoscale, self).__init__(sys_cfg, distro, paths)
LOG.debug("Initializing the Exoscale datasource")
- self.metadata_url = self.ds_cfg.get('metadata_url', METADATA_URL)
- self.api_version = self.ds_cfg.get('api_version', API_VERSION)
+ self.metadata_url = self.ds_cfg.get("metadata_url", METADATA_URL)
+ self.api_version = self.ds_cfg.get("api_version", API_VERSION)
self.password_server_port = int(
- self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT))
- self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT)
- self.url_retries = self.ds_cfg.get('retries', URL_RETRIES)
+ self.ds_cfg.get("password_server_port", PASSWORD_SERVER_PORT)
+ )
+ self.url_timeout = self.ds_cfg.get("timeout", URL_TIMEOUT)
+ self.url_retries = self.ds_cfg.get("retries", URL_RETRIES)
self.extra_config = {}
def activate(self, cfg, is_new_instance):
@@ -50,23 +49,25 @@ class DataSourceExoscale(sources.DataSource):
# a user has triggered a password reset. So calling that password
# service generally results in no additional cloud-config.
# TODO(Create util functions for overriding merged sys_cfg module freq)
- mod = 'set_passwords'
- sem_path = self.paths.get_ipath_cur('sem')
+ mod = "set_passwords"
+ sem_path = self.paths.get_ipath_cur("sem")
sem_helper = helpers.FileSemaphores(sem_path)
- if sem_helper.clear('config_' + mod, None):
- LOG.debug('Overriding module set-passwords with frequency always')
+ if sem_helper.clear("config_" + mod, None):
+ LOG.debug("Overriding module set-passwords with frequency always")
def wait_for_metadata_service(self):
"""Wait for the metadata service to be reachable."""
metadata_url = "{}/{}/meta-data/instance-id".format(
- self.metadata_url, self.api_version)
+ self.metadata_url, self.api_version
+ )
url, _response = url_helper.wait_for_url(
urls=[metadata_url],
max_wait=self.url_max_wait,
timeout=self.url_timeout,
- status_cb=LOG.critical)
+ status_cb=LOG.critical,
+ )
return bool(url)
@@ -78,15 +79,20 @@ class DataSourceExoscale(sources.DataSource):
"""
metadata_ready = util.log_time(
logfunc=LOG.info,
- msg='waiting for the metadata service',
- func=self.wait_for_metadata_service)
+ msg="waiting for the metadata service",
+ func=self.wait_for_metadata_service,
+ )
if not metadata_ready:
return {}
- return read_metadata(self.metadata_url, self.api_version,
- self.password_server_port, self.url_timeout,
- self.url_retries)
+ return read_metadata(
+ self.metadata_url,
+ self.api_version,
+ self.password_server_port,
+ self.url_timeout,
+ self.url_retries,
+ )
def _get_data(self):
"""Fetch the user data, the metadata and the VM password
@@ -100,15 +106,16 @@ class DataSourceExoscale(sources.DataSource):
data = util.log_time(
logfunc=LOG.debug,
- msg='Crawl of metadata service',
- func=self.crawl_metadata)
+ msg="Crawl of metadata service",
+ func=self.crawl_metadata,
+ )
if not data:
return False
- self.userdata_raw = data['user-data']
- self.metadata = data['meta-data']
- password = data.get('password')
+ self.userdata_raw = data["user-data"]
+ self.metadata = data["meta-data"]
+ password = data.get("password")
password_config = {}
if password:
@@ -119,16 +126,17 @@ class DataSourceExoscale(sources.DataSource):
# leave the password always disabled if no password is ever set, or
# leave the password login enabled if we set it once.
password_config = {
- 'ssh_pwauth': True,
- 'password': password,
- 'chpasswd': {
- 'expire': False,
+ "ssh_pwauth": True,
+ "password": password,
+ "chpasswd": {
+ "expire": False,
},
}
# builtin extra_config overrides password_config
self.extra_config = util.mergemanydict(
- [self.extra_config, password_config])
+ [self.extra_config, password_config]
+ )
return True
@@ -136,8 +144,9 @@ class DataSourceExoscale(sources.DataSource):
return self.extra_config
def _is_platform_viable(self):
- return dmi.read_dmi_data('system-product-name').startswith(
- EXOSCALE_DMI_NAME)
+ return dmi.read_dmi_data("system-product-name").startswith(
+ EXOSCALE_DMI_NAME
+ )
# Used to match classes to dependencies
@@ -151,28 +160,32 @@ def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
-def get_password(metadata_url=METADATA_URL,
- api_version=API_VERSION,
- password_server_port=PASSWORD_SERVER_PORT,
- url_timeout=URL_TIMEOUT,
- url_retries=URL_RETRIES):
+def get_password(
+ metadata_url=METADATA_URL,
+ api_version=API_VERSION,
+ password_server_port=PASSWORD_SERVER_PORT,
+ url_timeout=URL_TIMEOUT,
+ url_retries=URL_RETRIES,
+):
"""Obtain the VM's password if set.
Once fetched the password is marked saved. Future calls to this method may
return empty string or 'saved_password'."""
- password_url = "{}:{}/{}/".format(metadata_url, password_server_port,
- api_version)
+ password_url = "{}:{}/{}/".format(
+ metadata_url, password_server_port, api_version
+ )
response = url_helper.read_file_or_url(
password_url,
ssl_details=None,
headers={"DomU_Request": "send_my_password"},
timeout=url_timeout,
- retries=url_retries)
- password = response.contents.decode('utf-8')
+ retries=url_retries,
+ )
+ password = response.contents.decode("utf-8")
# the password is empty or already saved
# Note: the original metadata server would answer an additional
# 'bad_request' status, but the Exoscale implementation does not.
- if password in ['', 'saved_password']:
+ if password in ["", "saved_password"]:
return None
# save the password
url_helper.read_file_or_url(
@@ -180,44 +193,50 @@ def get_password(metadata_url=METADATA_URL,
ssl_details=None,
headers={"DomU_Request": "saved_password"},
timeout=url_timeout,
- retries=url_retries)
+ retries=url_retries,
+ )
return password
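get_password follows a CloudStack-style password server protocol: ask with a DomU_Request: send_my_password header, then acknowledge with saved_password so the secret is only applied once. A stdlib sketch of the same exchange; the address and port below are illustrative assumptions, the real values come from the datasource config:

    import urllib.request

    # Assumed layout: {metadata_url}:{password_server_port}/{api_version}/
    url = "http://169.254.169.254:8080/1.0/"

    def _ask(request_type):
        req = urllib.request.Request(url, headers={"DomU_Request": request_type})
        return urllib.request.urlopen(req, timeout=10).read().decode("utf-8")

    password = _ask("send_my_password")
    if password not in ("", "saved_password"):   # anything else is a fresh password
        _ask("saved_password")                   # acknowledge so it is not reused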
-def read_metadata(metadata_url=METADATA_URL,
- api_version=API_VERSION,
- password_server_port=PASSWORD_SERVER_PORT,
- url_timeout=URL_TIMEOUT,
- url_retries=URL_RETRIES):
+def read_metadata(
+ metadata_url=METADATA_URL,
+ api_version=API_VERSION,
+ password_server_port=PASSWORD_SERVER_PORT,
+ url_timeout=URL_TIMEOUT,
+ url_retries=URL_RETRIES,
+):
"""Query the metadata server and return the retrieved data."""
crawled_metadata = {}
- crawled_metadata['_metadata_api_version'] = api_version
+ crawled_metadata["_metadata_api_version"] = api_version
try:
- crawled_metadata['user-data'] = ec2.get_instance_userdata(
- api_version,
- metadata_url,
- timeout=url_timeout,
- retries=url_retries)
- crawled_metadata['meta-data'] = ec2.get_instance_metadata(
- api_version,
- metadata_url,
- timeout=url_timeout,
- retries=url_retries)
+ crawled_metadata["user-data"] = ec2.get_instance_userdata(
+ api_version, metadata_url, timeout=url_timeout, retries=url_retries
+ )
+ crawled_metadata["meta-data"] = ec2.get_instance_metadata(
+ api_version, metadata_url, timeout=url_timeout, retries=url_retries
+ )
except Exception as e:
- util.logexc(LOG, "failed reading from metadata url %s (%s)",
- metadata_url, e)
+ util.logexc(
+ LOG, "failed reading from metadata url %s (%s)", metadata_url, e
+ )
return {}
try:
- crawled_metadata['password'] = get_password(
+ crawled_metadata["password"] = get_password(
api_version=api_version,
metadata_url=metadata_url,
password_server_port=password_server_port,
url_retries=url_retries,
- url_timeout=url_timeout)
+ url_timeout=url_timeout,
+ )
except Exception as e:
- util.logexc(LOG, "failed to read from password server url %s:%s (%s)",
- metadata_url, password_server_port, e)
+ util.logexc(
+ LOG,
+ "failed to read from password server url %s:%s (%s)",
+ metadata_url,
+ password_server_port,
+ e,
+ )
return crawled_metadata
@@ -225,35 +244,40 @@ def read_metadata(metadata_url=METADATA_URL,
if __name__ == "__main__":
import argparse
- parser = argparse.ArgumentParser(description='Query Exoscale Metadata')
+ parser = argparse.ArgumentParser(description="Query Exoscale Metadata")
parser.add_argument(
"--endpoint",
metavar="URL",
help="The url of the metadata service.",
- default=METADATA_URL)
+ default=METADATA_URL,
+ )
parser.add_argument(
"--version",
metavar="VERSION",
help="The version of the metadata endpoint to query.",
- default=API_VERSION)
+ default=API_VERSION,
+ )
parser.add_argument(
"--retries",
metavar="NUM",
type=int,
help="The number of retries querying the endpoint.",
- default=URL_RETRIES)
+ default=URL_RETRIES,
+ )
parser.add_argument(
"--timeout",
metavar="NUM",
type=int,
help="The time in seconds to wait before timing out.",
- default=URL_TIMEOUT)
+ default=URL_TIMEOUT,
+ )
parser.add_argument(
"--password-port",
metavar="PORT",
type=int,
help="The port on which the password endpoint listens",
- default=PASSWORD_SERVER_PORT)
+ default=PASSWORD_SERVER_PORT,
+ )
args = parser.parse_args()
@@ -262,7 +286,8 @@ if __name__ == "__main__":
api_version=args.version,
password_server_port=args.password_port,
url_timeout=args.timeout,
- url_retries=args.retries)
+ url_retries=args.retries,
+ )
print(util.json_dumps(data))
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index b82fa410..c470bea8 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -4,31 +4,29 @@
import datetime
import json
-from contextlib import suppress as noop
-
from base64 import b64decode
+from contextlib import suppress as noop
from cloudinit import dmi
-from cloudinit.distros import ug_util
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import sources, url_helper, util
+from cloudinit.distros import ug_util
from cloudinit.net.dhcp import EphemeralDHCPv4
LOG = logging.getLogger(__name__)
-MD_V1_URL = 'http://metadata.google.internal/computeMetadata/v1/'
-BUILTIN_DS_CONFIG = {'metadata_url': MD_V1_URL}
-REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
-GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/'
- 'v1/instance/guest-attributes')
-HOSTKEY_NAMESPACE = 'hostkeys'
-HEADERS = {'Metadata-Flavor': 'Google'}
+MD_V1_URL = "http://metadata.google.internal/computeMetadata/v1/"
+BUILTIN_DS_CONFIG = {"metadata_url": MD_V1_URL}
+REQUIRED_FIELDS = ("instance-id", "availability-zone", "local-hostname")
+GUEST_ATTRIBUTES_URL = (
+ "http://metadata.google.internal/computeMetadata/"
+ "v1/instance/guest-attributes"
+)
+HOSTKEY_NAMESPACE = "hostkeys"
+HEADERS = {"Metadata-Flavor": "Google"}
class GoogleMetadataFetcher(object):
-
def __init__(self, metadata_address, num_retries, sec_between_retries):
self.metadata_address = metadata_address
self.num_retries = num_retries
@@ -39,10 +37,13 @@ class GoogleMetadataFetcher(object):
try:
url = self.metadata_address + path
if is_recursive:
- url += '/?recursive=True'
- resp = url_helper.readurl(url=url, headers=HEADERS,
- retries=self.num_retries,
- sec_between=self.sec_between_retries)
+ url += "/?recursive=True"
+ resp = url_helper.readurl(
+ url=url,
+ headers=HEADERS,
+ retries=self.num_retries,
+ sec_between=self.sec_between_retries,
+ )
except url_helper.UrlError as exc:
msg = "url %s raised exception %s"
LOG.debug(msg, path, exc)
@@ -51,7 +52,7 @@ class GoogleMetadataFetcher(object):
if is_text:
value = util.decode_binary(resp.contents)
else:
- value = resp.contents.decode('utf-8')
+ value = resp.contents.decode("utf-8")
else:
LOG.debug("url %s returned code %s", path, resp.code)
return value
@@ -59,7 +60,7 @@ class GoogleMetadataFetcher(object):
class DataSourceGCE(sources.DataSource):
- dsname = 'GCE'
+ dsname = "GCE"
perform_dhcp_setup = False
def __init__(self, sys_cfg, distro, paths):
@@ -69,10 +70,13 @@ class DataSourceGCE(sources.DataSource):
(users, _groups) = ug_util.normalize_users_groups(sys_cfg, distro)
(self.default_user, _user_config) = ug_util.extract_default(users)
self.metadata = dict()
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}),
- BUILTIN_DS_CONFIG])
- self.metadata_address = self.ds_cfg['metadata_url']
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+ self.metadata_address = self.ds_cfg["metadata_url"]
def _get_data(self):
url_params = self.get_url_params()
@@ -90,14 +94,14 @@ class DataSourceGCE(sources.DataSource):
},
)
- if not ret['success']:
- if ret['platform_reports_gce']:
- LOG.warning(ret['reason'])
+ if not ret["success"]:
+ if ret["platform_reports_gce"]:
+ LOG.warning(ret["reason"])
else:
- LOG.debug(ret['reason'])
+ LOG.debug(ret["reason"])
return False
- self.metadata = ret['meta-data']
- self.userdata_raw = ret['user-data']
+ self.metadata = ret["meta-data"]
+ self.userdata_raw = ret["user-data"]
return True
@property
@@ -106,10 +110,10 @@ class DataSourceGCE(sources.DataSource):
return None
def get_instance_id(self):
- return self.metadata['instance-id']
+ return self.metadata["instance-id"]
def get_public_ssh_keys(self):
- public_keys_data = self.metadata['public-keys-data']
+ public_keys_data = self.metadata["public-keys-data"]
return _parse_public_keys(public_keys_data, self.default_user)
def publish_host_keys(self, hostkeys):
@@ -118,15 +122,15 @@ class DataSourceGCE(sources.DataSource):
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
        # GCE has long FQDNs and has asked for short hostnames.
- return self.metadata['local-hostname'].split('.')[0]
+ return self.metadata["local-hostname"].split(".")[0]
@property
def availability_zone(self):
- return self.metadata['availability-zone']
+ return self.metadata["availability-zone"]
@property
def region(self):
- return self.availability_zone.rsplit('-', 1)[0]
+ return self.availability_zone.rsplit("-", 1)[0]
class DataSourceGCELocal(DataSourceGCE):
@@ -134,14 +138,19 @@ class DataSourceGCELocal(DataSourceGCE):
def _write_host_key_to_guest_attributes(key_type, key_value):
- url = '%s/%s/%s' % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type)
- key_value = key_value.encode('utf-8')
- resp = url_helper.readurl(url=url, data=key_value, headers=HEADERS,
- request_method='PUT', check_status=False)
+ url = "%s/%s/%s" % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type)
+ key_value = key_value.encode("utf-8")
+ resp = url_helper.readurl(
+ url=url,
+ data=key_value,
+ headers=HEADERS,
+ request_method="PUT",
+ check_status=False,
+ )
if resp.ok():
- LOG.debug('Wrote %s host key to guest attributes.', key_type)
+ LOG.debug("Wrote %s host key to guest attributes.", key_type)
else:
- LOG.debug('Unable to write %s host key to guest attributes.', key_type)
+ LOG.debug("Unable to write %s host key to guest attributes.", key_type)
def _has_expired(public_key):
@@ -155,7 +164,7 @@ def _has_expired(public_key):
return False
# Do not expire keys if they do not have the expected schema identifier.
- if schema != 'google-ssh':
+ if schema != "google-ssh":
return False
try:
@@ -164,11 +173,11 @@ def _has_expired(public_key):
return False
    # Do not expire keys if there is no expiration timestamp.
- if 'expireOn' not in json_obj:
+ if "expireOn" not in json_obj:
return False
- expire_str = json_obj['expireOn']
- format_str = '%Y-%m-%dT%H:%M:%S+0000'
+ expire_str = json_obj["expireOn"]
+ format_str = "%Y-%m-%dT%H:%M:%S+0000"
try:
expire_time = datetime.datetime.strptime(expire_str, format_str)
except ValueError:
@@ -189,11 +198,11 @@ def _parse_public_keys(public_keys_data, default_user=None):
for public_key in public_keys_data:
if not public_key or not all(ord(c) < 128 for c in public_key):
continue
- split_public_key = public_key.split(':', 1)
+ split_public_key = public_key.split(":", 1)
if len(split_public_key) != 2:
continue
user, key = split_public_key
- if user in ('cloudinit', default_user) and not _has_expired(key):
+ if user in ("cloudinit", default_user) and not _has_expired(key):
public_keys.append(key)
return public_keys
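Only entries for the 'cloudinit' user or the instance's default user survive, and google-ssh keys past their expireOn timestamp are dropped by _has_expired. A short sketch with invented entries (assumes cloud-init is importable):

    from cloudinit.sources.DataSourceGCE import _parse_public_keys

    keys_md = [
        "cloudinit:ssh-rsa AAAAB3... alice",   # kept: cloudinit user, no expiry data
        "bob:ssh-rsa AAAAB3... bob",           # dropped unless bob is the default user
        "not-a-key-entry",                     # dropped: no "user:" prefix
    ]
    print(_parse_public_keys(keys_md, default_user="ubuntu"))
    # -> ['ssh-rsa AAAAB3... alice']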
@@ -203,31 +212,35 @@ def read_md(address=None, url_params=None, platform_check=True):
if address is None:
address = MD_V1_URL
- ret = {'meta-data': None, 'user-data': None,
- 'success': False, 'reason': None}
- ret['platform_reports_gce'] = platform_reports_gce()
+ ret = {
+ "meta-data": None,
+ "user-data": None,
+ "success": False,
+ "reason": None,
+ }
+ ret["platform_reports_gce"] = platform_reports_gce()
- if platform_check and not ret['platform_reports_gce']:
- ret['reason'] = "Not running on GCE."
+ if platform_check and not ret["platform_reports_gce"]:
+ ret["reason"] = "Not running on GCE."
return ret
# If we cannot resolve the metadata server, then no point in trying.
if not util.is_resolvable_url(address):
LOG.debug("%s is not resolvable", address)
- ret['reason'] = 'address "%s" is not resolvable' % address
+ ret["reason"] = 'address "%s" is not resolvable' % address
return ret
# url_map: (our-key, path, required, is_text, is_recursive)
url_map = [
- ('instance-id', ('instance/id',), True, True, False),
- ('availability-zone', ('instance/zone',), True, True, False),
- ('local-hostname', ('instance/hostname',), True, True, False),
- ('instance-data', ('instance/attributes',), False, False, True),
- ('project-data', ('project/attributes',), False, False, True),
+ ("instance-id", ("instance/id",), True, True, False),
+ ("availability-zone", ("instance/zone",), True, True, False),
+ ("local-hostname", ("instance/hostname",), True, True, False),
+ ("instance-data", ("instance/attributes",), False, False, True),
+ ("project-data", ("project/attributes",), False, False, True),
]
- metadata_fetcher = GoogleMetadataFetcher(address,
- url_params.num_retries,
- url_params.sec_between_retries)
+ metadata_fetcher = GoogleMetadataFetcher(
+ address, url_params.num_retries, url_params.sec_between_retries
+ )
md = {}
# Iterate over url_map keys to get metadata items.
for (mkey, paths, required, is_text, is_recursive) in url_map:
@@ -238,51 +251,52 @@ def read_md(address=None, url_params=None, platform_check=True):
value = new_value
if required and value is None:
msg = "required key %s returned nothing. not GCE"
- ret['reason'] = msg % mkey
+ ret["reason"] = msg % mkey
return ret
md[mkey] = value
- instance_data = json.loads(md['instance-data'] or '{}')
- project_data = json.loads(md['project-data'] or '{}')
- valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')]
- block_project = instance_data.get('block-project-ssh-keys', '').lower()
- if block_project != 'true' and not instance_data.get('sshKeys'):
- valid_keys.append(project_data.get('ssh-keys'))
- valid_keys.append(project_data.get('sshKeys'))
- public_keys_data = '\n'.join([key for key in valid_keys if key])
- md['public-keys-data'] = public_keys_data.splitlines()
+ instance_data = json.loads(md["instance-data"] or "{}")
+ project_data = json.loads(md["project-data"] or "{}")
+ valid_keys = [instance_data.get("sshKeys"), instance_data.get("ssh-keys")]
+ block_project = instance_data.get("block-project-ssh-keys", "").lower()
+ if block_project != "true" and not instance_data.get("sshKeys"):
+ valid_keys.append(project_data.get("ssh-keys"))
+ valid_keys.append(project_data.get("sshKeys"))
+ public_keys_data = "\n".join([key for key in valid_keys if key])
+ md["public-keys-data"] = public_keys_data.splitlines()
- if md['availability-zone']:
- md['availability-zone'] = md['availability-zone'].split('/')[-1]
+ if md["availability-zone"]:
+ md["availability-zone"] = md["availability-zone"].split("/")[-1]
- if 'user-data' in instance_data:
+ if "user-data" in instance_data:
# instance_data was json, so values are all utf-8 strings.
- ud = instance_data['user-data'].encode("utf-8")
- encoding = instance_data.get('user-data-encoding')
- if encoding == 'base64':
+ ud = instance_data["user-data"].encode("utf-8")
+ encoding = instance_data.get("user-data-encoding")
+ if encoding == "base64":
ud = b64decode(ud)
elif encoding:
- LOG.warning('unknown user-data-encoding: %s, ignoring', encoding)
- ret['user-data'] = ud
+ LOG.warning("unknown user-data-encoding: %s, ignoring", encoding)
+ ret["user-data"] = ud
- ret['meta-data'] = md
- ret['success'] = True
+ ret["meta-data"] = md
+ ret["success"] = True
return ret
def platform_reports_gce():
- pname = dmi.read_dmi_data('system-product-name') or "N/A"
+ pname = dmi.read_dmi_data("system-product-name") or "N/A"
if pname == "Google Compute Engine" or pname == "Google":
return True
# system-product-name is not always guaranteed (LP: #1674861)
- serial = dmi.read_dmi_data('system-serial-number') or "N/A"
+ serial = dmi.read_dmi_data("system-serial-number") or "N/A"
if serial.startswith("GoogleCloud-"):
return True
- LOG.debug("Not running on google cloud. product-name=%s serial=%s",
- pname, serial)
+ LOG.debug(
+ "Not running on google cloud. product-name=%s serial=%s", pname, serial
+ )
return False
@@ -301,31 +315,38 @@ def get_datasource_list(depends):
if __name__ == "__main__":
import argparse
import sys
-
from base64 import b64encode
- parser = argparse.ArgumentParser(description='Query GCE Metadata Service')
- parser.add_argument("--endpoint", metavar="URL",
- help="The url of the metadata service.",
- default=MD_V1_URL)
- parser.add_argument("--no-platform-check", dest="platform_check",
- help="Ignore smbios platform check",
- action='store_false', default=True)
+ parser = argparse.ArgumentParser(description="Query GCE Metadata Service")
+ parser.add_argument(
+ "--endpoint",
+ metavar="URL",
+ help="The url of the metadata service.",
+ default=MD_V1_URL,
+ )
+ parser.add_argument(
+ "--no-platform-check",
+ dest="platform_check",
+ help="Ignore smbios platform check",
+ action="store_false",
+ default=True,
+ )
args = parser.parse_args()
data = read_md(address=args.endpoint, platform_check=args.platform_check)
- if 'user-data' in data:
+ if "user-data" in data:
# user-data is bytes not string like other things. Handle it specially.
# If it can be represented as utf-8 then do so. Otherwise print base64
# encoded value in the key user-data-b64.
try:
- data['user-data'] = data['user-data'].decode()
+ data["user-data"] = data["user-data"].decode()
except UnicodeDecodeError:
- sys.stderr.write("User-data cannot be decoded. "
- "Writing as base64\n")
- del data['user-data']
+ sys.stderr.write(
+ "User-data cannot be decoded. Writing as base64\n"
+ )
+ del data["user-data"]
# b64encode returns a bytes value. Decode to get the string.
- data['user-data-b64'] = b64encode(data['user-data']).decode()
+ data["user-data-b64"] = b64encode(data["user-data"]).decode()
- print(json.dumps(data, indent=1, sort_keys=True, separators=(',', ': ')))
+ print(json.dumps(data, indent=1, sort_keys=True, separators=(",", ": ")))
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py
index c7c88dd7..50324cc4 100644
--- a/cloudinit/sources/DataSourceHetzner.py
+++ b/cloudinit/sources/DataSourceHetzner.py
@@ -6,21 +6,19 @@
"""Hetzner Cloud API Documentation
https://docs.hetzner.cloud/"""
+import cloudinit.sources.helpers.hetzner as hc_helper
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import net as cloudnet
-from cloudinit import sources
-from cloudinit import util
-
-import cloudinit.sources.helpers.hetzner as hc_helper
+from cloudinit import sources, util
LOG = logging.getLogger(__name__)
-BASE_URL_V1 = 'http://169.254.169.254/hetzner/v1'
+BASE_URL_V1 = "http://169.254.169.254/hetzner/v1"
BUILTIN_DS_CONFIG = {
- 'metadata_url': BASE_URL_V1 + '/metadata',
- 'userdata_url': BASE_URL_V1 + '/userdata',
+ "metadata_url": BASE_URL_V1 + "/metadata",
+ "userdata_url": BASE_URL_V1 + "/userdata",
}
MD_RETRIES = 60
@@ -30,20 +28,23 @@ MD_WAIT_RETRY = 2
class DataSourceHetzner(sources.DataSource):
- dsname = 'Hetzner'
+ dsname = "Hetzner"
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.distro = distro
self.metadata = dict()
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "Hetzner"], {}),
- BUILTIN_DS_CONFIG])
- self.metadata_address = self.ds_cfg['metadata_url']
- self.userdata_address = self.ds_cfg['userdata_url']
- self.retries = self.ds_cfg.get('retries', MD_RETRIES)
- self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
- self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "Hetzner"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+ self.metadata_address = self.ds_cfg["metadata_url"]
+ self.userdata_address = self.ds_cfg["userdata_url"]
+ self.retries = self.ds_cfg.get("retries", MD_RETRIES)
+ self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT)
+ self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY)
self._network_config = None
self.dsmode = sources.DSMODE_NETWORK
@@ -54,14 +55,21 @@ class DataSourceHetzner(sources.DataSource):
return False
nic = cloudnet.find_fallback_nic()
- with cloudnet.EphemeralIPv4Network(nic, "169.254.0.1", 16,
- "169.254.255.255"):
+ with cloudnet.EphemeralIPv4Network(
+ nic, "169.254.0.1", 16, "169.254.255.255"
+ ):
md = hc_helper.read_metadata(
- self.metadata_address, timeout=self.timeout,
- sec_between=self.wait_retry, retries=self.retries)
+ self.metadata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries,
+ )
ud = hc_helper.read_userdata(
- self.userdata_address, timeout=self.timeout,
- sec_between=self.wait_retry, retries=self.retries)
+ self.userdata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries,
+ )
# Hetzner cloud does not support binary user-data. So here, do a
# base64 decode of the data if we can. The end result being that a
@@ -76,10 +84,10 @@ class DataSourceHetzner(sources.DataSource):
# hostname is name provided by user at launch. The API enforces it is
# a valid hostname, but it is not guaranteed to be resolvable in dns or
# fully qualified.
- self.metadata['instance-id'] = md['instance-id']
- self.metadata['local-hostname'] = md['hostname']
- self.metadata['network-config'] = md.get('network-config', None)
- self.metadata['public-keys'] = md.get('public-keys', None)
+ self.metadata["instance-id"] = md["instance-id"]
+ self.metadata["local-hostname"] = md["hostname"]
+ self.metadata["network-config"] = md.get("network-config", None)
+ self.metadata["public-keys"] = md.get("public-keys", None)
self.vendordata_raw = md.get("vendor_data", None)
# instance-id and serial from SMBIOS should be identical
@@ -92,19 +100,20 @@ class DataSourceHetzner(sources.DataSource):
def check_instance_id(self, sys_cfg):
return sources.instance_id_matches_system_uuid(
- self.get_instance_id(), 'system-serial-number')
+ self.get_instance_id(), "system-serial-number"
+ )
@property
def network_config(self):
"""Configure the networking. This needs to be done each boot, since
- the IP information may have changed due to snapshot and/or
- migration.
+ the IP information may have changed due to snapshot and/or
+ migration.
"""
if self._network_config:
return self._network_config
- _net_config = self.metadata['network-config']
+ _net_config = self.metadata["network-config"]
if not _net_config:
raise Exception("Unable to get meta-data from server....")
@@ -114,7 +123,7 @@ class DataSourceHetzner(sources.DataSource):
def get_hcloud_data():
- vendor_name = dmi.read_dmi_data('system-manufacturer')
+ vendor_name = dmi.read_dmi_data("system-manufacturer")
if vendor_name != "Hetzner":
return (False, None)
@@ -129,7 +138,7 @@ def get_hcloud_data():
# Used to match classes to dependencies
datasources = [
- (DataSourceHetzner, (sources.DEP_FILESYSTEM, )),
+ (DataSourceHetzner, (sources.DEP_FILESYSTEM,)),
]
@@ -137,4 +146,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
index 8d196185..18c3848f 100644
--- a/cloudinit/sources/DataSourceIBMCloud.py
+++ b/cloudinit/sources/DataSourceIBMCloud.py
@@ -97,10 +97,8 @@ import json
import os
from cloudinit import log as logging
-from cloudinit import sources
+from cloudinit import sources, subp, util
from cloudinit.sources.helpers import openstack
-from cloudinit import subp
-from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -117,12 +115,13 @@ class Platforms(object):
PROVISIONING = (
Platforms.TEMPLATE_PROVISIONING_METADATA,
- Platforms.TEMPLATE_PROVISIONING_NODATA)
+ Platforms.TEMPLATE_PROVISIONING_NODATA,
+)
class DataSourceIBMCloud(sources.DataSource):
- dsname = 'IBMCloud'
+ dsname = "IBMCloud"
system_uuid = None
def __init__(self, sys_cfg, distro, paths):
@@ -142,14 +141,14 @@ class DataSourceIBMCloud(sources.DataSource):
if results is None:
return False
- self.source = results['source']
- self.platform = results['platform']
- self.metadata = results['metadata']
- self.userdata_raw = results.get('userdata')
- self.network_json = results.get('networkdata')
- vd = results.get('vendordata')
+ self.source = results["source"]
+ self.platform = results["platform"]
+ self.metadata = results["metadata"]
+ self.userdata_raw = results.get("userdata")
+ self.network_json = results.get("networkdata")
+ vd = results.get("vendordata")
self.vendordata_pure = vd
- self.system_uuid = results['system-uuid']
+ self.system_uuid = results["system-uuid"]
try:
self.vendordata_raw = sources.convert_vendordata(vd)
except ValueError as e:
@@ -160,7 +159,7 @@ class DataSourceIBMCloud(sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return '%s (%s)' % (self.platform, self.source)
+ return "%s (%s)" % (self.platform, self.source)
def check_instance_id(self, sys_cfg):
"""quickly (local check only) if self.instance_id is still valid
@@ -177,12 +176,13 @@ class DataSourceIBMCloud(sources.DataSource):
if self.platform != Platforms.OS_CODE:
# If deployed from template, an agent in the provisioning
# environment handles networking configuration. Not cloud-init.
- return {'config': 'disabled', 'version': 1}
+ return {"config": "disabled", "version": 1}
if self._network_config is None:
if self.network_json is not None:
LOG.debug("network config provided via network_json")
self._network_config = openstack.convert_net_json(
- self.network_json, known_macs=None)
+ self.network_json, known_macs=None
+ )
else:
LOG.debug("no network configuration available.")
return self._network_config
@@ -200,22 +200,28 @@ def _is_xen():
def _is_ibm_provisioning(
- prov_cfg="/root/provisioningConfiguration.cfg",
- inst_log="/root/swinstall.log",
- boot_ref="/proc/1/environ"):
+ prov_cfg="/root/provisioningConfiguration.cfg",
+ inst_log="/root/swinstall.log",
+ boot_ref="/proc/1/environ",
+):
"""Return boolean indicating if this boot is ibm provisioning boot."""
if os.path.exists(prov_cfg):
msg = "config '%s' exists." % prov_cfg
result = True
if os.path.exists(inst_log):
if os.path.exists(boot_ref):
- result = (os.stat(inst_log).st_mtime >
- os.stat(boot_ref).st_mtime)
- msg += (" log '%s' from %s boot." %
- (inst_log, "current" if result else "previous"))
+ result = (
+ os.stat(inst_log).st_mtime > os.stat(boot_ref).st_mtime
+ )
+ msg += " log '%s' from %s boot." % (
+ inst_log,
+ "current" if result else "previous",
+ )
else:
- msg += (" log '%s' existed, but no reference file '%s'." %
- (inst_log, boot_ref))
+ msg += " log '%s' existed, but no reference file '%s'." % (
+ inst_log,
+ boot_ref,
+ )
result = False
else:
msg += " log '%s' did not exist." % inst_log
@@ -252,17 +258,26 @@ def get_ibm_platform():
if label not in (label_mdata, label_cfg2):
continue
if label in fslabels:
- LOG.warning("Duplicate fslabel '%s'. existing=%s current=%s",
- label, fslabels[label], data)
+ LOG.warning(
+ "Duplicate fslabel '%s'. existing=%s current=%s",
+ label,
+ fslabels[label],
+ data,
+ )
continue
if label == label_cfg2 and uuid != IBM_CONFIG_UUID:
- LOG.debug("Skipping %s with LABEL=%s due to uuid != %s: %s",
- dev, label, uuid, data)
+ LOG.debug(
+ "Skipping %s with LABEL=%s due to uuid != %s: %s",
+ dev,
+ label,
+ uuid,
+ data,
+ )
continue
fslabels[label] = data
- metadata_path = fslabels.get(label_mdata, {}).get('DEVNAME')
- cfg2_path = fslabels.get(label_cfg2, {}).get('DEVNAME')
+ metadata_path = fslabels.get(label_mdata, {}).get("DEVNAME")
+ cfg2_path = fslabels.get(label_cfg2, {}).get("DEVNAME")
if cfg2_path:
return (Platforms.OS_CODE, cfg2_path)
@@ -288,12 +303,14 @@ def read_md():
LOG.debug("This is not an IBMCloud platform.")
return None
elif platform in PROVISIONING:
- LOG.debug("Cloud-init is disabled during provisioning: %s.",
- platform)
+ LOG.debug("Cloud-init is disabled during provisioning: %s.", platform)
return None
- ret = {'platform': platform, 'source': path,
- 'system-uuid': _read_system_uuid()}
+ ret = {
+ "platform": platform,
+ "source": path,
+ "system-uuid": _read_system_uuid(),
+ }
try:
if os.path.isdir(path):
@@ -302,8 +319,8 @@ def read_md():
results = util.mount_cb(path, metadata_from_dir)
except sources.BrokenMetadata as e:
raise RuntimeError(
- "Failed reading IBM config disk (platform=%s path=%s): %s" %
- (platform, path, e)
+ "Failed reading IBM config disk (platform=%s path=%s): %s"
+ % (platform, path, e)
) from e
ret.update(results)
@@ -329,14 +346,14 @@ def metadata_from_dir(source_dir):
return os.path.join("openstack", "latest", fname)
def load_json_bytes(blob):
- return json.loads(blob.decode('utf-8'))
+ return json.loads(blob.decode("utf-8"))
files = [
# tuples of (results_name, path, translator)
- ('metadata_raw', opath('meta_data.json'), load_json_bytes),
- ('userdata', opath('user_data'), None),
- ('vendordata', opath('vendor_data.json'), load_json_bytes),
- ('networkdata', opath('network_data.json'), load_json_bytes),
+ ("metadata_raw", opath("meta_data.json"), load_json_bytes),
+ ("userdata", opath("user_data"), None),
+ ("vendordata", opath("vendor_data.json"), load_json_bytes),
+ ("networkdata", opath("network_data.json"), load_json_bytes),
]
results = {}
@@ -355,28 +372,33 @@ def metadata_from_dir(source_dir):
data = transl(raw)
except Exception as e:
raise sources.BrokenMetadata(
- "Failed decoding %s: %s" % (path, e))
+ "Failed decoding %s: %s" % (path, e)
+ )
results[name] = data
- if results.get('metadata_raw') is None:
+ if results.get("metadata_raw") is None:
raise sources.BrokenMetadata(
- "%s missing required file 'meta_data.json'" % source_dir)
+ "%s missing required file 'meta_data.json'" % source_dir
+ )
- results['metadata'] = {}
+ results["metadata"] = {}
- md_raw = results['metadata_raw']
- md = results['metadata']
- if 'random_seed' in md_raw:
+ md_raw = results["metadata_raw"]
+ md = results["metadata"]
+ if "random_seed" in md_raw:
try:
- md['random_seed'] = base64.b64decode(md_raw['random_seed'])
+ md["random_seed"] = base64.b64decode(md_raw["random_seed"])
except (ValueError, TypeError) as e:
raise sources.BrokenMetadata(
- "Badly formatted metadata random_seed entry: %s" % e)
+ "Badly formatted metadata random_seed entry: %s" % e
+ )
renames = (
- ('public_keys', 'public-keys'), ('hostname', 'local-hostname'),
- ('uuid', 'instance-id'))
+ ("public_keys", "public-keys"),
+ ("hostname", "local-hostname"),
+ ("uuid", "instance-id"),
+ )
for mdname, newname in renames:
if mdname in md_raw:
md[newname] = md_raw[mdname]
@@ -398,7 +420,7 @@ def get_datasource_list(depends):
if __name__ == "__main__":
import argparse
- parser = argparse.ArgumentParser(description='Query IBM Cloud Metadata')
+ parser = argparse.ArgumentParser(description="Query IBM Cloud Metadata")
args = parser.parse_args()
data = read_md()
print(util.json_dumps(data))
diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py
index 469707d2..071ea87c 100644
--- a/cloudinit/sources/DataSourceLXD.py
+++ b/cloudinit/sources/DataSourceLXD.py
@@ -1,4 +1,3 @@
-
"""Datasource for LXD, reads /dev/lxd/sock representaton of instance data.
Notes:
@@ -10,8 +9,10 @@ Notes:
* TODO( Hotplug support using websockets API 1.0/events )
"""
-from json.decoder import JSONDecodeError
import os
+import socket
+import stat
+from json.decoder import JSONDecodeError
import requests
from requests.adapters import HTTPAdapter
@@ -29,9 +30,6 @@ from requests.adapters import HTTPAdapter
from requests.packages.urllib3.connection import HTTPConnection
from requests.packages.urllib3.connectionpool import HTTPConnectionPool
-import socket
-import stat
-
from cloudinit import log as logging
from cloudinit import sources, subp, util
@@ -47,7 +45,7 @@ CONFIG_KEY_ALIASES = {
"cloud-init.vendor-data": "vendor-data",
"user.user-data": "user-data",
"user.network-config": "network-config",
- "user.vendor-data": "vendor-data"
+ "user.vendor-data": "vendor-data",
}
@@ -57,18 +55,20 @@ def generate_fallback_network_config() -> dict:
"version": 1,
"config": [
{
- "type": "physical", "name": "eth0",
- "subnets": [{"type": "dhcp", "control": "auto"}]
+ "type": "physical",
+ "name": "eth0",
+ "subnets": [{"type": "dhcp", "control": "auto"}],
}
- ]
+ ],
}
if subp.which("systemd-detect-virt"):
try:
- virt_type, _ = subp.subp(['systemd-detect-virt'])
+ virt_type, _ = subp.subp(["systemd-detect-virt"])
except subp.ProcessExecutionError as err:
LOG.warning(
"Unable to run systemd-detect-virt: %s."
- " Rendering default network config.", err
+ " Rendering default network config.",
+ err,
)
return network_v1
if virt_type.strip() == "kvm": # instance.type VIRTUAL-MACHINE
@@ -84,7 +84,7 @@ def generate_fallback_network_config() -> dict:
class SocketHTTPConnection(HTTPConnection):
def __init__(self, socket_path):
- super().__init__('localhost')
+ super().__init__("localhost")
self.socket_path = socket_path
def connect(self):
@@ -95,7 +95,7 @@ class SocketHTTPConnection(HTTPConnection):
class SocketConnectionPool(HTTPConnectionPool):
def __init__(self, socket_path):
self.socket_path = socket_path
- super().__init__('localhost')
+ super().__init__("localhost")
def _new_conn(self):
return SocketHTTPConnection(self.socket_path)
@@ -118,16 +118,16 @@ def _maybe_remove_top_network(cfg):
if "network" not in cfg:
return cfg
network_val = cfg["network"]
- bmsg = 'Top level network key in network-config %s: %s'
+ bmsg = "Top level network key in network-config %s: %s"
if not isinstance(network_val, dict):
LOG.debug(bmsg, "was not a dict", cfg)
return cfg
if len(list(cfg.keys())) != 1:
LOG.debug(bmsg, "had multiple top level keys", cfg)
return cfg
- if network_val.get('config') == "disabled":
+ if network_val.get("config") == "disabled":
LOG.debug(bmsg, "was config/disabled", cfg)
- elif not all(('config' in network_val, 'version' in network_val)):
+ elif not all(("config" in network_val, "version" in network_val)):
LOG.debug(bmsg, "but missing 'config' or 'version'", cfg)
return cfg
LOG.debug(bmsg, "fixed by removing shifting network.", cfg)
@@ -165,13 +165,16 @@ def _raw_instance_data_to_dict(metadata_type: str, metadata_value) -> dict:
class DataSourceLXD(sources.DataSource):
- dsname = 'LXD'
+ dsname = "LXD"
_network_config = sources.UNSET
_crawled_metadata = sources.UNSET
sensitive_metadata_keys = (
- 'merged_cfg', 'user.meta-data', 'user.vendor-data', 'user.user-data',
+ "merged_cfg",
+ "user.meta-data",
+ "user.vendor-data",
+ "user.user-data",
)
def _is_platform_viable(self) -> bool:
@@ -185,8 +188,10 @@ class DataSourceLXD(sources.DataSource):
return False
self._crawled_metadata = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=read_metadata)
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=read_metadata,
+ )
self.metadata = _raw_instance_data_to_dict(
"meta-data", self._crawled_metadata.get("meta-data")
)
@@ -293,7 +298,7 @@ def read_metadata(
"Invalid HTTP response [{code}] from {route}: {resp}".format(
code=response.status_code,
route=md_route,
- resp=response.text
+ resp=response.text,
)
)
@@ -304,7 +309,7 @@ def read_metadata(
md = {
"_metadata_api_version": api_version, # Document API version read
"config": {},
- "meta-data": md["meta-data"]
+ "meta-data": md["meta-data"],
}
config_url = version_url + "config"
@@ -317,7 +322,7 @@ def read_metadata(
"Invalid HTTP response [{code}] from {route}: {resp}".format(
code=response.status_code,
route=config_url,
- resp=response.text
+ resp=response.text,
)
)
try:
@@ -326,8 +331,7 @@ def read_metadata(
raise sources.InvalidMetaDataException(
"Unable to determine cloud-init config from {route}."
" Expected JSON but found: {resp}".format(
- route=config_url,
- resp=response.text
+ route=config_url, resp=response.text
)
) from exc
@@ -354,12 +358,15 @@ def read_metadata(
else:
LOG.warning(
"Ignoring LXD config %s in favor of %s value.",
- cfg_key, cfg_key.replace("user", "cloud-init", 1)
+ cfg_key,
+ cfg_key.replace("user", "cloud-init", 1),
)
else:
LOG.debug(
"Skipping %s on [HTTP:%d]:%s",
- url, response.status_code, response.text
+ url,
+ response.status_code,
+ response.text,
)
return md
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 9156925f..d554db0d 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -11,20 +11,18 @@ import os
import time
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import sources, url_helper, util
LOG = logging.getLogger(__name__)
MD_VERSION = "2012-03-01"
DS_FIELDS = [
# remote path, location in dictionary, binary data?, optional?
- ("meta-data/instance-id", 'meta-data/instance-id', False, False),
- ("meta-data/local-hostname", 'meta-data/local-hostname', False, False),
- ("meta-data/public-keys", 'meta-data/public-keys', False, True),
- ('meta-data/vendor-data', 'vendor-data', True, True),
- ('user-data', 'user-data', True, True),
+ ("meta-data/instance-id", "meta-data/instance-id", False, False),
+ ("meta-data/local-hostname", "meta-data/local-hostname", False, False),
+ ("meta-data/public-keys", "meta-data/public-keys", False, True),
+ ("meta-data/vendor-data", "vendor-data", True, True),
+ ("user-data", "user-data", True, True),
]
@@ -46,7 +44,7 @@ class DataSourceMAAS(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.base_url = None
- self.seed_dir = os.path.join(paths.seed_dir, 'maas')
+ self.seed_dir = os.path.join(paths.seed_dir, "maas")
self.id_hash = get_id_from_ds_cfg(self.ds_cfg)
@property
@@ -72,7 +70,7 @@ class DataSourceMAAS(sources.DataSource):
raise
# If there is no metadata_url, then we're not configured
- url = mcfg.get('metadata_url', None)
+ url = mcfg.get("metadata_url", None)
if not url:
return False
@@ -85,9 +83,14 @@ class DataSourceMAAS(sources.DataSource):
return False
self._set_data(
- url, read_maas_seed_url(
- url, read_file_or_url=self.oauth_helper.readurl,
- paths=self.paths, retries=1))
+ url,
+ read_maas_seed_url(
+ url,
+ read_file_or_url=self.oauth_helper.readurl,
+ paths=self.paths,
+ retries=1,
+ ),
+ )
return True
except Exception:
util.logexc(LOG, "Failed fetching metadata from url %s", url)
@@ -109,7 +112,7 @@ class DataSourceMAAS(sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return 'seed-dir (%s)' % self.base_url
+ return "seed-dir (%s)" % self.base_url
def wait_for_metadata_service(self, url):
mcfg = self.ds_cfg
@@ -135,13 +138,17 @@ class DataSourceMAAS(sources.DataSource):
check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
urls = [check_url]
url, _response = self.oauth_helper.wait_for_url(
- urls=urls, max_wait=max_wait, timeout=timeout)
+ urls=urls, max_wait=max_wait, timeout=timeout
+ )
if url:
LOG.debug("Using metadata source: '%s'", url)
else:
- LOG.critical("Giving up on md from %s after %i seconds",
- urls, int(time.time() - starttime))
+ LOG.critical(
+ "Giving up on md from %s after %i seconds",
+ urls,
+ int(time.time() - starttime),
+ )
return bool(url)
@@ -154,26 +161,26 @@ class DataSourceMAAS(sources.DataSource):
if self.id_hash is None:
return False
ncfg = util.get_cfg_by_path(sys_cfg, ("datasource", self.dsname), {})
- return (self.id_hash == get_id_from_ds_cfg(ncfg))
+ return self.id_hash == get_id_from_ds_cfg(ncfg)
def get_oauth_helper(cfg):
"""Return an oauth helper instance for values in cfg.
- @raises ValueError from OauthUrlHelper if some required fields have
- true-ish values but others do not."""
- keys = ('consumer_key', 'consumer_secret', 'token_key', 'token_secret')
+ @raises ValueError from OauthUrlHelper if some required fields have
+ true-ish values but others do not."""
+ keys = ("consumer_key", "consumer_secret", "token_key", "token_secret")
kwargs = dict([(r, cfg.get(r)) for r in keys])
return url_helper.OauthUrlHelper(**kwargs)
def get_id_from_ds_cfg(ds_cfg):
"""Given a config, generate a unique identifier for this node."""
- fields = ('consumer_key', 'token_key', 'token_secret')
- idstr = '\0'.join([ds_cfg.get(f, "") for f in fields])
+ fields = ("consumer_key", "token_key", "token_secret")
+ idstr = "\0".join([ds_cfg.get(f, "") for f in fields])
# store the encoding version as part of the hash so that, in the
# event it ever changes, we can compute older versions.
- return 'v1:' + hashlib.sha256(idstr.encode('utf-8')).hexdigest()
+ return "v1:" + hashlib.sha256(idstr.encode("utf-8")).hexdigest()
def read_maas_seed_dir(seed_d):
@@ -186,8 +193,14 @@ def read_maas_seed_dir(seed_d):
return read_maas_seed_url("file://%s" % seed_d, version=None)
-def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
- version=MD_VERSION, paths=None, retries=None):
+def read_maas_seed_url(
+ seed_url,
+ read_file_or_url=None,
+ timeout=None,
+ version=MD_VERSION,
+ paths=None,
+ retries=None,
+):
"""
Read the maas datasource at seed_url.
read_file_or_url is a method that should provide an interface
@@ -213,16 +226,20 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
url = "%s/%s/%s" % (seed_url, version, path)
try:
ssl_details = util.fetch_ssl_details(paths)
- resp = read_file_or_url(url, retries=retries, timeout=timeout,
- ssl_details=ssl_details)
+ resp = read_file_or_url(
+ url, retries=retries, timeout=timeout, ssl_details=ssl_details
+ )
if resp.ok():
if binary:
md[path] = resp.contents
else:
md[path] = util.decode_binary(resp.contents)
else:
- LOG.warning(("Fetching from %s resulted in"
- " an invalid http code %s"), url, resp.code)
+ LOG.warning(
+ "Fetching from %s resulted in an invalid http code %s",
+ url,
+ resp.code,
+ )
except url_helper.UrlError as e:
if e.code == 404 and not optional:
raise MAASSeedDirMalformed(
@@ -236,8 +253,8 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
def check_seed_contents(content, seed):
"""Validate if dictionary content valid as a return for a datasource.
- Either return a (userdata, metadata, vendordata) tuple or
- Raise MAASSeedDirMalformed or MAASSeedDirNone
+ Either return a (userdata, metadata, vendordata) tuple or
+ Raise MAASSeedDirMalformed or MAASSeedDirNone
"""
ret = {}
missing = []
@@ -262,14 +279,15 @@ def check_seed_contents(content, seed):
raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing))
vd_data = None
- if ret.get('vendor-data'):
+ if ret.get("vendor-data"):
err = object()
- vd_data = util.load_yaml(ret.get('vendor-data'), default=err,
- allowed=(object))
+ vd_data = util.load_yaml(
+ ret.get("vendor-data"), default=err, allowed=(object)
+ )
if vd_data is err:
raise MAASSeedDirMalformed("vendor-data was not loadable as yaml.")
- return ret.get('user-data'), ret.get('meta-data'), vd_data
+ return ret.get("user-data"), ret.get("meta-data"), vd_data
class MAASSeedDirNone(Exception):
@@ -292,6 +310,7 @@ def get_datasource_list(depends):
if __name__ == "__main__":
+
def main():
"""
Call with single argument of directory or http or https url.
@@ -302,36 +321,66 @@ if __name__ == "__main__":
import pprint
import sys
- parser = argparse.ArgumentParser(description='Interact with MAAS DS')
- parser.add_argument("--config", metavar="file",
- help="specify DS config file", default=None)
- parser.add_argument("--ckey", metavar="key",
- help="the consumer key to auth with", default=None)
- parser.add_argument("--tkey", metavar="key",
- help="the token key to auth with", default=None)
- parser.add_argument("--csec", metavar="secret",
- help="the consumer secret (likely '')", default="")
- parser.add_argument("--tsec", metavar="secret",
- help="the token secret to auth with", default=None)
- parser.add_argument("--apiver", metavar="version",
- help="the apiver to use ("" can be used)",
- default=MD_VERSION)
+ parser = argparse.ArgumentParser(description="Interact with MAAS DS")
+ parser.add_argument(
+ "--config",
+ metavar="file",
+ help="specify DS config file",
+ default=None,
+ )
+ parser.add_argument(
+ "--ckey",
+ metavar="key",
+ help="the consumer key to auth with",
+ default=None,
+ )
+ parser.add_argument(
+ "--tkey",
+ metavar="key",
+ help="the token key to auth with",
+ default=None,
+ )
+ parser.add_argument(
+ "--csec",
+ metavar="secret",
+ help="the consumer secret (likely '')",
+ default="",
+ )
+ parser.add_argument(
+ "--tsec",
+ metavar="secret",
+ help="the token secret to auth with",
+ default=None,
+ )
+ parser.add_argument(
+ "--apiver",
+ metavar="version",
+ help="the apiver to use ( can be used)",
+ default=MD_VERSION,
+ )
subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
- for (name, help) in (('crawl', 'crawl the datasource'),
- ('get', 'do a single GET of provided url'),
- ('check-seed', 'read and verify seed at url')):
+ for (name, help) in (
+ ("crawl", "crawl the datasource"),
+ ("get", "do a single GET of provided url"),
+ ("check-seed", "read and verify seed at url"),
+ ):
p = subcmds.add_parser(name, help=help)
- p.add_argument("url", help="the datasource url", nargs='?',
- default=None)
+ p.add_argument(
+ "url", help="the datasource url", nargs="?", default=None
+ )
args = parser.parse_args()
- creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
- 'token_secret': args.tsec, 'consumer_secret': args.csec}
+ creds = {
+ "consumer_key": args.ckey,
+ "token_key": args.tkey,
+ "token_secret": args.tsec,
+ "consumer_secret": args.csec,
+ }
if args.config is None:
- for fname in ('91_kernel_cmdline_url', '90_dpkg_maas'):
+ for fname in ("91_kernel_cmdline_url", "90_dpkg_maas"):
fpath = "/etc/cloud/cloud.cfg.d/" + fname + ".cfg"
if os.path.exists(fpath) and os.access(fpath, os.R_OK):
sys.stderr.write("Used config in %s.\n" % fpath)
@@ -339,13 +388,13 @@ if __name__ == "__main__":
if args.config:
cfg = util.read_conf(args.config)
- if 'datasource' in cfg:
- cfg = cfg['datasource']['MAAS']
+ if "datasource" in cfg:
+ cfg = cfg["datasource"]["MAAS"]
for key in creds.keys():
if key in cfg and creds[key] is None:
creds[key] = cfg[key]
- if args.url is None and 'metadata_url' in cfg:
- args.url = cfg['metadata_url']
+ if args.url is None and "metadata_url" in cfg:
+ args.url = cfg["metadata_url"]
if args.url is None:
sys.stderr.write("Must provide a url or a config with url.\n")
@@ -380,8 +429,11 @@ if __name__ == "__main__":
(userdata, metadata, vd) = read_maas_seed_dir(args.url)
else:
(userdata, metadata, vd) = read_maas_seed_url(
- args.url, version=args.apiver, read_file_or_url=readurl,
- retries=2)
+ args.url,
+ version=args.apiver,
+ read_file_or_url=readurl,
+ retries=2,
+ )
print("=== user-data ===")
print("N/A" if userdata is None else userdata.decode())
print("=== meta-data ===")
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 2d9e86b4..56559630 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -13,9 +13,8 @@ import os
from cloudinit import dmi
from cloudinit import log as logging
+from cloudinit import sources, util
from cloudinit.net import eni
-from cloudinit import sources
-from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -27,8 +26,10 @@ class DataSourceNoCloud(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed = None
- self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud'),
- os.path.join(paths.seed_dir, 'nocloud-net')]
+ self.seed_dirs = [
+ os.path.join(paths.seed_dir, "nocloud"),
+ os.path.join(paths.seed_dir, "nocloud-net"),
+ ]
self.seed_dir = None
self.supported_seed_starts = ("/", "file://")
@@ -55,17 +56,21 @@ class DataSourceNoCloud(sources.DataSource):
}
found = []
- mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "",
- 'network-config': None}
+ mydata = {
+ "meta-data": {},
+ "user-data": "",
+ "vendor-data": "",
+ "network-config": None,
+ }
try:
# Parse the system serial label from dmi. If not empty, try parsing
# like the commandline
md = {}
- serial = dmi.read_dmi_data('system-serial-number')
+ serial = dmi.read_dmi_data("system-serial-number")
if serial and load_cmdline_data(md, serial):
found.append("dmi")
- mydata = _merge_new_seed(mydata, {'meta-data': md})
+ mydata = _merge_new_seed(mydata, {"meta-data": md})
except Exception:
util.logexc(LOG, "Unable to parse dmi data")
return False
@@ -75,14 +80,16 @@ class DataSourceNoCloud(sources.DataSource):
md = {}
if load_cmdline_data(md):
found.append("cmdline")
- mydata = _merge_new_seed(mydata, {'meta-data': md})
+ mydata = _merge_new_seed(mydata, {"meta-data": md})
except Exception:
util.logexc(LOG, "Unable to parse command line data")
return False
# Check to see if the seed dir has data.
- pp2d_kwargs = {'required': ['user-data', 'meta-data'],
- 'optional': ['vendor-data', 'network-config']}
+ pp2d_kwargs = {
+ "required": ["user-data", "meta-data"],
+ "optional": ["vendor-data", "network-config"],
+ }
for path in self.seed_dirs:
try:
@@ -97,31 +104,35 @@ class DataSourceNoCloud(sources.DataSource):
# If the datasource config had a 'seedfrom' entry, then that takes
# precedence over a 'seedfrom' that was found in a filesystem
# but not over external media
- if self.ds_cfg.get('seedfrom'):
+ if self.ds_cfg.get("seedfrom"):
found.append("ds_config_seedfrom")
- mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom']
+ mydata["meta-data"]["seedfrom"] = self.ds_cfg["seedfrom"]
# fields appropriately named can also just come from the datasource
# config (ie, 'user-data', 'meta-data', 'vendor-data' there)
- if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg:
+ if "user-data" in self.ds_cfg and "meta-data" in self.ds_cfg:
mydata = _merge_new_seed(mydata, self.ds_cfg)
found.append("ds_config")
def _pp2d_callback(mp, data):
return util.pathprefix2dict(mp, **data)
- label = self.ds_cfg.get('fs_label', "cidata")
+ label = self.ds_cfg.get("fs_label", "cidata")
if label is not None:
for dev in self._get_devices(label):
try:
LOG.debug("Attempting to use data from %s", dev)
try:
- seeded = util.mount_cb(dev, _pp2d_callback,
- pp2d_kwargs)
+ seeded = util.mount_cb(
+ dev, _pp2d_callback, pp2d_kwargs
+ )
except ValueError:
- LOG.warning("device %s with label=%s not a "
- "valid seed.", dev, label)
+ LOG.warning(
+ "device %s with label=%s not a valid seed.",
+ dev,
+ label,
+ )
continue
mydata = _merge_new_seed(mydata, seeded)
@@ -133,8 +144,9 @@ class DataSourceNoCloud(sources.DataSource):
if e.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for "
- "data", dev)
+ util.logexc(
+ LOG, "Failed to mount %s when looking for data", dev
+ )
# There was no indication on kernel cmdline or data
# in the seeddir suggesting this handler should be used.
@@ -145,8 +157,8 @@ class DataSourceNoCloud(sources.DataSource):
# attempt to seed the userdata / metadata from its value
# its primary value is in allowing the user to type less
# on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
- if "seedfrom" in mydata['meta-data']:
- seedfrom = mydata['meta-data']["seedfrom"]
+ if "seedfrom" in mydata["meta-data"]:
+ seedfrom = mydata["meta-data"]["seedfrom"]
seedfound = False
for proto in self.supported_seed_starts:
if seedfrom.startswith(proto):
@@ -162,39 +174,43 @@ class DataSourceNoCloud(sources.DataSource):
LOG.debug("Using seeded cache data from %s", seedfrom)
# Values in the command line override those from the seed
- mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
- md_seed])
- mydata['user-data'] = ud
- mydata['vendor-data'] = vd
+ mydata["meta-data"] = util.mergemanydict(
+ [mydata["meta-data"], md_seed]
+ )
+ mydata["user-data"] = ud
+ mydata["vendor-data"] = vd
found.append(seedfrom)
# Now that we have exhausted any other places merge in the defaults
- mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
- defaults])
+ mydata["meta-data"] = util.mergemanydict(
+ [mydata["meta-data"], defaults]
+ )
self.dsmode = self._determine_dsmode(
- [mydata['meta-data'].get('dsmode')])
+ [mydata["meta-data"].get("dsmode")]
+ )
if self.dsmode == sources.DSMODE_DISABLED:
- LOG.debug("%s: not claiming datasource, dsmode=%s", self,
- self.dsmode)
+ LOG.debug(
+ "%s: not claiming datasource, dsmode=%s", self, self.dsmode
+ )
return False
self.seed = ",".join(found)
- self.metadata = mydata['meta-data']
- self.userdata_raw = mydata['user-data']
- self.vendordata_raw = mydata['vendor-data']
- self._network_config = mydata['network-config']
- self._network_eni = mydata['meta-data'].get('network-interfaces')
+ self.metadata = mydata["meta-data"]
+ self.userdata_raw = mydata["user-data"]
+ self.vendordata_raw = mydata["vendor-data"]
+ self._network_config = mydata["network-config"]
+ self._network_eni = mydata["meta-data"].get("network-interfaces")
return True
@property
def platform_type(self):
# Handle upgrade path of pickled ds
- if not hasattr(self, '_platform_type'):
+ if not hasattr(self, "_platform_type"):
self._platform_type = None
if not self._platform_type:
- self._platform_type = 'lxd' if util.is_lxd() else 'nocloud'
+ self._platform_type = "lxd" if util.is_lxd() else "nocloud"
return self._platform_type
def _get_cloud_name(self):
@@ -203,11 +219,11 @@ class DataSourceNoCloud(sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- if self.seed.startswith('/dev'):
- subplatform_type = 'config-disk'
+ if self.seed.startswith("/dev"):
+ subplatform_type = "config-disk"
else:
- subplatform_type = 'seed-dir'
- return '%s (%s)' % (subplatform_type, self.seed)
+ subplatform_type = "seed-dir"
+ return "%s (%s)" % (subplatform_type, self.seed)
def check_instance_id(self, sys_cfg):
# quickly (local check only) if self.instance_id is still valid
@@ -218,7 +234,7 @@ class DataSourceNoCloud(sources.DataSource):
# LP: #1568150 need getattr in the case that an old class object
# has been loaded from a pickled file and now executing new source.
- dirs = getattr(self, 'seed_dirs', [self.seed_dir])
+ dirs = getattr(self, "seed_dirs", [self.seed_dir])
quick_id = _quick_read_instance_id(dirs=dirs)
if not quick_id:
return None
@@ -236,7 +252,7 @@ def _quick_read_instance_id(dirs=None):
if dirs is None:
dirs = []
- iid_key = 'instance-id'
+ iid_key = "instance-id"
fill = {}
if load_cmdline_data(fill) and iid_key in fill:
return fill[iid_key]
@@ -245,8 +261,8 @@ def _quick_read_instance_id(dirs=None):
if d is None:
continue
try:
- data = util.pathprefix2dict(d, required=['meta-data'])
- md = util.load_yaml(data['meta-data'])
+ data = util.pathprefix2dict(d, required=["meta-data"])
+ md = util.load_yaml(data["meta-data"])
if md and iid_key in md:
return md[iid_key]
except ValueError:
@@ -256,14 +272,16 @@ def _quick_read_instance_id(dirs=None):
def load_cmdline_data(fill, cmdline=None):
- pairs = [("ds=nocloud", sources.DSMODE_LOCAL),
- ("ds=nocloud-net", sources.DSMODE_NETWORK)]
+ pairs = [
+ ("ds=nocloud", sources.DSMODE_LOCAL),
+ ("ds=nocloud-net", sources.DSMODE_NETWORK),
+ ]
for idstr, dsmode in pairs:
if parse_cmdline_data(idstr, fill, cmdline):
# if dsmode was explicitly in the command line, then
# prefer it to the dsmode based on the command line id
- if 'dsmode' not in fill:
- fill['dsmode'] = dsmode
+ if "dsmode" not in fill:
+ fill["dsmode"] = dsmode
return True
return False
@@ -323,19 +341,19 @@ def _maybe_remove_top_network(cfg):
Return the original value if no change or the updated value if changed."""
nullval = object()
- network_val = cfg.get('network', nullval)
+ network_val = cfg.get("network", nullval)
if network_val is nullval:
return cfg
- bmsg = 'Top level network key in network-config %s: %s'
+ bmsg = "Top level network key in network-config %s: %s"
if not isinstance(network_val, dict):
LOG.debug(bmsg, "was not a dict", cfg)
return cfg
if len(list(cfg.keys())) != 1:
LOG.debug(bmsg, "had multiple top level keys", cfg)
return cfg
- if network_val.get('config') == "disabled":
+ if network_val.get("config") == "disabled":
LOG.debug(bmsg, "was config/disabled", cfg)
- elif not all(('config' in network_val, 'version' in network_val)):
+ elif not all(("config" in network_val, "version" in network_val)):
LOG.debug(bmsg, "but missing 'config' or 'version'", cfg)
return cfg
LOG.debug(bmsg, "fixed by removing shifting network.", cfg)
@@ -345,19 +363,20 @@ def _maybe_remove_top_network(cfg):
def _merge_new_seed(cur, seeded):
ret = cur.copy()
- newmd = seeded.get('meta-data', {})
- if not isinstance(seeded['meta-data'], dict):
- newmd = util.load_yaml(seeded['meta-data'])
- ret['meta-data'] = util.mergemanydict([cur['meta-data'], newmd])
+ newmd = seeded.get("meta-data", {})
+ if not isinstance(seeded["meta-data"], dict):
+ newmd = util.load_yaml(seeded["meta-data"])
+ ret["meta-data"] = util.mergemanydict([cur["meta-data"], newmd])
- if seeded.get('network-config'):
- ret['network-config'] = _maybe_remove_top_network(
- util.load_yaml(seeded.get('network-config')))
+ if seeded.get("network-config"):
+ ret["network-config"] = _maybe_remove_top_network(
+ util.load_yaml(seeded.get("network-config"))
+ )
- if 'user-data' in seeded:
- ret['user-data'] = seeded['user-data']
- if 'vendor-data' in seeded:
- ret['vendor-data'] = seeded['vendor-data']
+ if "user-data" in seeded:
+ ret["user-data"] = seeded["user-data"]
+ if "vendor-data" in seeded:
+ ret["vendor-data"] = seeded["vendor-data"]
return ret
@@ -369,7 +388,7 @@ class DataSourceNoCloudNet(DataSourceNoCloud):
# Used to match classes to dependencies
datasources = [
- (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )),
+ (DataSourceNoCloud, (sources.DEP_FILESYSTEM,)),
(DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
@@ -378,4 +397,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py
index b7656ac5..036d00b2 100644
--- a/cloudinit/sources/DataSourceNone.py
+++ b/cloudinit/sources/DataSourceNone.py
@@ -14,23 +14,23 @@ class DataSourceNone(sources.DataSource):
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
self.metadata = {}
- self.userdata_raw = ''
+ self.userdata_raw = ""
def _get_data(self):
# If the datasource config has any provided 'fallback'
# userdata or metadata, use it...
- if 'userdata_raw' in self.ds_cfg:
- self.userdata_raw = self.ds_cfg['userdata_raw']
- if 'metadata' in self.ds_cfg:
- self.metadata = self.ds_cfg['metadata']
+ if "userdata_raw" in self.ds_cfg:
+ self.userdata_raw = self.ds_cfg["userdata_raw"]
+ if "metadata" in self.ds_cfg:
+ self.metadata = self.ds_cfg["metadata"]
return True
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return 'config'
+ return "config"
def get_instance_id(self):
- return 'iid-datasource-none'
+ return "iid-datasource-none"
@property
def is_disconnected(self):
@@ -48,4 +48,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 5257a534..0df39824 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -16,32 +16,32 @@ from xml.dom import minidom
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import safeyaml
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
-from cloudinit.sources.helpers.vmware.imc.config \
- import Config
-from cloudinit.sources.helpers.vmware.imc.config_custom_script \
- import PreCustomScript, PostCustomScript
-from cloudinit.sources.helpers.vmware.imc.config_file \
- import ConfigFile
-from cloudinit.sources.helpers.vmware.imc.config_nic \
- import NicConfigurator
-from cloudinit.sources.helpers.vmware.imc.config_passwd \
- import PasswordConfigurator
-from cloudinit.sources.helpers.vmware.imc.guestcust_error \
- import GuestCustErrorEnum
-from cloudinit.sources.helpers.vmware.imc.guestcust_event \
- import GuestCustEventEnum as GuestCustEvent
-from cloudinit.sources.helpers.vmware.imc.guestcust_state \
- import GuestCustStateEnum
+from cloudinit import safeyaml, sources, subp, util
+from cloudinit.sources.helpers.vmware.imc.config import Config
+from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
+ PostCustomScript,
+ PreCustomScript,
+)
+from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
+from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator
+from cloudinit.sources.helpers.vmware.imc.config_passwd import (
+ PasswordConfigurator,
+)
+from cloudinit.sources.helpers.vmware.imc.guestcust_error import (
+ GuestCustErrorEnum,
+)
+from cloudinit.sources.helpers.vmware.imc.guestcust_event import (
+ GuestCustEventEnum as GuestCustEvent,
+)
+from cloudinit.sources.helpers.vmware.imc.guestcust_state import (
+ GuestCustStateEnum,
+)
from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
enable_nics,
get_nics_to_enable,
- set_customization_status,
get_tools_config,
- set_gc_status
+ set_customization_status,
+ set_gc_status,
)
LOG = logging.getLogger(__name__)
@@ -58,7 +58,7 @@ class DataSourceOVF(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed = None
- self.seed_dir = os.path.join(paths.seed_dir, 'ovf')
+ self.seed_dir = os.path.join(paths.seed_dir, "ovf")
self.environment = None
self.cfg = {}
self.supported_seed_starts = ("/", "file://")
@@ -96,36 +96,43 @@ class DataSourceOVF(sources.DataSource):
(md, ud, cfg) = read_ovf_environment(contents)
self.environment = contents
found.append(seed)
- elif system_type and 'vmware' in system_type.lower():
+ elif system_type and "vmware" in system_type.lower():
LOG.debug("VMware Virtualization Platform found")
allow_vmware_cust = False
allow_raw_data = False
if not self.vmware_customization_supported:
- LOG.debug("Skipping the check for "
- "VMware Customization support")
+ LOG.debug(
+ "Skipping the check for VMware Customization support"
+ )
else:
allow_vmware_cust = not util.get_cfg_option_bool(
- self.sys_cfg, "disable_vmware_customization", True)
+ self.sys_cfg, "disable_vmware_customization", True
+ )
allow_raw_data = util.get_cfg_option_bool(
- self.ds_cfg, "allow_raw_data", True)
+ self.ds_cfg, "allow_raw_data", True
+ )
if not (allow_vmware_cust or allow_raw_data):
- LOG.debug(
- "Customization for VMware platform is disabled.")
+ LOG.debug("Customization for VMware platform is disabled.")
else:
search_paths = (
- "/usr/lib/vmware-tools", "/usr/lib64/vmware-tools",
- "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools",
+ "/usr/lib/vmware-tools",
+ "/usr/lib64/vmware-tools",
+ "/usr/lib/open-vm-tools",
+ "/usr/lib64/open-vm-tools",
"/usr/lib/x86_64-linux-gnu/open-vm-tools",
- "/usr/lib/aarch64-linux-gnu/open-vm-tools")
+ "/usr/lib/aarch64-linux-gnu/open-vm-tools",
+ )
plugin = "libdeployPkgPlugin.so"
deployPkgPluginPath = None
for path in search_paths:
deployPkgPluginPath = search_file(path, plugin)
if deployPkgPluginPath:
- LOG.debug("Found the customization plugin at %s",
- deployPkgPluginPath)
+ LOG.debug(
+ "Found the customization plugin at %s",
+ deployPkgPluginPath,
+ )
break
if deployPkgPluginPath:
@@ -140,7 +147,8 @@ class DataSourceOVF(sources.DataSource):
logfunc=LOG.debug,
msg="waiting for configuration file",
func=wait_for_imc_cfg_file,
- args=("cust.cfg", max_wait))
+ args=("cust.cfg", max_wait),
+ )
else:
LOG.debug("Did not find the customization plugin.")
@@ -149,30 +157,34 @@ class DataSourceOVF(sources.DataSource):
imcdirpath = os.path.dirname(vmwareImcConfigFilePath)
cf = ConfigFile(vmwareImcConfigFilePath)
self._vmware_cust_conf = Config(cf)
- LOG.debug("Found VMware Customization Config File at %s",
- vmwareImcConfigFilePath)
+ LOG.debug(
+ "Found VMware Customization Config File at %s",
+ vmwareImcConfigFilePath,
+ )
try:
(md_path, ud_path, nicspath) = collect_imc_file_paths(
- self._vmware_cust_conf)
+ self._vmware_cust_conf
+ )
except FileNotFoundError as e:
_raise_error_status(
"File(s) missing in directory",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
# Don't handle the customization for the 2 cases below:
# 1. meta data is found, allow_raw_data is False.
# 2. no meta data is found, allow_vmware_cust is False.
if md_path and not allow_raw_data:
- LOG.debug(
- "Customization using raw data is disabled.")
+ LOG.debug("Customization using raw data is disabled.")
# reset vmwareImcConfigFilePath to None to avoid
# customization for VMware platform
vmwareImcConfigFilePath = None
if md_path is None and not allow_vmware_cust:
LOG.debug(
- "Customization using VMware config is disabled.")
+ "Customization using VMware config is disabled."
+ )
vmwareImcConfigFilePath = None
else:
LOG.debug("Did not find VMware Customization Config File")
@@ -197,22 +209,25 @@ class DataSourceOVF(sources.DataSource):
e,
GuestCustErrorEnum.GUESTCUST_ERROR_WRONG_META_FORMAT,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
except Exception as e:
_raise_error_status(
"Error loading cloud-init configuration",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
self._vmware_cust_found = True
- found.append('vmware-tools')
+ found.append("vmware-tools")
util.del_dir(imcdirpath)
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_DONE,
- GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
+ GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS,
+ )
set_gc_status(self._vmware_cust_conf, "Successful")
elif vmwareImcConfigFilePath:
@@ -225,7 +240,8 @@ class DataSourceOVF(sources.DataSource):
self._vmware_nics_to_enable = get_nics_to_enable(nicspath)
product_marker = self._vmware_cust_conf.marker_id
hasmarkerfile = check_marker_exists(
- product_marker, os.path.join(self.paths.cloud_dir, 'data'))
+ product_marker, os.path.join(self.paths.cloud_dir, "data")
+ )
special_customization = product_marker and not hasmarkerfile
customscript = self._vmware_cust_conf.custom_script_name
@@ -243,7 +259,8 @@ class DataSourceOVF(sources.DataSource):
custScriptConfig = get_tools_config(
CONFGROUPNAME_GUESTCUSTOMIZATION,
GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS,
- defVal)
+ defVal,
+ )
if custScriptConfig.lower() != "true":
# Update the customization status if custom script
# is disabled
@@ -251,19 +268,21 @@ class DataSourceOVF(sources.DataSource):
LOG.debug(msg)
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED)
+ GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED,
+ )
raise RuntimeError(msg)
ccScriptsDir = os.path.join(
- self.paths.get_cpath("scripts"),
- "per-instance")
+ self.paths.get_cpath("scripts"), "per-instance"
+ )
except Exception as e:
_raise_error_status(
"Error parsing the customization Config File",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
if special_customization:
if customscript:
@@ -276,22 +295,22 @@ class DataSourceOVF(sources.DataSource):
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
try:
LOG.debug("Preparing the Network configuration")
self._network_config = get_network_config_from_conf(
- self._vmware_cust_conf,
- True,
- True,
- self.distro.osfamily)
+ self._vmware_cust_conf, True, True, self.distro.osfamily
+ )
except Exception as e:
_raise_error_status(
"Error preparing Network Configuration",
e,
GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
if special_customization:
LOG.debug("Applying password customization")
@@ -300,8 +319,9 @@ class DataSourceOVF(sources.DataSource):
try:
resetpwd = self._vmware_cust_conf.reset_password
if adminpwd or resetpwd:
- pwdConfigurator.configure(adminpwd, resetpwd,
- self.distro)
+ pwdConfigurator.configure(
+ adminpwd, resetpwd, self.distro
+ )
else:
LOG.debug("Changing password is not needed")
except Exception as e:
@@ -310,13 +330,14 @@ class DataSourceOVF(sources.DataSource):
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
if customscript:
try:
- postcust = PostCustomScript(customscript,
- imcdirpath,
- ccScriptsDir)
+ postcust = PostCustomScript(
+ customscript, imcdirpath, ccScriptsDir
+ )
postcust.execute()
except Exception as e:
_raise_error_status(
@@ -324,23 +345,26 @@ class DataSourceOVF(sources.DataSource):
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
if product_marker:
try:
setup_marker_files(
product_marker,
- os.path.join(self.paths.cloud_dir, 'data'))
+ os.path.join(self.paths.cloud_dir, "data"),
+ )
except Exception as e:
_raise_error_status(
"Error creating marker files",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
self._vmware_cust_found = True
- found.append('vmware-tools')
+ found.append("vmware-tools")
# TODO: Need to set the status to DONE only when the
# customization is done successfully.
@@ -348,12 +372,15 @@ class DataSourceOVF(sources.DataSource):
enable_nics(self._vmware_nics_to_enable)
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_DONE,
- GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
+ GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS,
+ )
set_gc_status(self._vmware_cust_conf, "Successful")
else:
- np = [('com.vmware.guestInfo', transport_vmware_guestinfo),
- ('iso', transport_iso9660)]
+ np = [
+ ("com.vmware.guestInfo", transport_vmware_guestinfo),
+ ("iso", transport_iso9660),
+ ]
name = None
for name, transfunc in np:
contents = transfunc()
@@ -362,24 +389,23 @@ class DataSourceOVF(sources.DataSource):
if contents:
(md, ud, cfg) = read_ovf_environment(contents, True)
self.environment = contents
- if 'network-config' in md and md['network-config']:
- self._network_config = md['network-config']
+ if "network-config" in md and md["network-config"]:
+ self._network_config = md["network-config"]
found.append(name)
# There were no OVF transports found
if len(found) == 0:
return False
- if 'seedfrom' in md and md['seedfrom']:
- seedfrom = md['seedfrom']
+ if "seedfrom" in md and md["seedfrom"]:
+ seedfrom = md["seedfrom"]
seedfound = False
for proto in self.supported_seed_starts:
if seedfrom.startswith(proto):
seedfound = proto
break
if not seedfound:
- LOG.debug("Seed from %s not supported by %s",
- seedfrom, self)
+ LOG.debug("Seed from %s not supported by %s", seedfrom, self)
return False
(md_seed, ud, vd) = util.read_seeded(seedfrom, timeout=None)
@@ -400,14 +426,14 @@ class DataSourceOVF(sources.DataSource):
def _get_subplatform(self):
system_type = dmi.read_dmi_data("system-product-name").lower()
- if system_type == 'vmware':
- return 'vmware (%s)' % self.seed
- return 'ovf (%s)' % self.seed
+ if system_type == "vmware":
+ return "vmware (%s)" % self.seed
+ return "ovf (%s)" % self.seed
def get_public_ssh_keys(self):
- if 'public-keys' not in self.metadata:
+ if "public-keys" not in self.metadata:
return []
- pks = self.metadata['public-keys']
+ pks = self.metadata["public-keys"]
if isinstance(pks, (list)):
return pks
else:
@@ -427,14 +453,14 @@ class DataSourceOVF(sources.DataSource):
class DataSourceOVFNet(DataSourceOVF):
def __init__(self, sys_cfg, distro, paths):
DataSourceOVF.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'ovf-net')
+ self.seed_dir = os.path.join(paths.seed_dir, "ovf-net")
self.supported_seed_starts = ("http://", "https://")
self.vmware_customization_supported = False
def get_max_wait_from_cfg(cfg):
default_max_wait = 15
- max_wait_cfg_option = 'vmware_cust_file_max_wait'
+ max_wait_cfg_option = "vmware_cust_file_max_wait"
max_wait = default_max_wait
if not cfg:
@@ -443,19 +469,27 @@ def get_max_wait_from_cfg(cfg):
try:
max_wait = int(cfg.get(max_wait_cfg_option, default_max_wait))
except ValueError:
- LOG.warning("Failed to get '%s', using %s",
- max_wait_cfg_option, default_max_wait)
+ LOG.warning(
+ "Failed to get '%s', using %s",
+ max_wait_cfg_option,
+ default_max_wait,
+ )
if max_wait < 0:
- LOG.warning("Invalid value '%s' for '%s', using '%s' instead",
- max_wait, max_wait_cfg_option, default_max_wait)
+ LOG.warning(
+ "Invalid value '%s' for '%s', using '%s' instead",
+ max_wait,
+ max_wait_cfg_option,
+ default_max_wait,
+ )
max_wait = default_max_wait
return max_wait
-def wait_for_imc_cfg_file(filename, maxwait=180, naplen=5,
- dirpath="/var/run/vmware-imc"):
+def wait_for_imc_cfg_file(
+ filename, maxwait=180, naplen=5, dirpath="/var/run/vmware-imc"
+):
waited = 0
if maxwait <= naplen:
naplen = 1
@@ -470,24 +504,26 @@ def wait_for_imc_cfg_file(filename, maxwait=180, naplen=5,
return None
-def get_network_config_from_conf(config, use_system_devices=True,
- configure=False, osfamily=None):
+def get_network_config_from_conf(
+ config, use_system_devices=True, configure=False, osfamily=None
+):
nicConfigurator = NicConfigurator(config.nics, use_system_devices)
nics_cfg_list = nicConfigurator.generate(configure, osfamily)
- return get_network_config(nics_cfg_list,
- config.name_servers,
- config.dns_suffixes)
+ return get_network_config(
+ nics_cfg_list, config.name_servers, config.dns_suffixes
+ )
def get_network_config(nics=None, nameservers=None, search=None):
config_list = nics
if nameservers or search:
- config_list.append({'type': 'nameserver', 'address': nameservers,
- 'search': search})
+ config_list.append(
+ {"type": "nameserver", "address": nameservers, "search": search}
+ )
- return {'version': 1, 'config': config_list}
+ return {"version": 1, "config": config_list}
# This will return a dict with some content
@@ -498,14 +534,14 @@ def read_vmware_imc(config):
ud = None
if config.host_name:
if config.domain_name:
- md['local-hostname'] = config.host_name + "." + config.domain_name
+ md["local-hostname"] = config.host_name + "." + config.domain_name
else:
- md['local-hostname'] = config.host_name
+ md["local-hostname"] = config.host_name
if config.timezone:
- cfg['timezone'] = config.timezone
+ cfg["timezone"] = config.timezone
- md['instance-id'] = "iid-vmware-imc"
+ md["instance-id"] = "iid-vmware-imc"
return (md, ud, cfg)
@@ -516,11 +552,11 @@ def read_ovf_environment(contents, read_network=False):
md = {}
cfg = {}
ud = None
- cfg_props = ['password']
- md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id']
- network_props = ['network-config']
+ cfg_props = ["password"]
+ md_props = ["seedfrom", "local-hostname", "public-keys", "instance-id"]
+ network_props = ["network-config"]
for (prop, val) in props.items():
- if prop == 'hostname':
+ if prop == "hostname":
prop = "local-hostname"
if prop in md_props:
md[prop] = val
@@ -529,7 +565,7 @@ def read_ovf_environment(contents, read_network=False):
elif prop in network_props and read_network:
try:
network_config = base64.b64decode(val.encode())
- md[prop] = safeload_yaml_or_dict(network_config).get('network')
+ md[prop] = safeload_yaml_or_dict(network_config).get("network")
except Exception:
LOG.debug("Ignore network-config in wrong format")
elif prop == "user-data":
@@ -601,12 +637,12 @@ def transport_iso9660(require_iso=True):
# Go through mounts to see if it was already mounted
mounts = util.mounts()
for (dev, info) in mounts.items():
- fstype = info['fstype']
+ fstype = info["fstype"]
if fstype != "iso9660" and require_iso:
continue
if not maybe_cdrom_device(dev):
continue
- mp = info['mountpoint']
+ mp = info["mountpoint"]
(_fname, contents) = get_ovf_env(mp)
if contents is not False:
return contents
@@ -617,9 +653,11 @@ def transport_iso9660(require_iso=True):
mtype = None
# generate a list of devices with mtype filesystem, filter by regex
- devs = [dev for dev in
- util.find_devs_with("TYPE=%s" % mtype if mtype else None)
- if maybe_cdrom_device(dev)]
+ devs = [
+ dev
+ for dev in util.find_devs_with("TYPE=%s" % mtype if mtype else None)
+ if maybe_cdrom_device(dev)
+ ]
for dev in devs:
try:
(_fname, contents) = util.mount_cb(dev, get_ovf_env, mtype=mtype)
@@ -674,15 +712,17 @@ def get_properties(contents):
# could also check here that elem.namespaceURI ==
# "http://schemas.dmtf.org/ovf/environment/1"
- propSections = find_child(dom.documentElement,
- lambda n: n.localName == "PropertySection")
+ propSections = find_child(
+ dom.documentElement, lambda n: n.localName == "PropertySection"
+ )
if len(propSections) == 0:
raise XmlError("No 'PropertySection's")
props = {}
- propElems = find_child(propSections[0],
- (lambda n: n.localName == "Property"))
+ propElems = find_child(
+ propSections[0], (lambda n: n.localName == "Property")
+ )
for elem in propElems:
key = elem.attributes.getNamedItemNS(envNsURI, "key").value
@@ -709,7 +749,7 @@ class XmlError(Exception):
# Used to match classes to dependencies
datasources = (
- (DataSourceOVF, (sources.DEP_FILESYSTEM, )),
+ (DataSourceOVF, (sources.DEP_FILESYSTEM,)),
(DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
)
@@ -753,7 +793,7 @@ def setup_marker_files(markerid, marker_dir):
for fname in os.listdir(marker_dir):
if fname.startswith(".markerfile"):
util.del_file(os.path.join(marker_dir, fname))
- open(markerfile, 'w').close()
+ open(markerfile, "w").close()
def _raise_error_status(prefix, error, event, config_file, conf):
@@ -761,10 +801,8 @@ def _raise_error_status(prefix, error, event, config_file, conf):
Raise error and send customization status to the underlying VMware
Virtualization Platform. Also, clean up the imc directory.
"""
- LOG.debug('%s: %s', prefix, error)
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- event)
+ LOG.debug("%s: %s", prefix, error)
+ set_customization_status(GuestCustStateEnum.GUESTCUST_STATE_RUNNING, event)
set_gc_status(conf, prefix)
util.del_dir(os.path.dirname(config_file))
raise error
@@ -780,16 +818,15 @@ def load_cloudinit_data(md_path, ud_path):
@raises: FileNotFoundError if md_path or ud_path are absent
"""
- LOG.debug('load meta data from: %s: user data from: %s',
- md_path, ud_path)
+ LOG.debug("load meta data from: %s: user data from: %s", md_path, ud_path)
md = {}
ud = None
network = None
md = safeload_yaml_or_dict(util.load_file(md_path))
- if 'network' in md:
- network = md['network']
+ if "network" in md:
+ network = md["network"]
if ud_path:
ud = util.load_file(ud_path).replace("\r", "")
@@ -797,18 +834,18 @@ def load_cloudinit_data(md_path, ud_path):
def safeload_yaml_or_dict(data):
- '''
+ """
The meta data could be JSON or YAML. Since YAML is a strict superset of
JSON, we will unmarshal the data as YAML. If data is None then a new
dictionary is returned.
- '''
+ """
if not data:
return {}
return safeyaml.load(data)
def collect_imc_file_paths(cust_conf):
- '''
+ """
collect all the other imc files.
metadata is preferred to nics.txt configuration data.
@@ -822,7 +859,7 @@ def collect_imc_file_paths(cust_conf):
2. user provided metadata (md_path, None, None)
3. user-provided network config (None, None, nics_path)
4. No config found (None, None, None)
- '''
+ """
md_path = None
ud_path = None
nics_path = None
@@ -830,19 +867,21 @@ def collect_imc_file_paths(cust_conf):
if md_file:
md_path = os.path.join(VMWARE_IMC_DIR, md_file)
if not os.path.exists(md_path):
- raise FileNotFoundError("meta data file is not found: %s"
- % md_path)
+ raise FileNotFoundError(
+ "meta data file is not found: %s" % md_path
+ )
ud_file = cust_conf.user_data_name
if ud_file:
ud_path = os.path.join(VMWARE_IMC_DIR, ud_file)
if not os.path.exists(ud_path):
- raise FileNotFoundError("user data file is not found: %s"
- % ud_path)
+ raise FileNotFoundError(
+ "user data file is not found: %s" % ud_path
+ )
else:
nics_path = os.path.join(VMWARE_IMC_DIR, "nics.txt")
if not os.path.exists(nics_path):
- LOG.debug('%s does not exist.', nics_path)
+ LOG.debug("%s does not exist.", nics_path)
nics_path = None
return md_path, ud_path, nics_path
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 21603fbd..9734d1a8 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -20,16 +20,12 @@ import re
import string
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import net, sources, subp, util
LOG = logging.getLogger(__name__)
DEFAULT_IID = "iid-dsopennebula"
-DEFAULT_PARSEUSER = 'nobody'
+DEFAULT_PARSEUSER = "nobody"
CONTEXT_DISK_FILES = ["context.sh"]
@@ -40,7 +36,7 @@ class DataSourceOpenNebula(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed = None
- self.seed_dir = os.path.join(paths.seed_dir, 'opennebula')
+ self.seed_dir = os.path.join(paths.seed_dir, "opennebula")
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -53,8 +49,8 @@ class DataSourceOpenNebula(sources.DataSource):
# decide parseuser for context.sh shell reader
parseuser = DEFAULT_PARSEUSER
- if 'parseuser' in self.ds_cfg:
- parseuser = self.ds_cfg.get('parseuser')
+ if "parseuser" in self.ds_cfg:
+ parseuser = self.ds_cfg.get("parseuser")
candidates = [self.seed_dir]
candidates.extend(find_candidate_devs())
@@ -90,29 +86,30 @@ class DataSourceOpenNebula(sources.DataSource):
return False
# merge fetched metadata with datasource defaults
- md = results['metadata']
+ md = results["metadata"]
md = util.mergemanydict([md, defaults])
# check for valid user specified dsmode
self.dsmode = self._determine_dsmode(
- [results.get('DSMODE'), self.ds_cfg.get('dsmode')])
+ [results.get("DSMODE"), self.ds_cfg.get("dsmode")]
+ )
if self.dsmode == sources.DSMODE_DISABLED:
return False
self.seed = seed
- self.network = results.get('network-interfaces')
+ self.network = results.get("network-interfaces")
self.metadata = md
- self.userdata_raw = results.get('userdata')
+ self.userdata_raw = results.get("userdata")
return True
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
if self.seed_dir in self.seed:
- subplatform_type = 'seed-dir'
+ subplatform_type = "seed-dir"
else:
- subplatform_type = 'config-disk'
- return '%s (%s)' % (subplatform_type, self.seed)
+ subplatform_type = "config-disk"
+ return "%s (%s)" % (subplatform_type, self.seed)
@property
def network_config(self):
@@ -144,19 +141,25 @@ class OpenNebulaNetwork(object):
if system_nics_by_mac is None:
system_nics_by_mac = get_physical_nics_by_mac(distro)
self.ifaces = collections.OrderedDict(
- [k for k in sorted(system_nics_by_mac.items(),
- key=lambda k: net.natural_sort_key(k[1]))])
+ [
+ k
+ for k in sorted(
+ system_nics_by_mac.items(),
+ key=lambda k: net.natural_sort_key(k[1]),
+ )
+ ]
+ )
# OpenNebula 4.14+ provide macaddr for ETHX in variable ETH_MAC.
# context_devname provides {mac.lower():ETHX, mac2.lower():ETHX}
self.context_devname = {}
for k, v in context.items():
- m = re.match(r'^(.+)_MAC$', k)
+ m = re.match(r"^(.+)_MAC$", k)
if m:
self.context_devname[v.lower()] = m.group(1)
def mac2ip(self, mac):
- return '.'.join([str(int(c, 16)) for c in mac.split(':')[2:]])
+ return ".".join([str(int(c, 16)) for c in mac.split(":")[2:]])
def mac2network(self, mac):
return self.mac2ip(mac).rpartition(".")[0] + ".0"
@@ -164,12 +167,12 @@ class OpenNebulaNetwork(object):
def get_nameservers(self, dev):
nameservers = {}
dns = self.get_field(dev, "dns", "").split()
- dns.extend(self.context.get('DNS', "").split())
+ dns.extend(self.context.get("DNS", "").split())
if dns:
- nameservers['addresses'] = dns
+ nameservers["addresses"] = dns
search_domain = self.get_field(dev, "search_domain", "").split()
if search_domain:
- nameservers['search'] = search_domain
+ nameservers["search"] = search_domain
return nameservers
def get_mtu(self, dev):
@@ -198,8 +201,9 @@ class OpenNebulaNetwork(object):
# OpenNebula 6.1.80 introduced new context parameter ETHx_IP6_GATEWAY
# to replace old ETHx_GATEWAY6. Old ETHx_GATEWAY6 will be removed in
# OpenNebula 6.4.0 (https://github.com/OpenNebula/one/issues/5536).
- return self.get_field(dev, "ip6_gateway",
- self.get_field(dev, "gateway6"))
+ return self.get_field(
+ dev, "ip6_gateway", self.get_field(dev, "gateway6")
+ )
def get_mask(self, dev):
return self.get_field(dev, "mask", "255.255.255.0")
@@ -212,14 +216,21 @@ class OpenNebulaNetwork(object):
context stores <dev>_<NAME> (example: eth0_DOMAIN).
an empty string for value will return default."""
- val = self.context.get('_'.join((dev, name,)).upper())
+ val = self.context.get(
+ "_".join(
+ (
+ dev,
+ name,
+ )
+ ).upper()
+ )
# allow empty string to return the default.
return default if val in (None, "") else val
def gen_conf(self):
netconf = {}
- netconf['version'] = 2
- netconf['ethernets'] = {}
+ netconf["version"] = 2
+ netconf["ethernets"] = {}
ethernets = {}
for mac, dev in self.ifaces.items():
@@ -232,46 +243,46 @@ class OpenNebulaNetwork(object):
devconf = {}
# Set MAC address
- devconf['match'] = {'macaddress': mac}
+ devconf["match"] = {"macaddress": mac}
# Set IPv4 address
- devconf['addresses'] = []
+ devconf["addresses"] = []
mask = self.get_mask(c_dev)
prefix = str(net.mask_to_net_prefix(mask))
- devconf['addresses'].append(
- self.get_ip(c_dev, mac) + '/' + prefix)
+ devconf["addresses"].append(self.get_ip(c_dev, mac) + "/" + prefix)
# Set IPv6 Global and ULA address
addresses6 = self.get_ip6(c_dev)
if addresses6:
prefix6 = self.get_ip6_prefix(c_dev)
- devconf['addresses'].extend(
- [i + '/' + prefix6 for i in addresses6])
+ devconf["addresses"].extend(
+ [i + "/" + prefix6 for i in addresses6]
+ )
# Set IPv4 default gateway
gateway = self.get_gateway(c_dev)
if gateway:
- devconf['gateway4'] = gateway
+ devconf["gateway4"] = gateway
# Set IPv6 default gateway
gateway6 = self.get_gateway6(c_dev)
if gateway6:
- devconf['gateway6'] = gateway6
+ devconf["gateway6"] = gateway6
# Set DNS servers and search domains
nameservers = self.get_nameservers(c_dev)
if nameservers:
- devconf['nameservers'] = nameservers
+ devconf["nameservers"] = nameservers
# Set MTU size
mtu = self.get_mtu(c_dev)
if mtu:
- devconf['mtu'] = mtu
+ devconf["mtu"] = mtu
ethernets[dev] = devconf
- netconf['ethernets'] = ethernets
- return(netconf)
+ netconf["ethernets"] = ethernets
+ return netconf
def find_candidate_devs():
@@ -279,7 +290,7 @@ def find_candidate_devs():
Return a list of devices that may contain the context disk.
"""
combined = []
- for f in ('LABEL=CONTEXT', 'LABEL=CDROM', 'TYPE=iso9660'):
+ for f in ("LABEL=CONTEXT", "LABEL=CDROM", "TYPE=iso9660"):
devs = util.find_devs_with(f)
devs.sort()
for d in devs:
@@ -290,16 +301,17 @@ def find_candidate_devs():
def switch_user_cmd(user):
- return ['sudo', '-u', user]
+ return ["sudo", "-u", user]
-def parse_shell_config(content, keylist=None, bash=None, asuser=None,
- switch_user_cb=None):
+def parse_shell_config(
+ content, keylist=None, bash=None, asuser=None, switch_user_cb=None
+):
if isinstance(bash, str):
bash = [bash]
elif bash is None:
- bash = ['bash', '-e']
+ bash = ["bash", "-e"]
if switch_user_cb is None:
switch_user_cb = switch_user_cmd
@@ -313,17 +325,24 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
keylist = allvars
keylist_in = []
- setup = '\n'.join(('__v="";', '',))
+ setup = "\n".join(
+ (
+ '__v="";',
+ "",
+ )
+ )
def varprinter(vlist):
# output '\0'.join(['_start_', key=value NULL for vars in vlist]
- return '\n'.join((
- 'printf "%s\\0" _start_',
- 'for __v in %s; do' % ' '.join(vlist),
- ' printf "%s=%s\\0" "$__v" "${!__v}";',
- 'done',
- ''
- ))
+ return "\n".join(
+ (
+ 'printf "%s\\0" _start_',
+ "for __v in %s; do" % " ".join(vlist),
+ ' printf "%s=%s\\0" "$__v" "${!__v}";',
+ "done",
+ "",
+ )
+ )
# the rendered 'bcmd' is bash syntax that does
# setup: declare variables we use (so they show up in 'all')
@@ -336,12 +355,15 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
# key=value (for each preset variable)
# literal '_start_'
# key=value (for each post set variable)
- bcmd = ('unset IFS\n' +
- setup +
- varprinter(allvars) +
- '{\n%s\n\n:\n} > /dev/null\n' % content +
- 'unset IFS\n' +
- varprinter(keylist) + "\n")
+ bcmd = (
+ "unset IFS\n"
+ + setup
+ + varprinter(allvars)
+ + "{\n%s\n\n:\n} > /dev/null\n" % content
+ + "unset IFS\n"
+ + varprinter(keylist)
+ + "\n"
+ )
cmd = []
if asuser is not None:
@@ -353,8 +375,14 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
# exclude vars in bash that change on their own or that we used
excluded = (
- "EPOCHREALTIME", "EPOCHSECONDS", "RANDOM", "LINENO", "SECONDS", "_",
- "SRANDOM", "__v",
+ "EPOCHREALTIME",
+ "EPOCHSECONDS",
+ "RANDOM",
+ "LINENO",
+ "SECONDS",
+ "_",
+ "SRANDOM",
+ "__v",
)
preset = {}
ret = {}
@@ -368,8 +396,9 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
(key, val) = line.split("=", 1)
if target is preset:
preset[key] = val
- elif (key not in excluded and
- (key in keylist_in or preset.get(key) != val)):
+ elif key not in excluded and (
+ key in keylist_in or preset.get(key) != val
+ ):
ret[key] = val
except ValueError:
if line != "_start_":
@@ -398,7 +427,7 @@ def read_context_disk_dir(source_dir, distro, asuser=None):
raise NonContextDiskDir("%s: %s" % (source_dir, "no files found"))
context = {}
- results = {'userdata': None, 'metadata': {}}
+ results = {"userdata": None, "metadata": {}}
if "context.sh" in found:
if asuser is not None:
@@ -407,10 +436,11 @@ def read_context_disk_dir(source_dir, distro, asuser=None):
except KeyError as e:
raise BrokenContextDiskDir(
"configured user '{user}' does not exist".format(
- user=asuser)
+ user=asuser
+ )
) from e
try:
- path = os.path.join(source_dir, 'context.sh')
+ path = os.path.join(source_dir, "context.sh")
content = util.load_file(path)
context = parse_shell_config(content, asuser=asuser)
except subp.ProcessExecutionError as e:
@@ -427,7 +457,7 @@ def read_context_disk_dir(source_dir, distro, asuser=None):
if not context:
return results
- results['metadata'] = context
+ results["metadata"] = context
# process single or multiple SSH keys
ssh_key_var = None
@@ -438,40 +468,41 @@ def read_context_disk_dir(source_dir, distro, asuser=None):
if ssh_key_var:
lines = context.get(ssh_key_var).splitlines()
- results['metadata']['public-keys'] = [
+ results["metadata"]["public-keys"] = [
line for line in lines if len(line) and not line.startswith("#")
]
# custom hostname -- try hostname or leave cloud-init
# itself create hostname from IP address later
- for k in ('SET_HOSTNAME', 'HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
+ for k in ("SET_HOSTNAME", "HOSTNAME", "PUBLIC_IP", "IP_PUBLIC", "ETH0_IP"):
if k in context:
- results['metadata']['local-hostname'] = context[k]
+ results["metadata"]["local-hostname"] = context[k]
break
# raw user data
if "USER_DATA" in context:
- results['userdata'] = context["USER_DATA"]
+ results["userdata"] = context["USER_DATA"]
elif "USERDATA" in context:
- results['userdata'] = context["USERDATA"]
+ results["userdata"] = context["USERDATA"]
# b64decode user data if necessary (default)
- if 'userdata' in results:
- encoding = context.get('USERDATA_ENCODING',
- context.get('USER_DATA_ENCODING'))
+ if "userdata" in results:
+ encoding = context.get(
+ "USERDATA_ENCODING", context.get("USER_DATA_ENCODING")
+ )
if encoding == "base64":
try:
- results['userdata'] = util.b64d(results['userdata'])
+ results["userdata"] = util.b64d(results["userdata"])
except TypeError:
LOG.warning("Failed base64 decoding of userdata")
# generate Network Configuration v2
# only if there are any required context variables
# http://docs.opennebula.org/5.4/operation/references/template.html#context-section
- ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP.*$', k)]
+ ipaddr_keys = [k for k in context if re.match(r"^ETH\d+_IP.*$", k)]
if ipaddr_keys:
onet = OpenNebulaNetwork(context, distro)
- results['network-interfaces'] = onet.gen_conf()
+ results["network-interfaces"] = onet.gen_conf()
return results
@@ -488,7 +519,7 @@ DataSourceOpenNebulaNet = DataSourceOpenNebula
# Used to match classes to dependencies
datasources = [
- (DataSourceOpenNebula, (sources.DEP_FILESYSTEM, )),
+ (DataSourceOpenNebula, (sources.DEP_FILESYSTEM,)),
]
@@ -496,4 +527,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
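For reference, the mac2ip helper reformatted in the OpenNebula hunks above derives a NIC's IPv4 address from the last four octets of its MAC address; the following is a minimal standalone sketch of that mapping (illustrative only, not part of this patch):

def mac_to_ipv4(mac):
    # The last four MAC octets, read as decimal, form the IPv4 address,
    # e.g. "02:00:0a:00:00:05" -> "10.0.0.5".
    return ".".join(str(int(octet, 16)) for octet in mac.split(":")[2:])

assert mac_to_ipv4("02:00:0a:00:00:05") == "10.0.0.5"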
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index a85b71d7..6878528d 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -8,13 +8,11 @@ import time
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import sources, url_helper, util
from cloudinit.event import EventScope, EventType
from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-from cloudinit.sources.helpers import openstack
from cloudinit.sources import DataSourceOracle as oracle
+from cloudinit.sources.helpers import openstack
LOG = logging.getLogger(__name__)
@@ -26,13 +24,13 @@ DEFAULT_METADATA = {
}
# OpenStack DMI constants
-DMI_PRODUCT_NOVA = 'OpenStack Nova'
-DMI_PRODUCT_COMPUTE = 'OpenStack Compute'
+DMI_PRODUCT_NOVA = "OpenStack Nova"
+DMI_PRODUCT_COMPUTE = "OpenStack Compute"
VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE]
-DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud'
+DMI_ASSET_TAG_OPENTELEKOM = "OpenTelekomCloud"
# See github.com/sapcc/helm-charts/blob/master/openstack/nova/values.yaml
# -> compute.defaults.vmware.smbios_asset_tag for this value
-DMI_ASSET_TAG_SAPCCLOUD = 'SAP CCloud VM'
+DMI_ASSET_TAG_SAPCCLOUD = "SAP CCloud VM"
VALID_DMI_ASSET_TAGS = VALID_DMI_PRODUCT_NAMES
VALID_DMI_ASSET_TAGS += [DMI_ASSET_TAG_OPENTELEKOM, DMI_ASSET_TAG_SAPCCLOUD]
@@ -46,12 +44,14 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
# Whether we want to get network configuration from the metadata service.
perform_dhcp_setup = False
- supported_update_events = {EventScope.NETWORK: {
- EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT,
- EventType.BOOT_LEGACY,
- EventType.HOTPLUG
- }}
+ supported_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
@@ -71,8 +71,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL])
filtered = [x for x in urls if util.is_resolvable_url(x)]
if set(filtered) != set(urls):
- LOG.debug("Removed the following from metadata urls: %s",
- list((set(urls) - set(filtered))))
+ LOG.debug(
+ "Removed the following from metadata urls: %s",
+ list((set(urls) - set(filtered))),
+ )
if len(filtered):
urls = filtered
else:
@@ -82,20 +84,25 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
md_urls = []
url2base = {}
for url in urls:
- md_url = url_helper.combine_url(url, 'openstack')
+ md_url = url_helper.combine_url(url, "openstack")
md_urls.append(md_url)
url2base[md_url] = url
url_params = self.get_url_params()
start_time = time.time()
avail_url, _response = url_helper.wait_for_url(
- urls=md_urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds)
+ urls=md_urls,
+ max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds,
+ )
if avail_url:
LOG.debug("Using metadata source: '%s'", url2base[avail_url])
else:
- LOG.debug("Giving up on OpenStack md from %s after %s seconds",
- md_urls, int(time.time() - start_time))
+ LOG.debug(
+ "Giving up on OpenStack md from %s after %s seconds",
+ md_urls,
+ int(time.time() - start_time),
+ )
self.metadata_address = url2base.get(avail_url)
return bool(avail_url)
@@ -113,18 +120,20 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
# RELEASE_BLOCKER: SRU to Xenial and Artful SRU should not provide
# network_config by default unless configured in /etc/cloud/cloud.cfg*.
# Patch Xenial and Artful before release to default to False.
- if util.is_false(self.ds_cfg.get('apply_network_config', True)):
+ if util.is_false(self.ds_cfg.get("apply_network_config", True)):
self._network_config = None
return self._network_config
if self.network_json == sources.UNSET:
# this would happen if get_data hadn't been called. leave as UNSET
LOG.warning(
- 'Unexpected call to network_config when network_json is None.')
+ "Unexpected call to network_config when network_json is None."
+ )
return None
- LOG.debug('network config provided via network_json')
+ LOG.debug("network config provided via network_json")
self._network_config = openstack.convert_net_json(
- self.network_json, known_macs=None)
+ self.network_json, known_macs=None
+ )
return self._network_config
def _get_data(self):
@@ -134,7 +143,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
False when unable to contact metadata service or when metadata
format is invalid or disabled.
"""
- oracle_considered = 'Oracle' in self.sys_cfg.get('datasource_list')
+ oracle_considered = "Oracle" in self.sys_cfg.get("datasource_list")
if not detect_openstack(accept_oracle=not oracle_considered):
return False
@@ -142,8 +151,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
try:
with EphemeralDHCPv4(self.fallback_interface):
results = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self._crawl_metadata)
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self._crawl_metadata,
+ )
except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
util.logexc(LOG, str(e))
return False
@@ -154,19 +165,19 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
util.logexc(LOG, str(e))
return False
- self.dsmode = self._determine_dsmode([results.get('dsmode')])
+ self.dsmode = self._determine_dsmode([results.get("dsmode")])
if self.dsmode == sources.DSMODE_DISABLED:
return False
- md = results.get('metadata', {})
+ md = results.get("metadata", {})
md = util.mergemanydict([md, DEFAULT_METADATA])
self.metadata = md
- self.ec2_metadata = results.get('ec2-metadata')
- self.network_json = results.get('networkdata')
- self.userdata_raw = results.get('userdata')
- self.version = results['version']
- self.files.update(results.get('files', {}))
+ self.ec2_metadata = results.get("ec2-metadata")
+ self.network_json = results.get("networkdata")
+ self.userdata_raw = results.get("userdata")
+ self.version = results["version"]
+ self.files.update(results.get("files", {}))
- vd = results.get('vendordata')
+ vd = results.get("vendordata")
self.vendordata_pure = vd
try:
self.vendordata_raw = sources.convert_vendordata(vd)
@@ -174,7 +185,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
LOG.warning("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
- vd2 = results.get('vendordata2')
+ vd2 = results.get("vendordata2")
self.vendordata2_pure = vd2
try:
self.vendordata2_raw = sources.convert_vendordata(vd2)
@@ -194,26 +205,35 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
try:
if not self.wait_for_metadata_service():
raise sources.InvalidMetaDataException(
- 'No active metadata service found')
+ "No active metadata service found"
+ )
except IOError as e:
raise sources.InvalidMetaDataException(
- 'IOError contacting metadata service: {error}'.format(
- error=str(e)))
+ "IOError contacting metadata service: {error}".format(
+ error=str(e)
+ )
+ )
url_params = self.get_url_params()
try:
result = util.log_time(
- LOG.debug, 'Crawl of openstack metadata service',
- read_metadata_service, args=[self.metadata_address],
- kwargs={'ssl_details': self.ssl_details,
- 'retries': url_params.num_retries,
- 'timeout': url_params.timeout_seconds})
+ LOG.debug,
+ "Crawl of openstack metadata service",
+ read_metadata_service,
+ args=[self.metadata_address],
+ kwargs={
+ "ssl_details": self.ssl_details,
+ "retries": url_params.num_retries,
+ "timeout": url_params.timeout_seconds,
+ },
+ )
except openstack.NonReadable as e:
raise sources.InvalidMetaDataException(str(e))
except (openstack.BrokenMetadata, IOError) as e:
- msg = 'Broken metadata address {addr}'.format(
- addr=self.metadata_address)
+ msg = "Broken metadata address {addr}".format(
+ addr=self.metadata_address
+ )
raise sources.InvalidMetaDataException(msg) from e
return result
@@ -230,10 +250,10 @@ class DataSourceOpenStackLocal(DataSourceOpenStack):
perform_dhcp_setup = True # Get metadata network config if present
-def read_metadata_service(base_url, ssl_details=None,
- timeout=5, retries=5):
- reader = openstack.MetadataReader(base_url, ssl_details=ssl_details,
- timeout=timeout, retries=retries)
+def read_metadata_service(base_url, ssl_details=None, timeout=5, retries=5):
+ reader = openstack.MetadataReader(
+ base_url, ssl_details=ssl_details, timeout=timeout, retries=retries
+ )
return reader.read_v2()
@@ -241,14 +261,14 @@ def detect_openstack(accept_oracle=False):
"""Return True when a potential OpenStack platform is detected."""
if not util.is_x86():
return True # Non-Intel cpus don't properly report dmi product names
- product_name = dmi.read_dmi_data('system-product-name')
+ product_name = dmi.read_dmi_data("system-product-name")
if product_name in VALID_DMI_PRODUCT_NAMES:
return True
- elif dmi.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS:
+ elif dmi.read_dmi_data("chassis-asset-tag") in VALID_DMI_ASSET_TAGS:
return True
elif accept_oracle and oracle._is_platform_viable():
return True
- elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA:
+ elif util.get_proc_env(1).get("product_name") == DMI_PRODUCT_NOVA:
return True
return False
@@ -264,4 +284,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
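The detect_openstack changes above key off the DMI system-product-name and chassis-asset-tag values; here is a minimal sketch of that matching logic using the same constants shown in the hunks (illustrative only, not part of this patch):

VALID_PRODUCT_NAMES = {"OpenStack Nova", "OpenStack Compute"}
VALID_ASSET_TAGS = VALID_PRODUCT_NAMES | {"OpenTelekomCloud", "SAP CCloud VM"}

def looks_like_openstack(product_name, asset_tag):
    # Mirrors the DMI checks in detect_openstack(): either field matching
    # a known value is enough to treat the platform as OpenStack.
    return product_name in VALID_PRODUCT_NAMES or asset_tag in VALID_ASSET_TAGS

assert looks_like_openstack("OpenStack Nova", None)
assert looks_like_openstack(None, "SAP CCloud VM")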
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
index fbb5312a..3f918672 100644
--- a/cloudinit/sources/DataSourceOracle.py
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -32,7 +32,7 @@ LOG = logging.getLogger(__name__)
BUILTIN_DS_CONFIG = {
# Don't use IMDS to configure secondary NICs by default
- 'configure_secondary_nics': False,
+ "configure_secondary_nics": False,
}
CHASSIS_ASSET_TAG = "OracleCloud.com"
METADATA_ROOT = "http://169.254.169.254/opc/v{version}/"
@@ -61,43 +61,45 @@ def _ensure_netfailover_safe(network_config):
"""
# ignore anything that's not an actual network-config
- if 'version' not in network_config:
+ if "version" not in network_config:
return
- if network_config['version'] not in [1, 2]:
- LOG.debug('Ignoring unknown network config version: %s',
- network_config['version'])
+ if network_config["version"] not in [1, 2]:
+ LOG.debug(
+ "Ignoring unknown network config version: %s",
+ network_config["version"],
+ )
return
mac_to_name = get_interfaces_by_mac()
- if network_config['version'] == 1:
- for cfg in [c for c in network_config['config'] if 'type' in c]:
- if cfg['type'] == 'physical':
- if 'mac_address' in cfg:
- mac = cfg['mac_address']
+ if network_config["version"] == 1:
+ for cfg in [c for c in network_config["config"] if "type" in c]:
+ if cfg["type"] == "physical":
+ if "mac_address" in cfg:
+ mac = cfg["mac_address"]
cur_name = mac_to_name.get(mac)
if not cur_name:
continue
elif is_netfail_master(cur_name):
- del cfg['mac_address']
+ del cfg["mac_address"]
- elif network_config['version'] == 2:
- for _, cfg in network_config.get('ethernets', {}).items():
- if 'match' in cfg:
- macaddr = cfg.get('match', {}).get('macaddress')
+ elif network_config["version"] == 2:
+ for _, cfg in network_config.get("ethernets", {}).items():
+ if "match" in cfg:
+ macaddr = cfg.get("match", {}).get("macaddress")
if macaddr:
cur_name = mac_to_name.get(macaddr)
if not cur_name:
continue
elif is_netfail_master(cur_name):
- del cfg['match']['macaddress']
- del cfg['set-name']
- cfg['match']['name'] = cur_name
+ del cfg["match"]["macaddress"]
+ del cfg["set-name"]
+ cfg["match"]["name"] = cur_name
class DataSourceOracle(sources.DataSource):
- dsname = 'Oracle'
+ dsname = "Oracle"
system_uuid = None
vendordata_pure = None
network_config_sources = (
@@ -113,9 +115,12 @@ class DataSourceOracle(sources.DataSource):
super(DataSourceOracle, self).__init__(sys_cfg, *args, **kwargs)
self._vnics_data = None
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ['datasource', self.dsname], {}),
- BUILTIN_DS_CONFIG])
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", self.dsname], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
def _is_platform_viable(self):
"""Check platform environment to report if this datasource may run."""
@@ -130,8 +135,8 @@ class DataSourceOracle(sources.DataSource):
# network may be configured if iscsi root. If that is the case
# then read_initramfs_config will return non-None.
fetch_vnics_data = self.ds_cfg.get(
- 'configure_secondary_nics',
- BUILTIN_DS_CONFIG["configure_secondary_nics"]
+ "configure_secondary_nics",
+ BUILTIN_DS_CONFIG["configure_secondary_nics"],
)
network_context = noop()
if not _is_iscsi_root():
@@ -140,7 +145,7 @@ class DataSourceOracle(sources.DataSource):
connectivity_url_data={
"url": METADATA_PATTERN.format(version=2, path="instance"),
"headers": V2_HEADERS,
- }
+ },
)
with network_context:
fetched_metadata = read_opc_metadata(
@@ -179,7 +184,7 @@ class DataSourceOracle(sources.DataSource):
return sources.instance_id_matches_system_uuid(self.system_uuid)
def get_public_ssh_keys(self):
- return sources.normalize_pubkey_data(self.metadata.get('public_keys'))
+ return sources.normalize_pubkey_data(self.metadata.get("public_keys"))
@property
def network_config(self):
@@ -196,8 +201,8 @@ class DataSourceOracle(sources.DataSource):
self._network_config = self.distro.generate_fallback_config()
if self.ds_cfg.get(
- 'configure_secondary_nics',
- BUILTIN_DS_CONFIG["configure_secondary_nics"]
+ "configure_secondary_nics",
+ BUILTIN_DS_CONFIG["configure_secondary_nics"],
):
try:
# Mutate self._network_config to include secondary
@@ -205,8 +210,8 @@ class DataSourceOracle(sources.DataSource):
self._add_network_config_from_opc_imds()
except Exception:
util.logexc(
- LOG,
- "Failed to parse secondary network configuration!")
+ LOG, "Failed to parse secondary network configuration!"
+ )
# we need to verify that the nic selected is not a netfail over
# device and, if it is a netfail master, then we need to avoid
@@ -230,11 +235,10 @@ class DataSourceOracle(sources.DataSource):
(if the IMDS returns valid JSON with unexpected contents).
"""
if self._vnics_data is None:
- LOG.warning(
- "Secondary NIC data is UNSET but should not be")
+ LOG.warning("Secondary NIC data is UNSET but should not be")
return
- if 'nicIndex' in self._vnics_data[0]:
+ if "nicIndex" in self._vnics_data[0]:
# TODO: Once configure_secondary_nics defaults to True, lower the
# level of this log message. (Currently, if we're running this
# code at all, someone has explicitly opted-in to secondary
@@ -243,8 +247,8 @@ class DataSourceOracle(sources.DataSource):
# Metal Machine launch, which means INFO or DEBUG would be more
# appropriate.)
LOG.warning(
- 'VNIC metadata indicates this is a bare metal machine; '
- 'skipping secondary VNIC configuration.'
+ "VNIC metadata indicates this is a bare metal machine; "
+ "skipping secondary VNIC configuration."
)
return
@@ -254,39 +258,45 @@ class DataSourceOracle(sources.DataSource):
# We skip the first entry in the response because the primary
# interface is already configured by iSCSI boot; applying
# configuration from the IMDS is not required.
- mac_address = vnic_dict['macAddr'].lower()
+ mac_address = vnic_dict["macAddr"].lower()
if mac_address not in interfaces_by_mac:
- LOG.debug('Interface with MAC %s not found; skipping',
- mac_address)
+ LOG.debug(
+ "Interface with MAC %s not found; skipping", mac_address
+ )
continue
name = interfaces_by_mac[mac_address]
- if self._network_config['version'] == 1:
+ if self._network_config["version"] == 1:
subnet = {
- 'type': 'static',
- 'address': vnic_dict['privateIp'],
+ "type": "static",
+ "address": vnic_dict["privateIp"],
+ }
+ self._network_config["config"].append(
+ {
+ "name": name,
+ "type": "physical",
+ "mac_address": mac_address,
+ "mtu": MTU,
+ "subnets": [subnet],
+ }
+ )
+ elif self._network_config["version"] == 2:
+ self._network_config["ethernets"][name] = {
+ "addresses": [vnic_dict["privateIp"]],
+ "mtu": MTU,
+ "dhcp4": False,
+ "dhcp6": False,
+ "match": {"macaddress": mac_address},
}
- self._network_config['config'].append({
- 'name': name,
- 'type': 'physical',
- 'mac_address': mac_address,
- 'mtu': MTU,
- 'subnets': [subnet],
- })
- elif self._network_config['version'] == 2:
- self._network_config['ethernets'][name] = {
- 'addresses': [vnic_dict['privateIp']],
- 'mtu': MTU, 'dhcp4': False, 'dhcp6': False,
- 'match': {'macaddress': mac_address}}
def _read_system_uuid():
- sys_uuid = dmi.read_dmi_data('system-uuid')
+ sys_uuid = dmi.read_dmi_data("system-uuid")
return None if sys_uuid is None else sys_uuid.lower()
def _is_platform_viable():
- asset_tag = dmi.read_dmi_data('chassis-asset-tag')
+ asset_tag = dmi.read_dmi_data("chassis-asset-tag")
return asset_tag == CHASSIS_ASSET_TAG
@@ -329,8 +339,9 @@ def read_opc_metadata(*, fetch_vnics_data: bool = False):
try:
vnics_data = _fetch(metadata_version, path="vnics")
except UrlError:
- util.logexc(LOG,
- "Failed to fetch secondary network configuration!")
+ util.logexc(
+ LOG, "Failed to fetch secondary network configuration!"
+ )
return OpcMetadata(metadata_version, instance_data, vnics_data)
diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py
index bb69e998..14ac77e4 100644
--- a/cloudinit/sources/DataSourceRbxCloud.py
+++ b/cloudinit/sources/DataSourceRbxCloud.py
@@ -14,32 +14,34 @@ import os
import os.path
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import sources, subp, util
from cloudinit.event import EventScope, EventType
LOG = logging.getLogger(__name__)
-ETC_HOSTS = '/etc/hosts'
+ETC_HOSTS = "/etc/hosts"
def get_manage_etc_hosts():
hosts = util.load_file(ETC_HOSTS, quiet=True)
if hosts:
- LOG.debug('/etc/hosts exists - setting manage_etc_hosts to False')
+ LOG.debug("/etc/hosts exists - setting manage_etc_hosts to False")
return False
- LOG.debug('/etc/hosts does not exists - setting manage_etc_hosts to True')
+ LOG.debug("/etc/hosts does not exists - setting manage_etc_hosts to True")
return True
def ip2int(addr):
- parts = addr.split('.')
- return (int(parts[0]) << 24) + (int(parts[1]) << 16) + \
- (int(parts[2]) << 8) + int(parts[3])
+ parts = addr.split(".")
+ return (
+ (int(parts[0]) << 24)
+ + (int(parts[1]) << 16)
+ + (int(parts[2]) << 8)
+ + int(parts[3])
+ )
def int2ip(addr):
- return '.'.join([str(addr >> (i << 3) & 0xFF) for i in range(4)[::-1]])
+ return ".".join([str(addr >> (i << 3) & 0xFF) for i in range(4)[::-1]])
def _sub_arp(cmd):
@@ -48,33 +50,35 @@ def _sub_arp(cmd):
and runs arping. Breaking this to a separate function
for later use in mocking and unittests
"""
- return subp.subp(['arping'] + cmd)
+ return subp.subp(["arping"] + cmd)
def gratuitous_arp(items, distro):
- source_param = '-S'
- if distro.name in ['fedora', 'centos', 'rhel']:
- source_param = '-s'
+ source_param = "-S"
+ if distro.name in ["fedora", "centos", "rhel"]:
+ source_param = "-s"
for item in items:
try:
- _sub_arp([
- '-c', '2',
- source_param, item['source'],
- item['destination']
- ])
+ _sub_arp(
+ ["-c", "2", source_param, item["source"], item["destination"]]
+ )
except subp.ProcessExecutionError as error:
# warning, because the system is able to function properly
# despite no success - some ARP table may be waiting for
# expiration, but the system may continue
- LOG.warning('Failed to arping from "%s" to "%s": %s',
- item['source'], item['destination'], error)
+ LOG.warning(
+ 'Failed to arping from "%s" to "%s": %s',
+ item["source"],
+ item["destination"],
+ error,
+ )
def get_md():
"""Returns False (not found or error) or a dictionary with metadata."""
devices = set(
- util.find_devs_with('LABEL=CLOUDMD') +
- util.find_devs_with('LABEL=cloudmd')
+ util.find_devs_with("LABEL=CLOUDMD")
+ + util.find_devs_with("LABEL=cloudmd")
)
if not devices:
return False
@@ -83,7 +87,7 @@ def get_md():
rbx_data = util.mount_cb(
device=device,
callback=read_user_data_callback,
- mtype=['vfat', 'fat', 'msdosfs']
+ mtype=["vfat", "fat", "msdosfs"],
)
if rbx_data:
return rbx_data
@@ -91,11 +95,13 @@ def get_md():
if err.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for user "
- "data", device)
+ util.logexc(
+ LOG, "Failed to mount %s when looking for user data", device
+ )
- LOG.debug("Did not find RbxCloud data, searched devices: %s",
- ",".join(devices))
+ LOG.debug(
+ "Did not find RbxCloud data, searched devices: %s", ",".join(devices)
+ )
return False
@@ -107,25 +113,28 @@ def generate_network_config(netadps):
@returns: A dict containing network config
"""
return {
- 'version': 1,
- 'config': [
+ "version": 1,
+ "config": [
{
- 'type': 'physical',
- 'name': 'eth{}'.format(str(i)),
- 'mac_address': netadp['macaddress'].lower(),
- 'subnets': [
+ "type": "physical",
+ "name": "eth{}".format(str(i)),
+ "mac_address": netadp["macaddress"].lower(),
+ "subnets": [
{
- 'type': 'static',
- 'address': ip['address'],
- 'netmask': netadp['network']['netmask'],
- 'control': 'auto',
- 'gateway': netadp['network']['gateway'],
- 'dns_nameservers': netadp['network']['dns'][
- 'nameservers']
- } for ip in netadp['ip']
+ "type": "static",
+ "address": ip["address"],
+ "netmask": netadp["network"]["netmask"],
+ "control": "auto",
+ "gateway": netadp["network"]["gateway"],
+ "dns_nameservers": netadp["network"]["dns"][
+ "nameservers"
+ ],
+ }
+ for ip in netadp["ip"]
],
- } for i, netadp in enumerate(netadps)
- ]
+ }
+ for i, netadp in enumerate(netadps)
+ ],
}
@@ -140,65 +149,60 @@ def read_user_data_callback(mount_dir):
"""
meta_data = util.load_json(
text=util.load_file(
- fname=os.path.join(mount_dir, 'cloud.json'),
- decode=False
+ fname=os.path.join(mount_dir, "cloud.json"), decode=False
)
)
user_data = util.load_file(
- fname=os.path.join(mount_dir, 'user.data'),
- quiet=True
+ fname=os.path.join(mount_dir, "user.data"), quiet=True
)
- if 'vm' not in meta_data or 'netadp' not in meta_data:
+ if "vm" not in meta_data or "netadp" not in meta_data:
util.logexc(LOG, "Failed to load metadata. Invalid format.")
return None
- username = meta_data.get('additionalMetadata', {}).get('username')
- ssh_keys = meta_data.get('additionalMetadata', {}).get('sshKeys', [])
+ username = meta_data.get("additionalMetadata", {}).get("username")
+ ssh_keys = meta_data.get("additionalMetadata", {}).get("sshKeys", [])
hash = None
- if meta_data.get('additionalMetadata', {}).get('password'):
- hash = meta_data['additionalMetadata']['password']['sha512']
+ if meta_data.get("additionalMetadata", {}).get("password"):
+ hash = meta_data["additionalMetadata"]["password"]["sha512"]
- network = generate_network_config(meta_data['netadp'])
+ network = generate_network_config(meta_data["netadp"])
data = {
- 'userdata': user_data,
- 'metadata': {
- 'instance-id': meta_data['vm']['_id'],
- 'local-hostname': meta_data['vm']['name'],
- 'public-keys': []
+ "userdata": user_data,
+ "metadata": {
+ "instance-id": meta_data["vm"]["_id"],
+ "local-hostname": meta_data["vm"]["name"],
+ "public-keys": [],
},
- 'gratuitous_arp': [
- {
- "source": ip["address"],
- "destination": target
- }
- for netadp in meta_data['netadp']
- for ip in netadp['ip']
+ "gratuitous_arp": [
+ {"source": ip["address"], "destination": target}
+ for netadp in meta_data["netadp"]
+ for ip in netadp["ip"]
for target in [
- netadp['network']["gateway"],
- int2ip(ip2int(netadp['network']["gateway"]) + 2),
- int2ip(ip2int(netadp['network']["gateway"]) + 3)
+ netadp["network"]["gateway"],
+ int2ip(ip2int(netadp["network"]["gateway"]) + 2),
+ int2ip(ip2int(netadp["network"]["gateway"]) + 3),
]
],
- 'cfg': {
- 'ssh_pwauth': True,
- 'disable_root': True,
- 'system_info': {
- 'default_user': {
- 'name': username,
- 'gecos': username,
- 'sudo': ['ALL=(ALL) NOPASSWD:ALL'],
- 'passwd': hash,
- 'lock_passwd': False,
- 'ssh_authorized_keys': ssh_keys,
+ "cfg": {
+ "ssh_pwauth": True,
+ "disable_root": True,
+ "system_info": {
+ "default_user": {
+ "name": username,
+ "gecos": username,
+ "sudo": ["ALL=(ALL) NOPASSWD:ALL"],
+ "passwd": hash,
+ "lock_passwd": False,
+ "ssh_authorized_keys": ssh_keys,
}
},
- 'network_config': network,
- 'manage_etc_hosts': get_manage_etc_hosts(),
+ "network_config": network,
+ "manage_etc_hosts": get_manage_etc_hosts(),
},
}
- LOG.debug('returning DATA object:')
+ LOG.debug("returning DATA object:")
LOG.debug(data)
return data
@@ -206,11 +210,13 @@ def read_user_data_callback(mount_dir):
class DataSourceRbxCloud(sources.DataSource):
dsname = "RbxCloud"
- default_update_events = {EventScope.NETWORK: {
- EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT,
- EventType.BOOT_LEGACY
- }}
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -228,18 +234,18 @@ class DataSourceRbxCloud(sources.DataSource):
rbx_data = get_md()
if rbx_data is False:
return False
- self.userdata_raw = rbx_data['userdata']
- self.metadata = rbx_data['metadata']
- self.gratuitous_arp = rbx_data['gratuitous_arp']
- self.cfg = rbx_data['cfg']
+ self.userdata_raw = rbx_data["userdata"]
+ self.metadata = rbx_data["metadata"]
+ self.gratuitous_arp = rbx_data["gratuitous_arp"]
+ self.cfg = rbx_data["cfg"]
return True
@property
def network_config(self):
- return self.cfg['network_config']
+ return self.cfg["network_config"]
def get_public_ssh_keys(self):
- return self.metadata['public-keys']
+ return self.metadata["public-keys"]
def get_userdata_raw(self):
return self.userdata_raw
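The ip2int/int2ip pair reformatted in the RbxCloud hunks above packs an IPv4 address into a 32-bit integer so the extra gratuitous-ARP targets (gateway + 2 and gateway + 3) can be computed; a small self-contained sketch of the same arithmetic (illustrative only, not part of this patch):

def ip_to_int(addr):
    a, b, c, d = (int(part) for part in addr.split("."))
    return (a << 24) + (b << 16) + (c << 8) + d

def int_to_ip(num):
    return ".".join(str((num >> shift) & 0xFF) for shift in (24, 16, 8, 0))

# gateway 10.0.0.1 -> additional ARP targets 10.0.0.3 and 10.0.0.4
assert int_to_ip(ip_to_int("10.0.0.1") + 2) == "10.0.0.3"
assert int_to_ip(ip_to_int("10.0.0.1") + 3) == "10.0.0.4"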
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index 7b8974a2..8e5dd82c 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -27,21 +27,18 @@ from requests.packages.urllib3.poolmanager import PoolManager
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-from cloudinit import net
+from cloudinit import net, sources, url_helper, util
from cloudinit.event import EventScope, EventType
from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
LOG = logging.getLogger(__name__)
-DS_BASE_URL = 'http://169.254.42.42'
+DS_BASE_URL = "http://169.254.42.42"
BUILTIN_DS_CONFIG = {
- 'metadata_url': DS_BASE_URL + '/conf?format=json',
- 'userdata_url': DS_BASE_URL + '/user_data/cloud-init',
- 'vendordata_url': DS_BASE_URL + '/vendor_data/cloud-init'
+ "metadata_url": DS_BASE_URL + "/conf?format=json",
+ "userdata_url": DS_BASE_URL + "/user_data/cloud-init",
+ "vendordata_url": DS_BASE_URL + "/vendor_data/cloud-init",
}
DEF_MD_RETRIES = 5
@@ -57,15 +54,15 @@ def on_scaleway():
* the initrd created the file /var/run/scaleway.
* "scaleway" is in the kernel cmdline.
"""
- vendor_name = dmi.read_dmi_data('system-manufacturer')
- if vendor_name == 'Scaleway':
+ vendor_name = dmi.read_dmi_data("system-manufacturer")
+ if vendor_name == "Scaleway":
return True
- if os.path.exists('/var/run/scaleway'):
+ if os.path.exists("/var/run/scaleway"):
return True
cmdline = util.get_cmdline()
- if 'scaleway' in cmdline:
+ if "scaleway" in cmdline:
return True
return False
@@ -75,6 +72,7 @@ class SourceAddressAdapter(requests.adapters.HTTPAdapter):
"""
Adapter for requests to choose the local address to bind to.
"""
+
def __init__(self, source_address, **kwargs):
self.source_address = source_address
super(SourceAddressAdapter, self).__init__(**kwargs)
@@ -83,11 +81,13 @@ class SourceAddressAdapter(requests.adapters.HTTPAdapter):
socket_options = HTTPConnection.default_socket_options + [
(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
]
- self.poolmanager = PoolManager(num_pools=connections,
- maxsize=maxsize,
- block=block,
- source_address=self.source_address,
- socket_options=socket_options)
+ self.poolmanager = PoolManager(
+ num_pools=connections,
+ maxsize=maxsize,
+ block=block,
+ source_address=self.source_address,
+ socket_options=socket_options,
+ )
def query_data_api_once(api_address, timeout, requests_session):
@@ -117,9 +117,10 @@ def query_data_api_once(api_address, timeout, requests_session):
session=requests_session,
# If the error is a HTTP/404 or a ConnectionError, go into raise
# block below and don't bother retrying.
- exception_cb=lambda _, exc: exc.code != 404 and (
+ exception_cb=lambda _, exc: exc.code != 404
+ and (
not isinstance(exc.cause, requests.exceptions.ConnectionError)
- )
+ ),
)
return util.decode_binary(resp.contents)
except url_helper.UrlError as exc:
@@ -143,25 +144,22 @@ def query_data_api(api_type, api_address, retries, timeout):
for port in range(1, max(retries, 2)):
try:
LOG.debug(
- 'Trying to get %s data (bind on port %d)...',
- api_type, port
+ "Trying to get %s data (bind on port %d)...", api_type, port
)
requests_session = requests.Session()
requests_session.mount(
- 'http://',
- SourceAddressAdapter(source_address=('0.0.0.0', port))
+ "http://",
+ SourceAddressAdapter(source_address=("0.0.0.0", port)),
)
data = query_data_api_once(
- api_address,
- timeout=timeout,
- requests_session=requests_session
+ api_address, timeout=timeout, requests_session=requests_session
)
- LOG.debug('%s-data downloaded', api_type)
+ LOG.debug("%s-data downloaded", api_type)
return data
except url_helper.UrlError as exc:
# Local port already in use or HTTP/429.
- LOG.warning('Error while trying to get %s data: %s', api_type, exc)
+ LOG.warning("Error while trying to get %s data: %s", api_type, exc)
time.sleep(5)
last_exc = exc
continue
@@ -176,40 +174,40 @@ class DataSourceScaleway(sources.DataSource):
EventScope.NETWORK: {
EventType.BOOT_NEW_INSTANCE,
EventType.BOOT,
- EventType.BOOT_LEGACY
+ EventType.BOOT_LEGACY,
}
}
def __init__(self, sys_cfg, distro, paths):
super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}),
- BUILTIN_DS_CONFIG
- ])
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
- self.metadata_address = self.ds_cfg['metadata_url']
- self.userdata_address = self.ds_cfg['userdata_url']
- self.vendordata_address = self.ds_cfg['vendordata_url']
+ self.metadata_address = self.ds_cfg["metadata_url"]
+ self.userdata_address = self.ds_cfg["userdata_url"]
+ self.vendordata_address = self.ds_cfg["vendordata_url"]
- self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
- self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
+ self.retries = int(self.ds_cfg.get("retries", DEF_MD_RETRIES))
+ self.timeout = int(self.ds_cfg.get("timeout", DEF_MD_TIMEOUT))
self._fallback_interface = None
self._network_config = sources.UNSET
def _crawl_metadata(self):
- resp = url_helper.readurl(self.metadata_address,
- timeout=self.timeout,
- retries=self.retries)
+ resp = url_helper.readurl(
+ self.metadata_address, timeout=self.timeout, retries=self.retries
+ )
self.metadata = json.loads(util.decode_binary(resp.contents))
self.userdata_raw = query_data_api(
- 'user-data', self.userdata_address,
- self.retries, self.timeout
+ "user-data", self.userdata_address, self.retries, self.timeout
)
self.vendordata_raw = query_data_api(
- 'vendor-data', self.vendordata_address,
- self.retries, self.timeout
+ "vendor-data", self.vendordata_address, self.retries, self.timeout
)
def _get_data(self):
@@ -221,8 +219,10 @@ class DataSourceScaleway(sources.DataSource):
try:
with EphemeralDHCPv4(self._fallback_interface):
util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self._crawl_metadata)
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self._crawl_metadata,
+ )
except (NoDHCPLeaseError) as e:
util.logexc(LOG, str(e))
return False
@@ -235,8 +235,10 @@ class DataSourceScaleway(sources.DataSource):
metadata API.
"""
if self._network_config is None:
- LOG.warning('Found None as cached _network_config. '
- 'Resetting to %s', sources.UNSET)
+ LOG.warning(
+ "Found None as cached _network_config. Resetting to %s",
+ sources.UNSET,
+ )
self._network_config = sources.UNSET
if self._network_config != sources.UNSET:
@@ -245,16 +247,19 @@ class DataSourceScaleway(sources.DataSource):
if self._fallback_interface is None:
self._fallback_interface = net.find_fallback_nic()
- netcfg = {'type': 'physical', 'name': '%s' % self._fallback_interface}
- subnets = [{'type': 'dhcp4'}]
- if self.metadata['ipv6']:
- subnets += [{'type': 'static',
- 'address': '%s' % self.metadata['ipv6']['address'],
- 'gateway': '%s' % self.metadata['ipv6']['gateway'],
- 'netmask': '%s' % self.metadata['ipv6']['netmask'],
- }]
- netcfg['subnets'] = subnets
- self._network_config = {'version': 1, 'config': [netcfg]}
+ netcfg = {"type": "physical", "name": "%s" % self._fallback_interface}
+ subnets = [{"type": "dhcp4"}]
+ if self.metadata["ipv6"]:
+ subnets += [
+ {
+ "type": "static",
+ "address": "%s" % self.metadata["ipv6"]["address"],
+ "gateway": "%s" % self.metadata["ipv6"]["gateway"],
+ "netmask": "%s" % self.metadata["ipv6"]["netmask"],
+ }
+ ]
+ netcfg["subnets"] = subnets
+ self._network_config = {"version": 1, "config": [netcfg]}
return self._network_config
@property
@@ -262,14 +267,14 @@ class DataSourceScaleway(sources.DataSource):
return None
def get_instance_id(self):
- return self.metadata['id']
+ return self.metadata["id"]
def get_public_ssh_keys(self):
- ssh_keys = [key['key'] for key in self.metadata['ssh_public_keys']]
+ ssh_keys = [key["key"] for key in self.metadata["ssh_public_keys"]]
akeypre = "AUTHORIZED_KEY="
plen = len(akeypre)
- for tag in self.metadata.get('tags', []):
+ for tag in self.metadata.get("tags", []):
if not tag.startswith(akeypre):
continue
ssh_keys.append(tag[:plen].replace("_", " "))
@@ -277,7 +282,7 @@ class DataSourceScaleway(sources.DataSource):
return ssh_keys
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
- return self.metadata['hostname']
+ return self.metadata["hostname"]
@property
def availability_zone(self):
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 9b16bf8d..40f915fa 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -32,55 +32,51 @@ import socket
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import serial
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import serial, sources, subp, util
from cloudinit.event import EventScope, EventType
LOG = logging.getLogger(__name__)
SMARTOS_ATTRIB_MAP = {
# Cloud-init Key : (SmartOS Key, Strip line endings)
- 'instance-id': ('sdc:uuid', True),
- 'local-hostname': ('hostname', True),
- 'public-keys': ('root_authorized_keys', True),
- 'user-script': ('user-script', False),
- 'legacy-user-data': ('user-data', False),
- 'user-data': ('cloud-init:user-data', False),
- 'iptables_disable': ('iptables_disable', True),
- 'motd_sys_info': ('motd_sys_info', True),
- 'availability_zone': ('sdc:datacenter_name', True),
- 'vendor-data': ('sdc:vendor-data', False),
- 'operator-script': ('sdc:operator-script', False),
- 'hostname': ('sdc:hostname', True),
- 'dns_domain': ('sdc:dns_domain', True),
+ "instance-id": ("sdc:uuid", True),
+ "local-hostname": ("hostname", True),
+ "public-keys": ("root_authorized_keys", True),
+ "user-script": ("user-script", False),
+ "legacy-user-data": ("user-data", False),
+ "user-data": ("cloud-init:user-data", False),
+ "iptables_disable": ("iptables_disable", True),
+ "motd_sys_info": ("motd_sys_info", True),
+ "availability_zone": ("sdc:datacenter_name", True),
+ "vendor-data": ("sdc:vendor-data", False),
+ "operator-script": ("sdc:operator-script", False),
+ "hostname": ("sdc:hostname", True),
+ "dns_domain": ("sdc:dns_domain", True),
}
SMARTOS_ATTRIB_JSON = {
# Cloud-init Key : (SmartOS Key known JSON)
- 'network-data': 'sdc:nics',
- 'dns_servers': 'sdc:resolvers',
- 'routes': 'sdc:routes',
+ "network-data": "sdc:nics",
+ "dns_servers": "sdc:resolvers",
+ "routes": "sdc:routes",
}
SMARTOS_ENV_LX_BRAND = "lx-brand"
SMARTOS_ENV_KVM = "kvm"
-DS_NAME = 'SmartOS'
-DS_CFG_PATH = ['datasource', DS_NAME]
+DS_NAME = "SmartOS"
+DS_CFG_PATH = ["datasource", DS_NAME]
NO_BASE64_DECODE = [
- 'iptables_disable',
- 'motd_sys_info',
- 'root_authorized_keys',
- 'sdc:datacenter_name',
- 'sdc:uuid'
- 'user-data',
- 'user-script',
+ "iptables_disable",
+ "motd_sys_info",
+ "root_authorized_keys",
+ "sdc:datacenter_name",
+ "sdc:uuiduser-data",
+ "user-script",
]
-METADATA_SOCKFILE = '/native/.zonecontrol/metadata.sock'
-SERIAL_DEVICE = '/dev/ttyS1'
+METADATA_SOCKFILE = "/native/.zonecontrol/metadata.sock"
+SERIAL_DEVICE = "/dev/ttyS1"
SERIAL_TIMEOUT = 60
# BUILT-IN DATASOURCE CONFIGURATION
@@ -98,24 +94,26 @@ SERIAL_TIMEOUT = 60
# fs_setup: describes how to format the ephemeral drive
#
BUILTIN_DS_CONFIG = {
- 'serial_device': SERIAL_DEVICE,
- 'serial_timeout': SERIAL_TIMEOUT,
- 'metadata_sockfile': METADATA_SOCKFILE,
- 'no_base64_decode': NO_BASE64_DECODE,
- 'base64_keys': [],
- 'base64_all': False,
- 'disk_aliases': {'ephemeral0': '/dev/vdb'},
+ "serial_device": SERIAL_DEVICE,
+ "serial_timeout": SERIAL_TIMEOUT,
+ "metadata_sockfile": METADATA_SOCKFILE,
+ "no_base64_decode": NO_BASE64_DECODE,
+ "base64_keys": [],
+ "base64_all": False,
+ "disk_aliases": {"ephemeral0": "/dev/vdb"},
}
BUILTIN_CLOUD_CONFIG = {
- 'disk_setup': {
- 'ephemeral0': {'table_type': 'mbr',
- 'layout': False,
- 'overwrite': False}
+ "disk_setup": {
+ "ephemeral0": {
+ "table_type": "mbr",
+ "layout": False,
+ "overwrite": False,
+ }
},
- 'fs_setup': [{'label': 'ephemeral0',
- 'filesystem': 'ext4',
- 'device': 'ephemeral0'}],
+ "fs_setup": [
+ {"label": "ephemeral0", "filesystem": "ext4", "device": "ephemeral0"}
+ ],
}
# builtin vendor-data is a boothook that writes a script into
@@ -170,18 +168,23 @@ class DataSourceSmartOS(sources.DataSource):
smartos_type = sources.UNSET
md_client = sources.UNSET
- default_update_events = {EventScope.NETWORK: {
- EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT,
- EventType.BOOT_LEGACY
- }}
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.ds_cfg = util.mergemanydict([
- self.ds_cfg,
- util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
- BUILTIN_DS_CONFIG])
+ self.ds_cfg = util.mergemanydict(
+ [
+ self.ds_cfg,
+ util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
self.metadata = {}
self.network_data = None
@@ -204,25 +207,28 @@ class DataSourceSmartOS(sources.DataSource):
if self.md_client == sources.UNSET:
self.md_client = jmc_client_factory(
smartos_type=self.smartos_type,
- metadata_sockfile=self.ds_cfg['metadata_sockfile'],
- serial_device=self.ds_cfg['serial_device'],
- serial_timeout=self.ds_cfg['serial_timeout'])
+ metadata_sockfile=self.ds_cfg["metadata_sockfile"],
+ serial_device=self.ds_cfg["serial_device"],
+ serial_timeout=self.ds_cfg["serial_timeout"],
+ )
def _set_provisioned(self):
- '''Mark the instance provisioning state as successful.
+ """Mark the instance provisioning state as successful.
When run in a zone, the host OS will look for /var/svc/provisioning
to be renamed as /var/svc/provision_success. This should be done
after meta-data is successfully retrieved and from this point
the host considers the provision of the zone to be a success and
keeps the zone running.
- '''
+ """
- LOG.debug('Instance provisioning state set as successful')
- svc_path = '/var/svc'
- if os.path.exists('/'.join([svc_path, 'provisioning'])):
- os.rename('/'.join([svc_path, 'provisioning']),
- '/'.join([svc_path, 'provision_success']))
+ LOG.debug("Instance provisioning state set as successful")
+ svc_path = "/var/svc"
+ if os.path.exists("/".join([svc_path, "provisioning"])):
+ os.rename(
+ "/".join([svc_path, "provisioning"]),
+ "/".join([svc_path, "provision_success"]),
+ )
def _get_data(self):
self._init()
@@ -235,8 +241,10 @@ class DataSourceSmartOS(sources.DataSource):
return False
if not self.md_client.exists():
- LOG.debug("No metadata device '%r' found for SmartOS datasource",
- self.md_client)
+ LOG.debug(
+ "No metadata device '%r' found for SmartOS datasource",
+ self.md_client,
+ )
return False
# Open once for many requests, rather than once for each request
@@ -259,24 +267,33 @@ class DataSourceSmartOS(sources.DataSource):
# We write 'user-script' and 'operator-script' into the
# instance/data directory. The default vendor-data then handles
# executing them later.
- data_d = os.path.join(self.paths.get_cpath(), 'instances',
- md['instance-id'], 'data')
- user_script = os.path.join(data_d, 'user-script')
+ data_d = os.path.join(
+ self.paths.get_cpath(), "instances", md["instance-id"], "data"
+ )
+ user_script = os.path.join(data_d, "user-script")
u_script_l = "%s/user-script" % LEGACY_USER_D
- write_boot_content(md.get('user-script'), content_f=user_script,
- link=u_script_l, shebang=True, mode=0o700)
-
- operator_script = os.path.join(data_d, 'operator-script')
- write_boot_content(md.get('operator-script'),
- content_f=operator_script, shebang=False,
- mode=0o700)
+ write_boot_content(
+ md.get("user-script"),
+ content_f=user_script,
+ link=u_script_l,
+ shebang=True,
+ mode=0o700,
+ )
+
+ operator_script = os.path.join(data_d, "operator-script")
+ write_boot_content(
+ md.get("operator-script"),
+ content_f=operator_script,
+ shebang=False,
+ mode=0o700,
+ )
# @datadictionary: This key has no defined format, but its value
# is written to the file /var/db/mdata-user-data on each boot prior
# to the phase that runs user-script. This file is not to be executed.
# This allows a configuration file of some kind to be injected into
# the machine to be consumed by the user-script when it runs.
- u_data = md.get('legacy-user-data')
+ u_data = md.get("legacy-user-data")
u_data_f = "%s/mdata-user-data" % LEGACY_USER_D
write_boot_content(u_data, u_data_f)
@@ -284,38 +301,39 @@ class DataSourceSmartOS(sources.DataSource):
# The hostname may or may not be qualified with the local domain name.
# This follows section 3.14 of RFC 2132.
- if not md['local-hostname']:
- if md['hostname']:
- md['local-hostname'] = md['hostname']
+ if not md["local-hostname"]:
+ if md["hostname"]:
+ md["local-hostname"] = md["hostname"]
else:
- md['local-hostname'] = md['instance-id']
+ md["local-hostname"] = md["instance-id"]
ud = None
- if md['user-data']:
- ud = md['user-data']
-
- if not md['vendor-data']:
- md['vendor-data'] = BUILTIN_VENDOR_DATA % {
- 'user_script': user_script,
- 'operator_script': operator_script,
- 'per_boot_d': os.path.join(self.paths.get_cpath("scripts"),
- 'per-boot'),
+ if md["user-data"]:
+ ud = md["user-data"]
+
+ if not md["vendor-data"]:
+ md["vendor-data"] = BUILTIN_VENDOR_DATA % {
+ "user_script": user_script,
+ "operator_script": operator_script,
+ "per_boot_d": os.path.join(
+ self.paths.get_cpath("scripts"), "per-boot"
+ ),
}
self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
- self.vendordata_raw = md['vendor-data']
- self.network_data = md['network-data']
- self.routes_data = md['routes']
+ self.vendordata_raw = md["vendor-data"]
+ self.network_data = md["network-data"]
+ self.routes_data = md["routes"]
self._set_provisioned()
return True
def _get_subplatform(self):
- return 'serial (%s)' % SERIAL_DEVICE
+ return "serial (%s)" % SERIAL_DEVICE
def device_name_to_device(self, name):
- return self.ds_cfg['disk_aliases'].get(name)
+ return self.ds_cfg["disk_aliases"].get(name)
def get_config_obj(self):
if self.smartos_type == SMARTOS_ENV_KVM:
@@ -323,7 +341,7 @@ class DataSourceSmartOS(sources.DataSource):
return {}
def get_instance_id(self):
- return self.metadata['instance-id']
+ return self.metadata["instance-id"]
@property
def network_config(self):
@@ -333,12 +351,12 @@ class DataSourceSmartOS(sources.DataSource):
if self._network_config is None:
if self.network_data is not None:
- self._network_config = (
- convert_smartos_network_data(
- network_data=self.network_data,
- dns_servers=self.metadata['dns_servers'],
- dns_domain=self.metadata['dns_domain'],
- routes=self.routes_data))
+ self._network_config = convert_smartos_network_data(
+ network_data=self.network_data,
+ dns_servers=self.metadata["dns_servers"],
+ dns_domain=self.metadata["dns_domain"],
+ routes=self.routes_data,
+ )
return self._network_config
@@ -357,10 +375,12 @@ class JoyentMetadataClient(object):
The full specification can be found at
http://eng.joyent.com/mdata/protocol.html
"""
+
line_regex = re.compile(
- r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)'
- r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
- r'( (?P<payload>.+))?)')
+ r"V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)"
+ r" (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)"
+ r"( (?P<payload>.+))?)"
+ )
def __init__(self, smartos_type=None, fp=None):
if smartos_type is None:
@@ -369,43 +389,50 @@ class JoyentMetadataClient(object):
self.fp = fp
def _checksum(self, body):
- return '{0:08x}'.format(
- binascii.crc32(body.encode('utf-8')) & 0xffffffff)
+ return "{0:08x}".format(
+ binascii.crc32(body.encode("utf-8")) & 0xFFFFFFFF
+ )
def _get_value_from_frame(self, expected_request_id, frame):
frame_data = self.line_regex.match(frame).groupdict()
- if int(frame_data['length']) != len(frame_data['body']):
+ if int(frame_data["length"]) != len(frame_data["body"]):
raise JoyentMetadataFetchException(
- 'Incorrect frame length given ({0} != {1}).'.format(
- frame_data['length'], len(frame_data['body'])))
- expected_checksum = self._checksum(frame_data['body'])
- if frame_data['checksum'] != expected_checksum:
+ "Incorrect frame length given ({0} != {1}).".format(
+ frame_data["length"], len(frame_data["body"])
+ )
+ )
+ expected_checksum = self._checksum(frame_data["body"])
+ if frame_data["checksum"] != expected_checksum:
raise JoyentMetadataFetchException(
- 'Invalid checksum (expected: {0}; got {1}).'.format(
- expected_checksum, frame_data['checksum']))
- if frame_data['request_id'] != expected_request_id:
+ "Invalid checksum (expected: {0}; got {1}).".format(
+ expected_checksum, frame_data["checksum"]
+ )
+ )
+ if frame_data["request_id"] != expected_request_id:
raise JoyentMetadataFetchException(
- 'Request ID mismatch (expected: {0}; got {1}).'.format(
- expected_request_id, frame_data['request_id']))
- if not frame_data.get('payload', None):
- LOG.debug('No value found.')
+ "Request ID mismatch (expected: {0}; got {1}).".format(
+ expected_request_id, frame_data["request_id"]
+ )
+ )
+ if not frame_data.get("payload", None):
+ LOG.debug("No value found.")
return None
- value = util.b64d(frame_data['payload'])
+ value = util.b64d(frame_data["payload"])
LOG.debug('Value "%s" found.', value)
return value
def _readline(self):
"""
- Reads a line a byte at a time until \n is encountered. Returns an
- ascii string with the trailing newline removed.
+ Reads a line a byte at a time until \n is encountered. Returns an
+ ascii string with the trailing newline removed.
- If a timeout (per-byte) is set and it expires, a
- JoyentMetadataFetchException will be thrown.
+ If a timeout (per-byte) is set and it expires, a
+ JoyentMetadataFetchException will be thrown.
"""
response = []
def as_ascii():
- return b''.join(response).decode('ascii')
+ return b"".join(response).decode("ascii")
msg = "Partial response: '%s'"
while True:
@@ -413,7 +440,7 @@ class JoyentMetadataClient(object):
byte = self.fp.read(1)
if len(byte) == 0:
raise JoyentMetadataTimeoutException(msg % as_ascii())
- if byte == b'\n':
+ if byte == b"\n":
return as_ascii()
response.append(byte)
except OSError as exc:
@@ -424,26 +451,33 @@ class JoyentMetadataClient(object):
raise
def _write(self, msg):
- self.fp.write(msg.encode('ascii'))
+ self.fp.write(msg.encode("ascii"))
self.fp.flush()
def _negotiate(self):
- LOG.debug('Negotiating protocol V2')
- self._write('NEGOTIATE V2\n')
+ LOG.debug("Negotiating protocol V2")
+ self._write("NEGOTIATE V2\n")
response = self._readline()
LOG.debug('read "%s"', response)
- if response != 'V2_OK':
+ if response != "V2_OK":
raise JoyentMetadataFetchException(
- 'Invalid response "%s" to "NEGOTIATE V2"' % response)
- LOG.debug('Negotiation complete')
+ 'Invalid response "%s" to "NEGOTIATE V2"' % response
+ )
+ LOG.debug("Negotiation complete")
def request(self, rtype, param=None):
- request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
- message_body = ' '.join((request_id, rtype,))
+ request_id = "{0:08x}".format(random.randint(0, 0xFFFFFFFF))
+ message_body = " ".join(
+ (
+ request_id,
+ rtype,
+ )
+ )
if param:
- message_body += ' ' + base64.b64encode(param.encode()).decode()
- msg = 'V2 {0} {1} {2}\n'.format(
- len(message_body), self._checksum(message_body), message_body)
+ message_body += " " + base64.b64encode(param.encode()).decode()
+ msg = "V2 {0} {1} {2}\n".format(
+ len(message_body), self._checksum(message_body), message_body
+ )
LOG.debug('Writing "%s" to metadata transport.', msg)
need_close = False
@@ -458,14 +492,14 @@ class JoyentMetadataClient(object):
LOG.debug('Read "%s" from metadata transport.', response)
- if 'SUCCESS' not in response:
+ if "SUCCESS" not in response:
return None
value = self._get_value_from_frame(request_id, response)
return value
def get(self, key, default=None, strip=False):
- result = self.request(rtype='GET', param=key)
+ result = self.request(rtype="GET", param=key)
if result is None:
return default
if result and strip:
@@ -479,18 +513,19 @@ class JoyentMetadataClient(object):
return json.loads(result)
def list(self):
- result = self.request(rtype='KEYS')
+ result = self.request(rtype="KEYS")
if not result:
return []
- return result.split('\n')
+ return result.split("\n")
def put(self, key, val):
- param = b' '.join([base64.b64encode(i.encode())
- for i in (key, val)]).decode()
- return self.request(rtype='PUT', param=param)
+ param = b" ".join(
+ [base64.b64encode(i.encode()) for i in (key, val)]
+ ).decode()
+ return self.request(rtype="PUT", param=param)
def delete(self, key):
- return self.request(rtype='DELETE', param=key)
+ return self.request(rtype="DELETE", param=key)
def close_transport(self):
if self.fp:
@@ -519,7 +554,7 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
def open_transport(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.socketpath)
- self.fp = sock.makefile('rwb')
+ self.fp = sock.makefile("rwb")
self._negotiate()
def exists(self):
@@ -530,8 +565,9 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
class JoyentMetadataSerialClient(JoyentMetadataClient):
- def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM,
- fp=None):
+ def __init__(
+ self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM, fp=None
+ ):
super(JoyentMetadataSerialClient, self).__init__(smartos_type, fp)
self.device = device
self.timeout = timeout
@@ -550,7 +586,7 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
self._negotiate()
def _flush(self):
- LOG.debug('Flushing input')
+ LOG.debug("Flushing input")
# Read any pending data
timeout = self.fp.timeout
self.fp.timeout = 0.1
@@ -559,7 +595,7 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
self._readline()
except JoyentMetadataTimeoutException:
break
- LOG.debug('Input empty')
+ LOG.debug("Input empty")
# Send a newline and expect "invalid command". Keep trying until
# successful. Retry rather frequently so that the "Is the host
@@ -571,24 +607,29 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
self.fp.timeout = timeout
while True:
LOG.debug('Writing newline, expecting "invalid command"')
- self._write('\n')
+ self._write("\n")
try:
response = self._readline()
- if response == 'invalid command':
+ if response == "invalid command":
break
- if response == 'FAILURE':
+ if response == "FAILURE":
LOG.debug('Got "FAILURE". Retrying.')
continue
LOG.warning('Unexpected response "%s" during flush', response)
except JoyentMetadataTimeoutException:
- LOG.warning('Timeout while initializing metadata client. '
- 'Is the host metadata service running?')
+ LOG.warning(
+ "Timeout while initializing metadata client. "
+ "Is the host metadata service running?"
+ )
LOG.debug('Got "invalid command". Flush complete.')
self.fp.timeout = timeout
def __repr__(self):
return "%s(device=%s, timeout=%s)" % (
- self.__class__.__name__, self.device, self.timeout)
+ self.__class__.__name__,
+ self.device,
+ self.timeout,
+ )
class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
@@ -620,7 +661,7 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
keys = None
if self.base64_all is None:
keys = self.list()
- if 'base64_all' in keys:
+ if "base64_all" in keys:
self.base64_all = util.is_true(self._get("base64_all"))
else:
self.base64_all = False
@@ -633,7 +674,7 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
if keys is None:
keys = self.list()
b64_keys = set()
- if 'base64_keys' in keys:
+ if "base64_keys" in keys:
b64_keys = set(self._get("base64_keys").split(","))
# now add any b64-<keyname> that has a true value
@@ -647,8 +688,9 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
self.base64_keys = b64_keys
def _get(self, key, default=None, strip=False):
- return (super(JoyentMetadataLegacySerialClient, self).
- get(key, default=default, strip=strip))
+ return super(JoyentMetadataLegacySerialClient, self).get(
+ key, default=default, strip=strip
+ )
def is_b64_encoded(self, key, reset=False):
if key in NO_BASE64_DECODE:
@@ -680,9 +722,12 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
def jmc_client_factory(
- smartos_type=None, metadata_sockfile=METADATA_SOCKFILE,
- serial_device=SERIAL_DEVICE, serial_timeout=SERIAL_TIMEOUT,
- uname_version=None):
+ smartos_type=None,
+ metadata_sockfile=METADATA_SOCKFILE,
+ serial_device=SERIAL_DEVICE,
+ serial_timeout=SERIAL_TIMEOUT,
+ uname_version=None,
+):
if smartos_type is None:
smartos_type = get_smartos_environ(uname_version)
@@ -691,11 +736,14 @@ def jmc_client_factory(
return None
elif smartos_type == SMARTOS_ENV_KVM:
return JoyentMetadataLegacySerialClient(
- device=serial_device, timeout=serial_timeout,
- smartos_type=smartos_type)
+ device=serial_device,
+ timeout=serial_timeout,
+ smartos_type=smartos_type,
+ )
elif smartos_type == SMARTOS_ENV_LX_BRAND:
- return JoyentMetadataSocketClient(socketpath=metadata_sockfile,
- smartos_type=smartos_type)
+ return JoyentMetadataSocketClient(
+ socketpath=metadata_sockfile, smartos_type=smartos_type
+ )
raise ValueError("Unknown value for smartos_type: %s" % smartos_type)
@@ -708,12 +756,14 @@ def identify_file(content_f):
LOG.debug("script %s mime type is %s", content_f, f_type)
except subp.ProcessExecutionError as e:
util.logexc(
- LOG, ("Failed to identify script type for %s" % content_f, e))
+ LOG, ("Failed to identify script type for %s" % content_f, e)
+ )
return None if f_type is None else f_type.strip()
-def write_boot_content(content, content_f, link=None, shebang=False,
- mode=0o400):
+def write_boot_content(
+ content, content_f, link=None, shebang=False, mode=0o400
+):
"""
Write the content to content_f. Under the following rules:
1. If no content, remove the file
@@ -747,7 +797,8 @@ def write_boot_content(content, content_f, link=None, shebang=False,
f_type = identify_file(content_f)
if f_type == "text/plain":
util.write_file(
- content_f, "\n".join(["#!/bin/bash", content]), mode=mode)
+ content_f, "\n".join(["#!/bin/bash", content]), mode=mode
+ )
LOG.debug("added shebang to file %s", content_f)
if link:
@@ -768,7 +819,7 @@ def get_smartos_environ(uname_version=None, product_name=None):
# report 'BrandZ virtual linux' as the kernel version
if uname_version is None:
uname_version = uname[3]
- if uname_version == 'BrandZ virtual linux':
+ if uname_version == "BrandZ virtual linux":
return SMARTOS_ENV_LX_BRAND
if product_name is None:
@@ -776,16 +827,16 @@ def get_smartos_environ(uname_version=None, product_name=None):
else:
system_type = product_name
- if system_type and system_type.startswith('SmartDC'):
+ if system_type and system_type.startswith("SmartDC"):
return SMARTOS_ENV_KVM
return None
# Convert SMARTOS 'sdc:nics' data to network_config yaml
-def convert_smartos_network_data(network_data=None,
- dns_servers=None, dns_domain=None,
- routes=None):
+def convert_smartos_network_data(
+ network_data=None, dns_servers=None, dns_domain=None, routes=None
+):
"""Return a dictionary of network_config by parsing provided
SMARTOS sdc:nics configuration data
@@ -810,28 +861,28 @@ def convert_smartos_network_data(network_data=None,
"""
valid_keys = {
- 'physical': [
- 'mac_address',
- 'mtu',
- 'name',
- 'params',
- 'subnets',
- 'type',
+ "physical": [
+ "mac_address",
+ "mtu",
+ "name",
+ "params",
+ "subnets",
+ "type",
],
- 'subnet': [
- 'address',
- 'broadcast',
- 'dns_nameservers',
- 'dns_search',
- 'metric',
- 'pointopoint',
- 'routes',
- 'scope',
- 'type',
+ "subnet": [
+ "address",
+ "broadcast",
+ "dns_nameservers",
+ "dns_search",
+ "metric",
+ "pointopoint",
+ "routes",
+ "scope",
+ "type",
],
- 'route': [
- 'network',
- 'gateway',
+ "route": [
+ "network",
+ "gateway",
],
}
@@ -851,56 +902,64 @@ def convert_smartos_network_data(network_data=None,
routes = []
def is_valid_ipv4(addr):
- return '.' in addr
+ return "." in addr
def is_valid_ipv6(addr):
- return ':' in addr
+ return ":" in addr
pgws = {
- 'ipv4': {'match': is_valid_ipv4, 'gw': None},
- 'ipv6': {'match': is_valid_ipv6, 'gw': None},
+ "ipv4": {"match": is_valid_ipv4, "gw": None},
+ "ipv6": {"match": is_valid_ipv6, "gw": None},
}
config = []
for nic in network_data:
- cfg = dict((k, v) for k, v in nic.items()
- if k in valid_keys['physical'])
- cfg.update({
- 'type': 'physical',
- 'name': nic['interface']})
- if 'mac' in nic:
- cfg.update({'mac_address': nic['mac']})
+ cfg = dict(
+ (k, v) for k, v in nic.items() if k in valid_keys["physical"]
+ )
+ cfg.update({"type": "physical", "name": nic["interface"]})
+ if "mac" in nic:
+ cfg.update({"mac_address": nic["mac"]})
subnets = []
- for ip in nic.get('ips', []):
+ for ip in nic.get("ips", []):
if ip == "dhcp":
- subnet = {'type': 'dhcp4'}
+ subnet = {"type": "dhcp4"}
else:
routeents = []
- subnet = dict((k, v) for k, v in nic.items()
- if k in valid_keys['subnet'])
- subnet.update({
- 'type': 'static',
- 'address': ip,
- })
-
- proto = 'ipv4' if is_valid_ipv4(ip) else 'ipv6'
+ subnet = dict(
+ (k, v) for k, v in nic.items() if k in valid_keys["subnet"]
+ )
+ subnet.update(
+ {
+ "type": "static",
+ "address": ip,
+ }
+ )
+
+ proto = "ipv4" if is_valid_ipv4(ip) else "ipv6"
# Only use gateways for 'primary' nics
- if 'primary' in nic and nic.get('primary', False):
+ if "primary" in nic and nic.get("primary", False):
# the ips and gateways list may be N to M, here
# we map the ip index into the gateways list,
# and handle the case that we could have more ips
# than gateways. we only consume the first gateway
- if not pgws[proto]['gw']:
- gateways = [gw for gw in nic.get('gateways', [])
- if pgws[proto]['match'](gw)]
+ if not pgws[proto]["gw"]:
+ gateways = [
+ gw
+ for gw in nic.get("gateways", [])
+ if pgws[proto]["match"](gw)
+ ]
if len(gateways):
- pgws[proto]['gw'] = gateways[0]
- subnet.update({'gateway': pgws[proto]['gw']})
+ pgws[proto]["gw"] = gateways[0]
+ subnet.update({"gateway": pgws[proto]["gw"]})
for route in routes:
- rcfg = dict((k, v) for k, v in route.items()
- if k in valid_keys['route'])
+ rcfg = dict(
+ (k, v)
+ for k, v in route.items()
+ if k in valid_keys["route"]
+ )
# Linux uses the value of 'gateway' to determine
# automatically if the route is a forward/next-hop
# (non-local IP for gateway) or an interface/resolver
@@ -913,25 +972,29 @@ def convert_smartos_network_data(network_data=None,
# to see if it's in the prefix. We can then smartly
# add or not-add this route. But for now,
# when in doubt, use brute force! Routes for everyone!
- rcfg.update({'network': route['dst']})
+ rcfg.update({"network": route["dst"]})
routeents.append(rcfg)
- subnet.update({'routes': routeents})
+ subnet.update({"routes": routeents})
subnets.append(subnet)
- cfg.update({'subnets': subnets})
+ cfg.update({"subnets": subnets})
config.append(cfg)
if dns_servers:
config.append(
- {'type': 'nameserver', 'address': dns_servers,
- 'search': dns_domain})
+ {
+ "type": "nameserver",
+ "address": dns_servers,
+ "search": dns_domain,
+ }
+ )
- return {'version': 1, 'config': config}
+ return {"version": 1, "config": config}
# Used to match classes to dependencies
datasources = [
- (DataSourceSmartOS, (sources.DEP_FILESYSTEM, )),
+ (DataSourceSmartOS, (sources.DEP_FILESYSTEM,)),
]
@@ -942,13 +1005,17 @@ def get_datasource_list(depends):
if __name__ == "__main__":
import sys
+
jmc = jmc_client_factory()
if jmc is None:
print("Do not appear to be on smartos.")
sys.exit(1)
if len(sys.argv) == 1:
- keys = (list(SMARTOS_ATTRIB_JSON.keys()) +
- list(SMARTOS_ATTRIB_MAP.keys()) + ['network_config'])
+ keys = (
+ list(SMARTOS_ATTRIB_JSON.keys())
+ + list(SMARTOS_ATTRIB_MAP.keys())
+ + ["network_config"]
+ )
else:
keys = sys.argv[1:]
@@ -960,14 +1027,19 @@ if __name__ == "__main__":
keyname = SMARTOS_ATTRIB_JSON[key]
data[key] = client.get_json(keyname)
elif key == "network_config":
- for depkey in ('network-data', 'dns_servers', 'dns_domain',
- 'routes'):
+ for depkey in (
+ "network-data",
+ "dns_servers",
+ "dns_domain",
+ "routes",
+ ):
load_key(client, depkey, data)
data[key] = convert_smartos_network_data(
- network_data=data['network-data'],
- dns_servers=data['dns_servers'],
- dns_domain=data['dns_domain'],
- routes=data['routes'])
+ network_data=data["network-data"],
+ dns_servers=data["dns_servers"],
+ dns_domain=data["dns_domain"],
+ routes=data["routes"],
+ )
else:
if key in SMARTOS_ATTRIB_MAP:
keyname, strip = SMARTOS_ATTRIB_MAP[key]
@@ -981,7 +1053,6 @@ if __name__ == "__main__":
for key in keys:
load_key(client=jmc, key=key, data=data)
- print(json.dumps(data, indent=1, sort_keys=True,
- separators=(',', ': ')))
+ print(json.dumps(data, indent=1, sort_keys=True, separators=(",", ": ")))
# vi: ts=4 expandtab
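
Aside, for readers skimming the SmartOS hunks above: the reformatted request() and _checksum() methods implement the Joyent V2 framing, in which each request is written to the transport as "V2 <length> <crc32> <request_id> <rtype> [<base64 param>]\n". The following standalone sketch (not part of the diff) shows that framing for a hypothetical GET of a "hostname" key; the key name and rtype here are illustrative placeholders only.

import base64
import binascii
import random

def checksum(body):
    # 8-digit lowercase hex CRC32 of the frame body, as in _checksum() above
    return "{0:08x}".format(binascii.crc32(body.encode("utf-8")) & 0xFFFFFFFF)

request_id = "{0:08x}".format(random.randint(0, 0xFFFFFFFF))
message_body = " ".join((request_id, "GET"))
message_body += " " + base64.b64encode(b"hostname").decode()
frame = "V2 {0} {1} {2}\n".format(
    len(message_body), checksum(message_body), message_body
)
print(frame)  # -> "V2 <len> <crc32> <request_id> GET <base64-of-'hostname'>"
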
diff --git a/cloudinit/sources/DataSourceUpCloud.py b/cloudinit/sources/DataSourceUpCloud.py
index 209b9672..f4b78da5 100644
--- a/cloudinit/sources/DataSourceUpCloud.py
+++ b/cloudinit/sources/DataSourceUpCloud.py
@@ -6,12 +6,9 @@
# https://developers.upcloud.com/1.3/8-servers/#metadata-service
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
from cloudinit import net as cloudnet
+from cloudinit import sources, util
from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-
-
from cloudinit.sources.helpers import upcloud as uc_helper
LOG = logging.getLogger(__name__)
@@ -152,7 +149,7 @@ class DataSourceUpCloudLocal(DataSourceUpCloud):
# Used to match classes to dependencies
datasources = [
- (DataSourceUpCloudLocal, (sources.DEP_FILESYSTEM, )),
+ (DataSourceUpCloudLocal, (sources.DEP_FILESYSTEM,)),
(DataSourceUpCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
diff --git a/cloudinit/sources/DataSourceVMware.py b/cloudinit/sources/DataSourceVMware.py
index 22ca63de..ed7f487a 100644
--- a/cloudinit/sources/DataSourceVMware.py
+++ b/cloudinit/sources/DataSourceVMware.py
@@ -63,20 +63,19 @@ Netifaces (https://github.com/al45tair/netifaces)
import collections
import copy
-from distutils.spawn import find_executable
import ipaddress
import json
import os
import socket
import time
-
-from cloudinit import dmi, log as logging
-from cloudinit import sources
-from cloudinit import util
-from cloudinit.subp import subp, ProcessExecutionError
+from distutils.spawn import find_executable
import netifaces
+from cloudinit import dmi
+from cloudinit import log as logging
+from cloudinit import sources, util
+from cloudinit.subp import ProcessExecutionError, subp
PRODUCT_UUID_FILE_PATH = "/sys/class/dmi/id/product_uuid"
@@ -460,7 +459,7 @@ def guestinfo_set_value(key, value, vmware_rpctool=VMWARE_RPCTOOL):
subp(
[
vmware_rpctool,
- ("info-set %s %s" % (get_guestinfo_key_name(key), value)),
+ "info-set %s %s" % (get_guestinfo_key_name(key), value),
]
)
return True
diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py
index abeefbc5..13f7c24d 100644
--- a/cloudinit/sources/DataSourceVultr.py
+++ b/cloudinit/sources/DataSourceVultr.py
@@ -5,35 +5,37 @@
# Vultr Metadata API:
# https://www.vultr.com/metadata/
-from cloudinit import log as log
-from cloudinit import sources
-from cloudinit import util
-from cloudinit import version
-
import cloudinit.sources.helpers.vultr as vultr
+from cloudinit import log as log
+from cloudinit import sources, util, version
LOG = log.getLogger(__name__)
BUILTIN_DS_CONFIG = {
- 'url': 'http://169.254.169.254',
- 'retries': 30,
- 'timeout': 10,
- 'wait': 5,
- 'user-agent': 'Cloud-Init/%s - OS: %s Variant: %s' %
- (version.version_string(),
- util.system_info()['system'],
- util.system_info()['variant'])
+ "url": "http://169.254.169.254",
+ "retries": 30,
+ "timeout": 10,
+ "wait": 5,
+ "user-agent": "Cloud-Init/%s - OS: %s Variant: %s"
+ % (
+ version.version_string(),
+ util.system_info()["system"],
+ util.system_info()["variant"],
+ ),
}
class DataSourceVultr(sources.DataSource):
- dsname = 'Vultr'
+ dsname = "Vultr"
def __init__(self, sys_cfg, distro, paths):
super(DataSourceVultr, self).__init__(sys_cfg, distro, paths)
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "Vultr"], {}),
- BUILTIN_DS_CONFIG])
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "Vultr"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
# Initiate data and check if Vultr
def _get_data(self):
@@ -46,8 +48,8 @@ class DataSourceVultr(sources.DataSource):
# Fetch metadata
self.metadata = self.get_metadata()
- self.metadata['instance-id'] = self.metadata['instanceid']
- self.metadata['local-hostname'] = self.metadata['hostname']
+ self.metadata["instance-id"] = self.metadata["instanceid"]
+ self.metadata["local-hostname"] = self.metadata["hostname"]
self.userdata_raw = self.metadata["user-data"]
# Generate config and process data
@@ -55,9 +57,9 @@ class DataSourceVultr(sources.DataSource):
# Dump some data so diagnosing failures is manageable
LOG.debug("Vultr Vendor Config:")
- LOG.debug(util.json_dumps(self.metadata['vendor-data']))
- LOG.debug("SUBID: %s", self.metadata['instance-id'])
- LOG.debug("Hostname: %s", self.metadata['local-hostname'])
+ LOG.debug(util.json_dumps(self.metadata["vendor-data"]))
+ LOG.debug("SUBID: %s", self.metadata["instance-id"])
+ LOG.debug("Hostname: %s", self.metadata["local-hostname"])
if self.userdata_raw is not None:
LOG.debug("User-Data:")
LOG.debug(self.userdata_raw)
@@ -70,16 +72,16 @@ class DataSourceVultr(sources.DataSource):
if "cloud_interfaces" in md:
# In the future we will just drop pre-configured
# network configs into the array. They need names though.
- self.netcfg = vultr.add_interface_names(md['cloud_interfaces'])
+ self.netcfg = vultr.add_interface_names(md["cloud_interfaces"])
else:
- self.netcfg = vultr.generate_network_config(md['interfaces'])
+ self.netcfg = vultr.generate_network_config(md["interfaces"])
# Grab vendordata
- self.vendordata_raw = md['vendor-data']
+ self.vendordata_raw = md["vendor-data"]
# Default hostname is "guest" for whitelabel
- if self.metadata['local-hostname'] == "":
- self.metadata['local-hostname'] = "guest"
+ if self.metadata["local-hostname"] == "":
+ self.metadata["local-hostname"] = "guest"
self.userdata_raw = md["user-data"]
if self.userdata_raw == "":
@@ -87,11 +89,13 @@ class DataSourceVultr(sources.DataSource):
# Get the metadata by flag
def get_metadata(self):
- return vultr.get_metadata(self.ds_cfg['url'],
- self.ds_cfg['timeout'],
- self.ds_cfg['retries'],
- self.ds_cfg['wait'],
- self.ds_cfg['user-agent'])
+ return vultr.get_metadata(
+ self.ds_cfg["url"],
+ self.ds_cfg["timeout"],
+ self.ds_cfg["retries"],
+ self.ds_cfg["wait"],
+ self.ds_cfg["user-agent"],
+ )
# Compare subid as instance id
def check_instance_id(self, sys_cfg):
@@ -102,7 +106,7 @@ class DataSourceVultr(sources.DataSource):
if vultr.is_baremetal():
return False
- subid = vultr.get_sysinfo()['subid']
+ subid = vultr.get_sysinfo()["subid"]
return sources.instance_id_matches_system_uuid(subid)
# Currently unsupported
@@ -117,7 +121,7 @@ class DataSourceVultr(sources.DataSource):
# Used to match classes to dependencies
datasources = [
- (DataSourceVultr, (sources.DEP_FILESYSTEM, )),
+ (DataSourceVultr, (sources.DEP_FILESYSTEM,)),
]
@@ -133,12 +137,14 @@ if __name__ == "__main__":
print("Machine is not a Vultr instance")
sys.exit(1)
- md = vultr.get_metadata(BUILTIN_DS_CONFIG['url'],
- BUILTIN_DS_CONFIG['timeout'],
- BUILTIN_DS_CONFIG['retries'],
- BUILTIN_DS_CONFIG['wait'],
- BUILTIN_DS_CONFIG['user-agent'])
- config = md['vendor-data']
+ md = vultr.get_metadata(
+ BUILTIN_DS_CONFIG["url"],
+ BUILTIN_DS_CONFIG["timeout"],
+ BUILTIN_DS_CONFIG["retries"],
+ BUILTIN_DS_CONFIG["wait"],
+ BUILTIN_DS_CONFIG["user-agent"],
+ )
+ config = md["vendor-data"]
sysinfo = vultr.get_sysinfo()
print(util.json_dumps(sysinfo))
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index f2f2343c..9083f399 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -15,11 +15,9 @@ import os
from collections import namedtuple
from typing import Dict, List # noqa: F401
-from cloudinit import dmi
-from cloudinit import importer
+from cloudinit import dmi, importer
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import type_utils
+from cloudinit import net, type_utils
from cloudinit import user_data as ud
from cloudinit import util
from cloudinit.atomic_helper import write_json
@@ -38,42 +36,44 @@ VALID_DSMODES = [DSMODE_DISABLED, DSMODE_LOCAL, DSMODE_NETWORK]
DEP_FILESYSTEM = "FILESYSTEM"
DEP_NETWORK = "NETWORK"
-DS_PREFIX = 'DataSource'
+DS_PREFIX = "DataSource"
EXPERIMENTAL_TEXT = (
"EXPERIMENTAL: The structure and format of content scoped under the 'ds'"
- " key may change in subsequent releases of cloud-init.")
+ " key may change in subsequent releases of cloud-init."
+)
# File in which publicly available instance meta-data is written
# security-sensitive key values are redacted from this world-readable file
-INSTANCE_JSON_FILE = 'instance-data.json'
+INSTANCE_JSON_FILE = "instance-data.json"
# security-sensitive key values are present in this root-readable file
-INSTANCE_JSON_SENSITIVE_FILE = 'instance-data-sensitive.json'
-REDACT_SENSITIVE_VALUE = 'redacted for non-root user'
+INSTANCE_JSON_SENSITIVE_FILE = "instance-data-sensitive.json"
+REDACT_SENSITIVE_VALUE = "redacted for non-root user"
# Key which can provide a cloud's official product name to cloud-init
-METADATA_CLOUD_NAME_KEY = 'cloud-name'
+METADATA_CLOUD_NAME_KEY = "cloud-name"
UNSET = "_unset"
-METADATA_UNKNOWN = 'unknown'
+METADATA_UNKNOWN = "unknown"
LOG = logging.getLogger(__name__)
# CLOUD_ID_REGION_PREFIX_MAP format is:
# <region-match-prefix>: (<new-cloud-id>: <test_allowed_cloud_callable>)
CLOUD_ID_REGION_PREFIX_MAP = {
- 'cn-': ('aws-china', lambda c: c == 'aws'), # only change aws regions
- 'us-gov-': ('aws-gov', lambda c: c == 'aws'), # only change aws regions
- 'china': ('azure-china', lambda c: c == 'azure'), # only change azure
+ "cn-": ("aws-china", lambda c: c == "aws"), # only change aws regions
+ "us-gov-": ("aws-gov", lambda c: c == "aws"), # only change aws regions
+ "china": ("azure-china", lambda c: c == "azure"), # only change azure
}
# NetworkConfigSource represents the canonical list of network config sources
# that cloud-init knows about. (Python 2.7 lacks PEP 435, so use a singleton
# namedtuple as an enum; see https://stackoverflow.com/a/6971002)
-_NETCFG_SOURCE_NAMES = ('cmdline', 'ds', 'system_cfg', 'fallback', 'initramfs')
-NetworkConfigSource = namedtuple('NetworkConfigSource',
- _NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES)
+_NETCFG_SOURCE_NAMES = ("cmdline", "ds", "system_cfg", "fallback", "initramfs")
+NetworkConfigSource = namedtuple("NetworkConfigSource", _NETCFG_SOURCE_NAMES)(
+ *_NETCFG_SOURCE_NAMES
+)
class DatasourceUnpickleUserDataError(Exception):
@@ -88,7 +88,7 @@ class InvalidMetaDataException(Exception):
"""Raised when metadata is broken, unavailable or disabled."""
-def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
+def process_instance_metadata(metadata, key_path="", sensitive_keys=()):
"""Process all instance metadata cleaning it up for persisting as json.
Strip ci-b64 prefix and catalog any 'base64_encoded_keys' as a list
@@ -100,22 +100,23 @@ def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
sens_keys = []
for key, val in metadata.items():
if key_path:
- sub_key_path = key_path + '/' + key
+ sub_key_path = key_path + "/" + key
else:
sub_key_path = key
if key in sensitive_keys or sub_key_path in sensitive_keys:
sens_keys.append(sub_key_path)
- if isinstance(val, str) and val.startswith('ci-b64:'):
+ if isinstance(val, str) and val.startswith("ci-b64:"):
base64_encoded_keys.append(sub_key_path)
- md_copy[key] = val.replace('ci-b64:', '')
+ md_copy[key] = val.replace("ci-b64:", "")
if isinstance(val, dict):
return_val = process_instance_metadata(
- val, sub_key_path, sensitive_keys)
- base64_encoded_keys.extend(return_val.pop('base64_encoded_keys'))
- sens_keys.extend(return_val.pop('sensitive_keys'))
+ val, sub_key_path, sensitive_keys
+ )
+ base64_encoded_keys.extend(return_val.pop("base64_encoded_keys"))
+ sens_keys.extend(return_val.pop("sensitive_keys"))
md_copy[key] = return_val
- md_copy['base64_encoded_keys'] = sorted(base64_encoded_keys)
- md_copy['sensitive_keys'] = sorted(sens_keys)
+ md_copy["base64_encoded_keys"] = sorted(base64_encoded_keys)
+ md_copy["sensitive_keys"] = sorted(sens_keys)
return md_copy
@@ -124,11 +125,11 @@ def redact_sensitive_keys(metadata, redact_value=REDACT_SENSITIVE_VALUE):
Replace any key values listed in 'sensitive_keys' with redact_value.
"""
- if not metadata.get('sensitive_keys', []):
+ if not metadata.get("sensitive_keys", []):
return metadata
md_copy = copy.deepcopy(metadata)
- for key_path in metadata.get('sensitive_keys'):
- path_parts = key_path.split('/')
+ for key_path in metadata.get("sensitive_keys"):
+ path_parts = key_path.split("/")
obj = md_copy
for path in path_parts:
if isinstance(obj[path], dict) and path != path_parts[-1]:
@@ -138,18 +139,24 @@ def redact_sensitive_keys(metadata, redact_value=REDACT_SENSITIVE_VALUE):
URLParams = namedtuple(
- 'URLParms', ['max_wait_seconds', 'timeout_seconds',
- 'num_retries', 'sec_between_retries'])
+ "URLParms",
+ [
+ "max_wait_seconds",
+ "timeout_seconds",
+ "num_retries",
+ "sec_between_retries",
+ ],
+)
class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
dsmode = DSMODE_NETWORK
- default_locale = 'en_US.UTF-8'
+ default_locale = "en_US.UTF-8"
# Datasource name needs to be set by subclasses to determine which
# cloud-config datasource key is loaded
- dsname = '_undef'
+ dsname = "_undef"
# Cached cloud_name as determined by _get_cloud_name
_cloud_name = None
@@ -170,15 +177,17 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
# configuration will be used without considering any that follow.) This
# should always be a subset of the members of NetworkConfigSource with no
# duplicate entries.
- network_config_sources = (NetworkConfigSource.cmdline,
- NetworkConfigSource.initramfs,
- NetworkConfigSource.system_cfg,
- NetworkConfigSource.ds)
+ network_config_sources = (
+ NetworkConfigSource.cmdline,
+ NetworkConfigSource.initramfs,
+ NetworkConfigSource.system_cfg,
+ NetworkConfigSource.ds,
+ )
# read_url_params
- url_max_wait = -1 # max_wait < 0 means do not wait
- url_timeout = 10 # timeout for each metadata url read attempt
- url_retries = 5 # number of times to retry url upon 404
+ url_max_wait = -1 # max_wait < 0 means do not wait
+ url_timeout = 10 # timeout for each metadata url read attempt
+ url_retries = 5 # number of times to retry url upon 404
url_sec_between_retries = 1 # amount of seconds to wait between retries
# The datasource defines a set of supported EventTypes during which
@@ -192,30 +201,43 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
# would call default_update_events['network'].add(EventType.BOOT).
# Default: generate network config on new instance id (first boot).
- supported_update_events = {EventScope.NETWORK: {
- EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT,
- EventType.BOOT_LEGACY,
- EventType.HOTPLUG,
- }}
- default_update_events = {EventScope.NETWORK: {
- EventType.BOOT_NEW_INSTANCE,
- }}
+ supported_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }
+ }
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ }
+ }
# N-tuple listing default values for any metadata-related class
# attributes cached on an instance by a process_data run. These attribute
# values are reset via clear_cached_attrs during any update_metadata call.
cached_attr_defaults = (
- ('ec2_metadata', UNSET), ('network_json', UNSET),
- ('metadata', {}), ('userdata', None), ('userdata_raw', None),
- ('vendordata', None), ('vendordata_raw', None),
- ('vendordata2', None), ('vendordata2_raw', None))
+ ("ec2_metadata", UNSET),
+ ("network_json", UNSET),
+ ("metadata", {}),
+ ("userdata", None),
+ ("userdata_raw", None),
+ ("vendordata", None),
+ ("vendordata_raw", None),
+ ("vendordata2", None),
+ ("vendordata2_raw", None),
+ )
_dirty_cache = False
# N-tuple of keypaths or keynames redacted from instance-data.json for
# non-root users
- sensitive_metadata_keys = ('merged_cfg', 'security-credentials',)
+ sensitive_metadata_keys = (
+ "merged_cfg",
+ "security-credentials",
+ )
_ci_pkl_version = 1
@@ -232,7 +254,8 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
self.vendordata2_raw = None
self.ds_cfg = util.get_cfg_by_path(
- self.sys_cfg, ("datasource", self.dsname), {})
+ self.sys_cfg, ("datasource", self.dsname), {}
+ )
if not self.ds_cfg:
self.ds_cfg = {}
@@ -243,11 +266,11 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
def _unpickle(self, ci_pkl_version: int) -> None:
"""Perform deserialization fixes for Paths."""
- if not hasattr(self, 'vendordata2'):
+ if not hasattr(self, "vendordata2"):
self.vendordata2 = None
- if not hasattr(self, 'vendordata2_raw'):
+ if not hasattr(self, "vendordata2_raw"):
self.vendordata2_raw = None
- if hasattr(self, 'userdata') and self.userdata is not None:
+ if hasattr(self, "userdata") and self.userdata is not None:
# If userdata stores MIME data, on < python3.6 it will be
# missing the 'policy' attribute that exists on >=python3.6.
# Calling str() on the userdata will attempt to access this
@@ -258,7 +281,8 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
except AttributeError as e:
LOG.debug(
"Unable to unpickle datasource: %s."
- " Ignoring current cache.", e
+ " Ignoring current cache.",
+ e,
)
raise DatasourceUnpickleUserDataError() from e
@@ -275,28 +299,30 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
# metadata to discover that content
sysinfo = instance_data["sys_info"]
return {
- 'v1': {
- '_beta_keys': ['subplatform'],
- 'availability-zone': availability_zone,
- 'availability_zone': availability_zone,
- 'cloud-name': self.cloud_name,
- 'cloud_name': self.cloud_name,
- 'distro': sysinfo["dist"][0],
- 'distro_version': sysinfo["dist"][1],
- 'distro_release': sysinfo["dist"][2],
- 'platform': self.platform_type,
- 'public_ssh_keys': self.get_public_ssh_keys(),
- 'python_version': sysinfo["python"],
- 'instance-id': instance_id,
- 'instance_id': instance_id,
- 'kernel_release': sysinfo["uname"][2],
- 'local-hostname': local_hostname,
- 'local_hostname': local_hostname,
- 'machine': sysinfo["uname"][4],
- 'region': self.region,
- 'subplatform': self.subplatform,
- 'system_platform': sysinfo["platform"],
- 'variant': sysinfo["variant"]}}
+ "v1": {
+ "_beta_keys": ["subplatform"],
+ "availability-zone": availability_zone,
+ "availability_zone": availability_zone,
+ "cloud-name": self.cloud_name,
+ "cloud_name": self.cloud_name,
+ "distro": sysinfo["dist"][0],
+ "distro_version": sysinfo["dist"][1],
+ "distro_release": sysinfo["dist"][2],
+ "platform": self.platform_type,
+ "public_ssh_keys": self.get_public_ssh_keys(),
+ "python_version": sysinfo["python"],
+ "instance-id": instance_id,
+ "instance_id": instance_id,
+ "kernel_release": sysinfo["uname"][2],
+ "local-hostname": local_hostname,
+ "local_hostname": local_hostname,
+ "machine": sysinfo["uname"][4],
+ "region": self.region,
+ "subplatform": self.subplatform,
+ "system_platform": sysinfo["platform"],
+ "variant": sysinfo["variant"],
+ }
+ }
def clear_cached_attrs(self, attr_defaults=()):
"""Reset any cached metadata attributes to datasource defaults.
@@ -337,48 +363,51 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
@return True on successful write, False otherwise.
"""
- if hasattr(self, '_crawled_metadata'):
+ if hasattr(self, "_crawled_metadata"):
# Any datasource with _crawled_metadata will best represent
# most recent, 'raw' metadata
crawled_metadata = copy.deepcopy(
- getattr(self, '_crawled_metadata'))
- crawled_metadata.pop('user-data', None)
- crawled_metadata.pop('vendor-data', None)
- instance_data = {'ds': crawled_metadata}
+ getattr(self, "_crawled_metadata")
+ )
+ crawled_metadata.pop("user-data", None)
+ crawled_metadata.pop("vendor-data", None)
+ instance_data = {"ds": crawled_metadata}
else:
- instance_data = {'ds': {'meta_data': self.metadata}}
- if hasattr(self, 'network_json'):
- network_json = getattr(self, 'network_json')
+ instance_data = {"ds": {"meta_data": self.metadata}}
+ if hasattr(self, "network_json"):
+ network_json = getattr(self, "network_json")
if network_json != UNSET:
- instance_data['ds']['network_json'] = network_json
- if hasattr(self, 'ec2_metadata'):
- ec2_metadata = getattr(self, 'ec2_metadata')
+ instance_data["ds"]["network_json"] = network_json
+ if hasattr(self, "ec2_metadata"):
+ ec2_metadata = getattr(self, "ec2_metadata")
if ec2_metadata != UNSET:
- instance_data['ds']['ec2_metadata'] = ec2_metadata
- instance_data['ds']['_doc'] = EXPERIMENTAL_TEXT
+ instance_data["ds"]["ec2_metadata"] = ec2_metadata
+ instance_data["ds"]["_doc"] = EXPERIMENTAL_TEXT
# Add merged cloud.cfg and sys info for jinja templates and cli query
- instance_data['merged_cfg'] = copy.deepcopy(self.sys_cfg)
- instance_data['merged_cfg']['_doc'] = (
- 'Merged cloud-init system config from /etc/cloud/cloud.cfg and'
- ' /etc/cloud/cloud.cfg.d/')
- instance_data['sys_info'] = util.system_info()
- instance_data.update(
- self._get_standardized_metadata(instance_data))
+ instance_data["merged_cfg"] = copy.deepcopy(self.sys_cfg)
+ instance_data["merged_cfg"]["_doc"] = (
+ "Merged cloud-init system config from /etc/cloud/cloud.cfg and"
+ " /etc/cloud/cloud.cfg.d/"
+ )
+ instance_data["sys_info"] = util.system_info()
+ instance_data.update(self._get_standardized_metadata(instance_data))
try:
# Process content base64encoding unserializable values
content = util.json_dumps(instance_data)
# Strip base64: prefix and set base64_encoded_keys list.
processed_data = process_instance_metadata(
json.loads(content),
- sensitive_keys=self.sensitive_metadata_keys)
+ sensitive_keys=self.sensitive_metadata_keys,
+ )
except TypeError as e:
- LOG.warning('Error persisting instance-data.json: %s', str(e))
+ LOG.warning("Error persisting instance-data.json: %s", str(e))
return False
except UnicodeDecodeError as e:
- LOG.warning('Error persisting instance-data.json: %s', str(e))
+ LOG.warning("Error persisting instance-data.json: %s", str(e))
return False
- json_sensitive_file = os.path.join(self.paths.run_dir,
- INSTANCE_JSON_SENSITIVE_FILE)
+ json_sensitive_file = os.path.join(
+ self.paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE
+ )
write_json(json_sensitive_file, processed_data, mode=0o600)
json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
# World readable
@@ -388,8 +417,9 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
def _get_data(self):
"""Walk metadata sources, process crawled data and save attributes."""
raise NotImplementedError(
- 'Subclasses of DataSource must implement _get_data which'
- ' sets self.metadata, vendordata_raw and userdata_raw.')
+ "Subclasses of DataSource must implement _get_data which"
+ " sets self.metadata, vendordata_raw and userdata_raw."
+ )
def get_url_params(self):
"""Return the Datasource's prefered url_read parameters.
@@ -404,37 +434,50 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
max_wait = int(self.ds_cfg.get("max_wait", self.url_max_wait))
except ValueError:
util.logexc(
- LOG, "Config max_wait '%s' is not an int, using default '%s'",
- self.ds_cfg.get("max_wait"), max_wait)
+ LOG,
+ "Config max_wait '%s' is not an int, using default '%s'",
+ self.ds_cfg.get("max_wait"),
+ max_wait,
+ )
timeout = self.url_timeout
try:
- timeout = max(
- 0, int(self.ds_cfg.get("timeout", self.url_timeout)))
+ timeout = max(0, int(self.ds_cfg.get("timeout", self.url_timeout)))
except ValueError:
timeout = self.url_timeout
util.logexc(
- LOG, "Config timeout '%s' is not an int, using default '%s'",
- self.ds_cfg.get('timeout'), timeout)
+ LOG,
+ "Config timeout '%s' is not an int, using default '%s'",
+ self.ds_cfg.get("timeout"),
+ timeout,
+ )
retries = self.url_retries
try:
retries = int(self.ds_cfg.get("retries", self.url_retries))
except Exception:
util.logexc(
- LOG, "Config retries '%s' is not an int, using default '%s'",
- self.ds_cfg.get('retries'), retries)
+ LOG,
+ "Config retries '%s' is not an int, using default '%s'",
+ self.ds_cfg.get("retries"),
+ retries,
+ )
sec_between_retries = self.url_sec_between_retries
try:
- sec_between_retries = int(self.ds_cfg.get(
- "sec_between_retries",
- self.url_sec_between_retries))
+ sec_between_retries = int(
+ self.ds_cfg.get(
+ "sec_between_retries", self.url_sec_between_retries
+ )
+ )
except Exception:
util.logexc(
- LOG, "Config sec_between_retries '%s' is not an int,"
- " using default '%s'",
- self.ds_cfg.get("sec_between_retries"), sec_between_retries)
+ LOG,
+ "Config sec_between_retries '%s' is not an int,"
+ " using default '%s'",
+ self.ds_cfg.get("sec_between_retries"),
+ sec_between_retries,
+ )
return URLParams(max_wait, timeout, retries, sec_between_retries)
@@ -462,13 +505,13 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
self._fallback_interface = net.find_fallback_nic()
if self._fallback_interface is None:
LOG.warning(
- "Did not find a fallback interface on %s.",
- self.cloud_name)
+ "Did not find a fallback interface on %s.", self.cloud_name
+ )
return self._fallback_interface
@property
def platform_type(self):
- if not hasattr(self, '_platform_type'):
+ if not hasattr(self, "_platform_type"):
# Handle upgrade path where pickled datasource has no _platform.
self._platform_type = self.dsname.lower()
if not self._platform_type:
@@ -487,7 +530,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
nocloud: seed-dir (/seed/dir/path)
lxd: nocloud (/seed/dir/path)
"""
- if not hasattr(self, '_subplatform'):
+ if not hasattr(self, "_subplatform"):
# Handle upgrade path where pickled datasource has no _platform.
self._subplatform = self._get_subplatform()
if not self._subplatform:
@@ -496,8 +539,8 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
def _get_subplatform(self):
"""Subclasses should implement to return a "slug (detail)" string."""
- if hasattr(self, 'metadata_address'):
- return 'metadata (%s)' % getattr(self, 'metadata_address')
+ if hasattr(self, "metadata_address"):
+ return "metadata (%s)" % getattr(self, "metadata_address")
return METADATA_UNKNOWN
@property
@@ -516,8 +559,10 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
else:
self._cloud_name = self._get_cloud_name().lower()
LOG.debug(
- 'Ignoring metadata provided key %s: non-string type %s',
- METADATA_CLOUD_NAME_KEY, type(cloud_name))
+ "Ignoring metadata provided key %s: non-string type %s",
+ METADATA_CLOUD_NAME_KEY,
+ type(cloud_name),
+ )
else:
self._cloud_name = self._get_cloud_name().lower()
return self._cloud_name
@@ -534,8 +579,8 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
def launch_index(self):
if not self.metadata:
return None
- if 'launch-index' in self.metadata:
- return self.metadata['launch-index']
+ if "launch-index" in self.metadata:
+ return self.metadata["launch-index"]
return None
def _filter_xdata(self, processed_ud):
@@ -567,7 +612,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
return {}
def get_public_ssh_keys(self):
- return normalize_pubkey_data(self.metadata.get('public-keys'))
+ return normalize_pubkey_data(self.metadata.get("public-keys"))
def publish_host_keys(self, hostkeys):
"""Publish the public SSH host keys (found in /etc/ssh/*.pub).
@@ -589,7 +634,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
if not short_name.startswith(nfrom):
continue
for nto in tlist:
- cand = "/dev/%s%s" % (nto, short_name[len(nfrom):])
+ cand = "/dev/%s%s" % (nto, short_name[len(nfrom) :])
if os.path.exists(cand):
return cand
return None
@@ -614,20 +659,21 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
@property
def availability_zone(self):
top_level_az = self.metadata.get(
- 'availability-zone', self.metadata.get('availability_zone'))
+ "availability-zone", self.metadata.get("availability_zone")
+ )
if top_level_az:
return top_level_az
- return self.metadata.get('placement', {}).get('availability-zone')
+ return self.metadata.get("placement", {}).get("availability-zone")
@property
def region(self):
- return self.metadata.get('region')
+ return self.metadata.get("region")
def get_instance_id(self):
- if not self.metadata or 'instance-id' not in self.metadata:
+ if not self.metadata or "instance-id" not in self.metadata:
# Return a magic not really instance id string
return "iid-datasource"
- return str(self.metadata['instance-id'])
+ return str(self.metadata["instance-id"])
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
"""Get hostname or fqdn from the datasource. Look it up if desired.
@@ -645,7 +691,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
defhost = "localhost"
domain = defdomain
- if not self.metadata or not self.metadata.get('local-hostname'):
+ if not self.metadata or not self.metadata.get("local-hostname"):
if metadata_only:
return None
# this is somewhat questionable really.
@@ -666,14 +712,14 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
else:
# if there is an ipv4 address in 'local-hostname', then
# make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
- lhost = self.metadata['local-hostname']
+ lhost = self.metadata["local-hostname"]
if net.is_ipv4_address(lhost):
toks = []
if resolve_ip:
toks = util.gethostbyaddr(lhost)
if toks:
- toks = str(toks).split('.')
+ toks = str(toks).split(".")
else:
toks = ["ip-%s" % lhost.replace(".", "-")]
else:
@@ -681,7 +727,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
if len(toks) > 1:
hostname = toks[0]
- domain = '.'.join(toks[1:])
+ domain = ".".join(toks[1:])
else:
hostname = toks[0]
@@ -696,7 +742,10 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
def get_supported_events(self, source_event_types: List[EventType]):
supported_events = {} # type: Dict[EventScope, set]
for event in source_event_types:
- for update_scope, update_events in self.supported_update_events.items(): # noqa: E501
+ for (
+ update_scope,
+ update_events,
+ ) in self.supported_update_events.items():
if event in update_events:
if not supported_events.get(update_scope):
supported_events[update_scope] = set()
@@ -723,18 +772,22 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
LOG.debug(
"Update datasource metadata and %s config due to events: %s",
scope.value,
- ', '.join([event.value for event in matched_events]))
+ ", ".join([event.value for event in matched_events]),
+ )
# Each datasource has a cached config property which needs clearing
# Once cleared that config property will be regenerated from
# current metadata.
- self.clear_cached_attrs((('_%s_config' % scope, UNSET),))
+ self.clear_cached_attrs((("_%s_config" % scope, UNSET),))
if supported_events:
self.clear_cached_attrs()
result = self.get_data()
if result:
return True
- LOG.debug("Datasource %s not updated for events: %s", self,
- ', '.join([event.value for event in source_event_types]))
+ LOG.debug(
+ "Datasource %s not updated for events: %s",
+ self,
+ ", ".join([event.value for event in source_event_types]),
+ )
return False
def check_instance_id(self, sys_cfg):
@@ -756,8 +809,9 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
if candidate in valid:
return candidate
else:
- LOG.warning("invalid dsmode '%s', using default=%s",
- candidate, default)
+ LOG.warning(
+ "invalid dsmode '%s', using default=%s", candidate, default
+ )
return default
return default
@@ -836,7 +890,8 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
name="search-%s" % name.replace("DataSource", ""),
description="searching for %s data from %s" % (mode, name),
message="no %s data found from %s" % (mode, name),
- parent=reporter)
+ parent=reporter,
+ )
try:
with myrep:
LOG.debug("Seeing if we can get any data from %s", cls)
@@ -849,8 +904,9 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
except Exception:
util.logexc(LOG, "Getting data from %s failed", cls)
- msg = ("Did not find any data source,"
- " searched classes: (%s)") % (", ".join(ds_names))
+ msg = "Did not find any data source, searched classes: (%s)" % ", ".join(
+ ds_names
+ )
raise DataSourceNotFoundException(msg)
@@ -860,15 +916,19 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
# Return an ordered list of classes that match (if any)
def list_sources(cfg_list, depends, pkg_list):
src_list = []
- LOG.debug(("Looking for data source in: %s,"
- " via packages %s that matches dependencies %s"),
- cfg_list, pkg_list, depends)
+ LOG.debug(
+ "Looking for data source in: %s,"
+ " via packages %s that matches dependencies %s",
+ cfg_list,
+ pkg_list,
+ depends,
+ )
for ds_name in cfg_list:
if not ds_name.startswith(DS_PREFIX):
- ds_name = '%s%s' % (DS_PREFIX, ds_name)
- m_locs, _looked_locs = importer.find_module(ds_name,
- pkg_list,
- ['get_datasource_list'])
+ ds_name = "%s%s" % (DS_PREFIX, ds_name)
+ m_locs, _looked_locs = importer.find_module(
+ ds_name, pkg_list, ["get_datasource_list"]
+ )
for m_loc in m_locs:
mod = importer.import_module(m_loc)
lister = getattr(mod, "get_datasource_list")
@@ -879,7 +939,7 @@ def list_sources(cfg_list, depends, pkg_list):
return src_list
-def instance_id_matches_system_uuid(instance_id, field='system-uuid'):
+def instance_id_matches_system_uuid(instance_id, field="system-uuid"):
# quickly (local check only) if self.instance_id is still valid
# we check kernel command line or files.
if not instance_id:
@@ -929,8 +989,7 @@ def convert_vendordata(data, recurse=True):
return copy.deepcopy(data)
if isinstance(data, dict):
if recurse is True:
- return convert_vendordata(data.get('cloud-init'),
- recurse=False)
+ return convert_vendordata(data.get("cloud-init"), recurse=False)
raise ValueError("vendordata['cloud-init'] cannot be dict")
raise ValueError("Unknown data type for vendordata: %s" % type(data))
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index a5ac1d57..50058fe0 100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -6,27 +6,28 @@ import os
import re
import socket
import struct
-import time
import textwrap
+import time
import zlib
-from errno import ENOENT
-
-from cloudinit.settings import CFG_BUILTIN
-from cloudinit.net import dhcp
-from cloudinit import stages
-from cloudinit import temp_utils
from contextlib import contextmanager
+from datetime import datetime
+from errno import ENOENT
from xml.etree import ElementTree
from xml.sax.saxutils import escape
-from cloudinit import subp
-from cloudinit import url_helper
-from cloudinit import util
-from cloudinit import version
-from cloudinit import distros
-from cloudinit.reporting import events
+from cloudinit import (
+ distros,
+ stages,
+ subp,
+ temp_utils,
+ url_helper,
+ util,
+ version,
+)
+from cloudinit.net import dhcp
from cloudinit.net.dhcp import EphemeralDHCPv4
-from datetime import datetime
+from cloudinit.reporting import events
+from cloudinit.settings import CFG_BUILTIN
LOG = logging.getLogger(__name__)
@@ -34,10 +35,10 @@ LOG = logging.getLogger(__name__)
# value is applied if the endpoint can't be found within a lease file
DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10"
-BOOT_EVENT_TYPE = 'boot-telemetry'
-SYSTEMINFO_EVENT_TYPE = 'system-info'
-DIAGNOSTIC_EVENT_TYPE = 'diagnostic'
-COMPRESSED_EVENT_TYPE = 'compressed'
+BOOT_EVENT_TYPE = "boot-telemetry"
+SYSTEMINFO_EVENT_TYPE = "system-info"
+DIAGNOSTIC_EVENT_TYPE = "diagnostic"
+COMPRESSED_EVENT_TYPE = "compressed"
# Maximum number of bytes of the cloud-init.log file that can be dumped to KVP
# at once. This number is based on the analysis done on a large sample of
# cloud-init.log files where the P95 of the file sizes was 537KB and the time
@@ -45,25 +46,29 @@ COMPRESSED_EVENT_TYPE = 'compressed'
MAX_LOG_TO_KVP_LENGTH = 512000
# File to store the last byte of cloud-init.log that was pushed to KVP. This
# file will be deleted with every VM reboot.
-LOG_PUSHED_TO_KVP_INDEX_FILE = '/run/cloud-init/log_pushed_to_kvp_index'
+LOG_PUSHED_TO_KVP_INDEX_FILE = "/run/cloud-init/log_pushed_to_kvp_index"
azure_ds_reporter = events.ReportEventStack(
name="azure-ds",
description="initialize reporter for azure ds",
- reporting_enabled=True)
+ reporting_enabled=True,
+)
DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE = (
- 'The VM encountered an error during deployment. '
- 'Please visit https://aka.ms/linuxprovisioningerror '
- 'for more information on remediation.')
+ "The VM encountered an error during deployment. "
+ "Please visit https://aka.ms/linuxprovisioningerror "
+ "for more information on remediation."
+)
def azure_ds_telemetry_reporter(func):
def impl(*args, **kwargs):
with events.ReportEventStack(
- name=func.__name__,
- description=func.__name__,
- parent=azure_ds_reporter):
+ name=func.__name__,
+ description=func.__name__,
+ parent=azure_ds_reporter,
+ ):
return func(*args, **kwargs)
+
return impl
@@ -79,16 +84,16 @@ def is_byte_swapped(previous_id, current_id):
def swap_bytestring(s, width=2):
dd = [byte for byte in textwrap.wrap(s, 2)]
dd.reverse()
- return ''.join(dd)
+ return "".join(dd)
- parts = current_id.split('-')
- swapped_id = '-'.join(
+ parts = current_id.split("-")
+ swapped_id = "-".join(
[
swap_bytestring(parts[0]),
swap_bytestring(parts[1]),
swap_bytestring(parts[2]),
parts[3],
- parts[4]
+ parts[4],
]
)
@@ -98,31 +103,29 @@ def is_byte_swapped(previous_id, current_id):
@azure_ds_telemetry_reporter
def get_boot_telemetry():
"""Report timestamps related to kernel initialization and systemd
- activation of cloud-init"""
+ activation of cloud-init"""
if not distros.uses_systemd():
- raise RuntimeError(
- "distro not using systemd, skipping boot telemetry")
+ raise RuntimeError("distro not using systemd, skipping boot telemetry")
LOG.debug("Collecting boot telemetry")
try:
kernel_start = float(time.time()) - float(util.uptime())
except ValueError as e:
- raise RuntimeError(
- "Failed to determine kernel start timestamp"
- ) from e
+ raise RuntimeError("Failed to determine kernel start timestamp") from e
try:
- out, _ = subp.subp(['/bin/systemctl',
- 'show', '-p',
- 'UserspaceTimestampMonotonic'],
- capture=True)
+ out, _ = subp.subp(
+ ["/bin/systemctl", "show", "-p", "UserspaceTimestampMonotonic"],
+ capture=True,
+ )
tsm = None
- if out and '=' in out:
+ if out and "=" in out:
tsm = out.split("=")[1]
if not tsm:
- raise RuntimeError("Failed to parse "
- "UserspaceTimestampMonotonic from systemd")
+ raise RuntimeError(
+ "Failed to parse UserspaceTimestampMonotonic from systemd"
+ )
user_start = kernel_start + (float(tsm) / 1000000)
except subp.ProcessExecutionError as e:
@@ -135,16 +138,23 @@ def get_boot_telemetry():
) from e
try:
- out, _ = subp.subp(['/bin/systemctl', 'show',
- 'cloud-init-local', '-p',
- 'InactiveExitTimestampMonotonic'],
- capture=True)
+ out, _ = subp.subp(
+ [
+ "/bin/systemctl",
+ "show",
+ "cloud-init-local",
+ "-p",
+ "InactiveExitTimestampMonotonic",
+ ],
+ capture=True,
+ )
tsm = None
- if out and '=' in out:
+ if out and "=" in out:
tsm = out.split("=")[1]
if not tsm:
- raise RuntimeError("Failed to parse "
- "InactiveExitTimestampMonotonic from systemd")
+ raise RuntimeError(
+ "Failed to parse InactiveExitTimestampMonotonic from systemd"
+ )
cloudinit_activation = kernel_start + (float(tsm) / 1000000)
except subp.ProcessExecutionError as e:
@@ -158,12 +168,16 @@ def get_boot_telemetry():
) from e
evt = events.ReportingEvent(
- BOOT_EVENT_TYPE, 'boot-telemetry',
- "kernel_start=%s user_start=%s cloudinit_activation=%s" %
- (datetime.utcfromtimestamp(kernel_start).isoformat() + 'Z',
- datetime.utcfromtimestamp(user_start).isoformat() + 'Z',
- datetime.utcfromtimestamp(cloudinit_activation).isoformat() + 'Z'),
- events.DEFAULT_EVENT_ORIGIN)
+ BOOT_EVENT_TYPE,
+ "boot-telemetry",
+ "kernel_start=%s user_start=%s cloudinit_activation=%s"
+ % (
+ datetime.utcfromtimestamp(kernel_start).isoformat() + "Z",
+ datetime.utcfromtimestamp(user_start).isoformat() + "Z",
+ datetime.utcfromtimestamp(cloudinit_activation).isoformat() + "Z",
+ ),
+ events.DEFAULT_EVENT_ORIGIN,
+ )
events.report_event(evt)
# return the event for unit testing purpose
@@ -175,13 +189,22 @@ def get_system_info():
"""Collect and report system information"""
info = util.system_info()
evt = events.ReportingEvent(
- SYSTEMINFO_EVENT_TYPE, 'system information',
+ SYSTEMINFO_EVENT_TYPE,
+ "system information",
"cloudinit_version=%s, kernel_version=%s, variant=%s, "
"distro_name=%s, distro_version=%s, flavor=%s, "
- "python_version=%s" %
- (version.version_string(), info['release'], info['variant'],
- info['dist'][0], info['dist'][1], info['dist'][2],
- info['python']), events.DEFAULT_EVENT_ORIGIN)
+ "python_version=%s"
+ % (
+ version.version_string(),
+ info["release"],
+ info["variant"],
+ info["dist"][0],
+ info["dist"][1],
+ info["dist"][2],
+ info["python"],
+ ),
+ events.DEFAULT_EVENT_ORIGIN,
+ )
events.report_event(evt)
# return the event for unit testing purpose
@@ -189,13 +212,17 @@ def get_system_info():
def report_diagnostic_event(
- msg: str, *, logger_func=None) -> events.ReportingEvent:
+ msg: str, *, logger_func=None
+) -> events.ReportingEvent:
"""Report a diagnostic event"""
if callable(logger_func):
logger_func(msg)
evt = events.ReportingEvent(
- DIAGNOSTIC_EVENT_TYPE, 'diagnostic message',
- msg, events.DEFAULT_EVENT_ORIGIN)
+ DIAGNOSTIC_EVENT_TYPE,
+ "diagnostic message",
+ msg,
+ events.DEFAULT_EVENT_ORIGIN,
+ )
events.report_event(evt, excluded_handler_types={"log"})
# return the event for unit testing purpose
@@ -205,21 +232,26 @@ def report_diagnostic_event(
def report_compressed_event(event_name, event_content):
"""Report a compressed event"""
compressed_data = base64.encodebytes(zlib.compress(event_content))
- event_data = {"encoding": "gz+b64",
- "data": compressed_data.decode('ascii')}
+ event_data = {
+ "encoding": "gz+b64",
+ "data": compressed_data.decode("ascii"),
+ }
evt = events.ReportingEvent(
- COMPRESSED_EVENT_TYPE, event_name,
+ COMPRESSED_EVENT_TYPE,
+ event_name,
json.dumps(event_data),
- events.DEFAULT_EVENT_ORIGIN)
- events.report_event(evt,
- excluded_handler_types={"log", "print", "webhook"})
+ events.DEFAULT_EVENT_ORIGIN,
+ )
+ events.report_event(
+ evt, excluded_handler_types={"log", "print", "webhook"}
+ )
# return the event for unit testing purpose
return evt
@azure_ds_telemetry_reporter
-def push_log_to_kvp(file_name=CFG_BUILTIN['def_log_file']):
+def push_log_to_kvp(file_name=CFG_BUILTIN["def_log_file"]):
"""Push a portion of cloud-init.log file or the whole file to KVP
based on the file size.
The first time this function is called after VM boot, it will push the last
@@ -237,23 +269,26 @@ def push_log_to_kvp(file_name=CFG_BUILTIN['def_log_file']):
report_diagnostic_event(
"Dumping last {0} bytes of cloud-init.log file to KVP starting"
" from index: {1}".format(f.tell() - seek_index, seek_index),
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
f.seek(seek_index, os.SEEK_SET)
report_compressed_event("cloud-init.log", f.read())
util.write_file(LOG_PUSHED_TO_KVP_INDEX_FILE, str(f.tell()))
except Exception as ex:
report_diagnostic_event(
"Exception when dumping log file: %s" % repr(ex),
- logger_func=LOG.warning)
+ logger_func=LOG.warning,
+ )
LOG.debug("Dumping dmesg log to KVP")
try:
- out, _ = subp.subp(['dmesg'], decode=False, capture=True)
+ out, _ = subp.subp(["dmesg"], decode=False, capture=True)
report_compressed_event("dmesg", out)
except Exception as ex:
report_diagnostic_event(
"Exception when dumping dmesg log: %s" % repr(ex),
- logger_func=LOG.warning)
+ logger_func=LOG.warning,
+ )
@azure_ds_telemetry_reporter
@@ -263,16 +298,20 @@ def get_last_log_byte_pushed_to_kvp_index():
return int(f.read())
except IOError as e:
if e.errno != ENOENT:
- report_diagnostic_event("Reading LOG_PUSHED_TO_KVP_INDEX_FILE"
- " failed: %s." % repr(e),
- logger_func=LOG.warning)
+ report_diagnostic_event(
+ "Reading LOG_PUSHED_TO_KVP_INDEX_FILE failed: %s." % repr(e),
+ logger_func=LOG.warning,
+ )
except ValueError as e:
- report_diagnostic_event("Invalid value in LOG_PUSHED_TO_KVP_INDEX_FILE"
- ": %s." % repr(e),
- logger_func=LOG.warning)
+ report_diagnostic_event(
+ "Invalid value in LOG_PUSHED_TO_KVP_INDEX_FILE: %s." % repr(e),
+ logger_func=LOG.warning,
+ )
except Exception as e:
- report_diagnostic_event("Failed to get the last log byte pushed to KVP"
- ": %s." % repr(e), logger_func=LOG.warning)
+ report_diagnostic_event(
+ "Failed to get the last log byte pushed to KVP: %s." % repr(e),
+ logger_func=LOG.warning,
+ )
return 0
@@ -306,28 +345,31 @@ def http_with_retries(url, **kwargs) -> str:
sleep_duration_between_retries = 5
periodic_logging_attempts = 12
- if 'timeout' not in kwargs:
- kwargs['timeout'] = default_readurl_timeout
+ if "timeout" not in kwargs:
+ kwargs["timeout"] = default_readurl_timeout
# remove kwargs that cause url_helper.readurl to retry,
# since we are already implementing our own retry logic.
- if kwargs.pop('retries', None):
+ if kwargs.pop("retries", None):
LOG.warning(
- 'Ignoring retries kwarg passed in for '
- 'communication with Azure endpoint.')
- if kwargs.pop('infinite', None):
+ "Ignoring retries kwarg passed in for "
+ "communication with Azure endpoint."
+ )
+ if kwargs.pop("infinite", None):
LOG.warning(
- 'Ignoring infinite kwarg passed in for communication '
- 'with Azure endpoint.')
+ "Ignoring infinite kwarg passed in for communication "
+ "with Azure endpoint."
+ )
for attempt in range(1, max_readurl_attempts + 1):
try:
ret = url_helper.readurl(url, **kwargs)
report_diagnostic_event(
- 'Successful HTTP request with Azure endpoint %s after '
- '%d attempts' % (url, attempt),
- logger_func=LOG.debug)
+ "Successful HTTP request with Azure endpoint %s after "
+ "%d attempts" % (url, attempt),
+ logger_func=LOG.debug,
+ )
return ret
@@ -335,20 +377,20 @@ def http_with_retries(url, **kwargs) -> str:
exc = e
if attempt % periodic_logging_attempts == 0:
report_diagnostic_event(
- 'Failed HTTP request with Azure endpoint %s during '
- 'attempt %d with exception: %s' %
- (url, attempt, e),
- logger_func=LOG.debug)
+ "Failed HTTP request with Azure endpoint %s during "
+ "attempt %d with exception: %s" % (url, attempt, e),
+ logger_func=LOG.debug,
+ )
time.sleep(sleep_duration_between_retries)
raise exc
def build_minimal_ovf(
- username: str,
- hostname: str,
- disableSshPwd: str) -> bytes:
- OVF_ENV_TEMPLATE = textwrap.dedent('''\
+ username: str, hostname: str, disableSshPwd: str
+) -> bytes:
+ OVF_ENV_TEMPLATE = textwrap.dedent(
+ """\
<ns0:Environment xmlns:ns0="http://schemas.dmtf.org/ovf/environment/1"
xmlns:ns1="http://schemas.microsoft.com/windowsazure"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
@@ -370,19 +412,19 @@ def build_minimal_ovf(
</ns1:PlatformSettings>
</ns1:PlatformSettingsSection>
</ns0:Environment>
- ''')
+ """
+ )
ret = OVF_ENV_TEMPLATE.format(
- username=username,
- hostname=hostname,
- disableSshPwd=disableSshPwd)
- return ret.encode('utf-8')
+ username=username, hostname=hostname, disableSshPwd=disableSshPwd
+ )
+ return ret.encode("utf-8")
class AzureEndpointHttpClient:
headers = {
- 'x-ms-agent-name': 'WALinuxAgent',
- 'x-ms-version': '2012-11-30',
+ "x-ms-agent-name": "WALinuxAgent",
+ "x-ms-version": "2012-11-30",
}
def __init__(self, certificate):
@@ -403,8 +445,7 @@ class AzureEndpointHttpClient:
if extra_headers is not None:
headers = self.headers.copy()
headers.update(extra_headers)
- return http_with_retries(
- url, data=data, headers=headers)
+ return http_with_retries(url, data=data, headers=headers)
class InvalidGoalStateXMLException(Exception):
@@ -412,12 +453,12 @@ class InvalidGoalStateXMLException(Exception):
class GoalState:
-
def __init__(
- self,
- unparsed_xml: str,
- azure_endpoint_client: AzureEndpointHttpClient,
- need_certificate: bool = True) -> None:
+ self,
+ unparsed_xml: str,
+ azure_endpoint_client: AzureEndpointHttpClient,
+ need_certificate: bool = True,
+ ) -> None:
"""Parses a GoalState XML string and returns a GoalState object.
@param unparsed_xml: string representing a GoalState XML.
@@ -431,36 +472,41 @@ class GoalState:
self.root = ElementTree.fromstring(unparsed_xml)
except ElementTree.ParseError as e:
report_diagnostic_event(
- 'Failed to parse GoalState XML: %s' % e,
- logger_func=LOG.warning)
+ "Failed to parse GoalState XML: %s" % e,
+ logger_func=LOG.warning,
+ )
raise
- self.container_id = self._text_from_xpath('./Container/ContainerId')
+ self.container_id = self._text_from_xpath("./Container/ContainerId")
self.instance_id = self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance/InstanceId')
- self.incarnation = self._text_from_xpath('./Incarnation')
+ "./Container/RoleInstanceList/RoleInstance/InstanceId"
+ )
+ self.incarnation = self._text_from_xpath("./Incarnation")
for attr in ("container_id", "instance_id", "incarnation"):
if getattr(self, attr) is None:
- msg = 'Missing %s in GoalState XML' % attr
+ msg = "Missing %s in GoalState XML" % attr
report_diagnostic_event(msg, logger_func=LOG.warning)
raise InvalidGoalStateXMLException(msg)
self.certificates_xml = None
url = self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance'
- '/Configuration/Certificates')
+ "./Container/RoleInstanceList/RoleInstance"
+ "/Configuration/Certificates"
+ )
if url is not None and need_certificate:
with events.ReportEventStack(
- name="get-certificates-xml",
- description="get certificates xml",
- parent=azure_ds_reporter):
- self.certificates_xml = \
- self.azure_endpoint_client.get(
- url, secure=True).contents
+ name="get-certificates-xml",
+ description="get certificates xml",
+ parent=azure_ds_reporter,
+ ):
+ self.certificates_xml = self.azure_endpoint_client.get(
+ url, secure=True
+ ).contents
if self.certificates_xml is None:
raise InvalidGoalStateXMLException(
- 'Azure endpoint returned empty certificates xml.')
+ "Azure endpoint returned empty certificates xml."
+ )
def _text_from_xpath(self, xpath):
element = self.root.find(xpath)
@@ -472,8 +518,8 @@ class GoalState:
class OpenSSLManager:
certificate_names = {
- 'private_key': 'TransportPrivate.pem',
- 'certificate': 'TransportCert.pem',
+ "private_key": "TransportPrivate.pem",
+ "certificate": "TransportCert.pem",
}
def __init__(self):
@@ -494,35 +540,47 @@ class OpenSSLManager:
@azure_ds_telemetry_reporter
def generate_certificate(self):
- LOG.debug('Generating certificate for communication with fabric...')
+ LOG.debug("Generating certificate for communication with fabric...")
if self.certificate is not None:
- LOG.debug('Certificate already generated.')
+ LOG.debug("Certificate already generated.")
return
with cd(self.tmpdir):
- subp.subp([
- 'openssl', 'req', '-x509', '-nodes', '-subj',
- '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
- '-keyout', self.certificate_names['private_key'],
- '-out', self.certificate_names['certificate'],
- ])
- certificate = ''
- for line in open(self.certificate_names['certificate']):
+ subp.subp(
+ [
+ "openssl",
+ "req",
+ "-x509",
+ "-nodes",
+ "-subj",
+ "/CN=LinuxTransport",
+ "-days",
+ "32768",
+ "-newkey",
+ "rsa:2048",
+ "-keyout",
+ self.certificate_names["private_key"],
+ "-out",
+ self.certificate_names["certificate"],
+ ]
+ )
+ certificate = ""
+ for line in open(self.certificate_names["certificate"]):
if "CERTIFICATE" not in line:
certificate += line.rstrip()
self.certificate = certificate
- LOG.debug('New certificate generated.')
+ LOG.debug("New certificate generated.")
@staticmethod
@azure_ds_telemetry_reporter
def _run_x509_action(action, cert):
- cmd = ['openssl', 'x509', '-noout', action]
+ cmd = ["openssl", "x509", "-noout", action]
result, _ = subp.subp(cmd, data=cert)
return result
@azure_ds_telemetry_reporter
def _get_ssh_key_from_cert(self, certificate):
- pub_key = self._run_x509_action('-pubkey', certificate)
- keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin']
+ pub_key = self._run_x509_action("-pubkey", certificate)
+ keygen_cmd = ["ssh-keygen", "-i", "-m", "PKCS8", "-f", "/dev/stdin"]
ssh_key, _ = subp.subp(keygen_cmd, data=pub_key)
return ssh_key
@@ -535,48 +593,50 @@ class OpenSSLManager:
        Azure control plane passes that fingerprint like so:
'073E19D14D1C799224C6A0FD8DDAB6A8BF27D473'
"""
- raw_fp = self._run_x509_action('-fingerprint', certificate)
- eq = raw_fp.find('=')
- octets = raw_fp[eq+1:-1].split(':')
- return ''.join(octets)
+ raw_fp = self._run_x509_action("-fingerprint", certificate)
+ eq = raw_fp.find("=")
+ octets = raw_fp[eq + 1 : -1].split(":")
+ return "".join(octets)
@azure_ds_telemetry_reporter
def _decrypt_certs_from_xml(self, certificates_xml):
"""Decrypt the certificates XML document using the our private key;
- return the list of certs and private keys contained in the doc.
+ return the list of certs and private keys contained in the doc.
"""
- tag = ElementTree.fromstring(certificates_xml).find('.//Data')
+ tag = ElementTree.fromstring(certificates_xml).find(".//Data")
certificates_content = tag.text
lines = [
- b'MIME-Version: 1.0',
+ b"MIME-Version: 1.0",
b'Content-Disposition: attachment; filename="Certificates.p7m"',
b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"',
- b'Content-Transfer-Encoding: base64',
- b'',
- certificates_content.encode('utf-8'),
+ b"Content-Transfer-Encoding: base64",
+ b"",
+ certificates_content.encode("utf-8"),
]
with cd(self.tmpdir):
out, _ = subp.subp(
- 'openssl cms -decrypt -in /dev/stdin -inkey'
- ' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
- ' -password pass:'.format(**self.certificate_names),
- shell=True, data=b'\n'.join(lines))
+ "openssl cms -decrypt -in /dev/stdin -inkey"
+ " {private_key} -recip {certificate} | openssl pkcs12 -nodes"
+ " -password pass:".format(**self.certificate_names),
+ shell=True,
+ data=b"\n".join(lines),
+ )
return out
@azure_ds_telemetry_reporter
def parse_certificates(self, certificates_xml):
"""Given the Certificates XML document, return a dictionary of
- fingerprints and associated SSH keys derived from the certs."""
+ fingerprints and associated SSH keys derived from the certs."""
out = self._decrypt_certs_from_xml(certificates_xml)
current = []
keys = {}
for line in out.splitlines():
current.append(line)
- if re.match(r'[-]+END .*?KEY[-]+$', line):
+ if re.match(r"[-]+END .*?KEY[-]+$", line):
# ignore private_keys
current = []
- elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
- certificate = '\n'.join(current)
+ elif re.match(r"[-]+END .*?CERTIFICATE[-]+$", line):
+ certificate = "\n".join(current)
ssh_key = self._get_ssh_key_from_cert(certificate)
fingerprint = self._get_fingerprint_from_cert(certificate)
keys[fingerprint] = ssh_key
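
As the docstring above notes, the Azure control plane reports certificate fingerprints without separators, while openssl's -fingerprint output is a colon-separated line. The normalisation in _get_fingerprint_from_cert reduces to the following (the openssl output line is a fabricated example):

    raw_fp = (
        "SHA1 Fingerprint="
        "07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:B6:A8:BF:27:D4:73\n"
    )
    eq = raw_fp.find("=")
    print("".join(raw_fp[eq + 1:-1].split(":")))
    # 073E19D14D1C799224C6A0FD8DDAB6A8BF27D473
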
@@ -586,7 +646,8 @@ class OpenSSLManager:
class GoalStateHealthReporter:
- HEALTH_REPORT_XML_TEMPLATE = textwrap.dedent('''\
+ HEALTH_REPORT_XML_TEMPLATE = textwrap.dedent(
+ """\
<?xml version="1.0" encoding="utf-8"?>
<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
@@ -604,25 +665,30 @@ class GoalStateHealthReporter:
</RoleInstanceList>
</Container>
</Health>
- ''')
+ """
+ )
- HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = textwrap.dedent('''\
+ HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = textwrap.dedent(
+ """\
<Details>
<SubStatus>{health_substatus}</SubStatus>
<Description>{health_description}</Description>
</Details>
- ''')
+ """
+ )
- PROVISIONING_SUCCESS_STATUS = 'Ready'
- PROVISIONING_NOT_READY_STATUS = 'NotReady'
- PROVISIONING_FAILURE_SUBSTATUS = 'ProvisioningFailed'
+ PROVISIONING_SUCCESS_STATUS = "Ready"
+ PROVISIONING_NOT_READY_STATUS = "NotReady"
+ PROVISIONING_FAILURE_SUBSTATUS = "ProvisioningFailed"
HEALTH_REPORT_DESCRIPTION_TRIM_LEN = 512
def __init__(
- self, goal_state: GoalState,
- azure_endpoint_client: AzureEndpointHttpClient,
- endpoint: str) -> None:
+ self,
+ goal_state: GoalState,
+ azure_endpoint_client: AzureEndpointHttpClient,
+ endpoint: str,
+ ) -> None:
"""Creates instance that will report provisioning status to an endpoint
@param goal_state: An instance of class GoalState that contains
@@ -644,17 +710,19 @@ class GoalStateHealthReporter:
incarnation=self._goal_state.incarnation,
container_id=self._goal_state.container_id,
instance_id=self._goal_state.instance_id,
- status=self.PROVISIONING_SUCCESS_STATUS)
- LOG.debug('Reporting ready to Azure fabric.')
+ status=self.PROVISIONING_SUCCESS_STATUS,
+ )
+ LOG.debug("Reporting ready to Azure fabric.")
try:
self._post_health_report(document=document)
except Exception as e:
report_diagnostic_event(
"exception while reporting ready: %s" % e,
- logger_func=LOG.error)
+ logger_func=LOG.error,
+ )
raise
- LOG.info('Reported ready to Azure fabric.')
+ LOG.info("Reported ready to Azure fabric.")
@azure_ds_telemetry_reporter
def send_failure_signal(self, description: str) -> None:
@@ -664,7 +732,8 @@ class GoalStateHealthReporter:
instance_id=self._goal_state.instance_id,
status=self.PROVISIONING_NOT_READY_STATUS,
substatus=self.PROVISIONING_FAILURE_SUBSTATUS,
- description=description)
+ description=description,
+ )
try:
self._post_health_report(document=document)
except Exception as e:
@@ -672,24 +741,33 @@ class GoalStateHealthReporter:
report_diagnostic_event(msg, logger_func=LOG.error)
raise
- LOG.warning('Reported failure to Azure fabric.')
+ LOG.warning("Reported failure to Azure fabric.")
def build_report(
- self, incarnation: str, container_id: str, instance_id: str,
- status: str, substatus=None, description=None) -> str:
- health_detail = ''
+ self,
+ incarnation: str,
+ container_id: str,
+ instance_id: str,
+ status: str,
+ substatus=None,
+ description=None,
+ ) -> str:
+ health_detail = ""
if substatus is not None:
health_detail = self.HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(
health_substatus=escape(substatus),
health_description=escape(
- description[:self.HEALTH_REPORT_DESCRIPTION_TRIM_LEN]))
+ description[: self.HEALTH_REPORT_DESCRIPTION_TRIM_LEN]
+ ),
+ )
health_report = self.HEALTH_REPORT_XML_TEMPLATE.format(
incarnation=escape(str(incarnation)),
container_id=escape(container_id),
instance_id=escape(instance_id),
health_status=escape(status),
- health_detail_subsection=health_detail)
+ health_detail_subsection=health_detail,
+ )
return health_report
@@ -717,20 +795,22 @@ class GoalStateHealthReporter:
# reporting handler that writes to the special KVP files.
time.sleep(0)
- LOG.debug('Sending health report to Azure fabric.')
+ LOG.debug("Sending health report to Azure fabric.")
url = "http://{}/machine?comp=health".format(self._endpoint)
self._azure_endpoint_client.post(
url,
data=document,
- extra_headers={'Content-Type': 'text/xml; charset=utf-8'})
- LOG.debug('Successfully sent health report to Azure fabric')
+ extra_headers={"Content-Type": "text/xml; charset=utf-8"},
+ )
+ LOG.debug("Successfully sent health report to Azure fabric")
class WALinuxAgentShim:
-
def __init__(self, fallback_lease_file=None, dhcp_options=None):
- LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s',
- fallback_lease_file)
+ LOG.debug(
+ "WALinuxAgentShim instantiated, fallback_lease_file=%s",
+ fallback_lease_file,
+ )
self.dhcpoptions = dhcp_options
self._endpoint = None
self.openssl_manager = None
@@ -749,30 +829,33 @@ class WALinuxAgentShim:
@property
def endpoint(self):
if self._endpoint is None:
- self._endpoint = self.find_endpoint(self.lease_file,
- self.dhcpoptions)
+ self._endpoint = self.find_endpoint(
+ self.lease_file, self.dhcpoptions
+ )
return self._endpoint
@staticmethod
def get_ip_from_lease_value(fallback_lease_value):
- unescaped_value = fallback_lease_value.replace('\\', '')
+ unescaped_value = fallback_lease_value.replace("\\", "")
if len(unescaped_value) > 4:
- hex_string = ''
- for hex_pair in unescaped_value.split(':'):
+ hex_string = ""
+ for hex_pair in unescaped_value.split(":"):
if len(hex_pair) == 1:
- hex_pair = '0' + hex_pair
+ hex_pair = "0" + hex_pair
hex_string += hex_pair
packed_bytes = struct.pack(
- '>L', int(hex_string.replace(':', ''), 16))
+ ">L", int(hex_string.replace(":", ""), 16)
+ )
else:
- packed_bytes = unescaped_value.encode('utf-8')
+ packed_bytes = unescaped_value.encode("utf-8")
return socket.inet_ntoa(packed_bytes)
@staticmethod
@azure_ds_telemetry_reporter
def _networkd_get_value_from_leases(leases_d=None):
return dhcp.networkd_get_option_from_leases(
- 'OPTION_245', leases_d=leases_d)
+ "OPTION_245", leases_d=leases_d
+ )
@staticmethod
@azure_ds_telemetry_reporter
@@ -790,7 +873,7 @@ class WALinuxAgentShim:
if option_name in line:
# Example line from Ubuntu
# option unknown-245 a8:3f:81:10;
- leases.append(line.strip(' ').split(' ', 2)[-1].strip(';\n"'))
+ leases.append(line.strip(" ").split(" ", 2)[-1].strip(';\n"'))
# Return the "most recent" one in the list
if len(leases) < 1:
return None
@@ -805,15 +888,16 @@ class WALinuxAgentShim:
if not os.path.exists(hooks_dir):
LOG.debug("%s not found.", hooks_dir)
return None
- hook_files = [os.path.join(hooks_dir, x)
- for x in os.listdir(hooks_dir)]
+ hook_files = [
+ os.path.join(hooks_dir, x) for x in os.listdir(hooks_dir)
+ ]
for hook_file in hook_files:
try:
- name = os.path.basename(hook_file).replace('.json', '')
+ name = os.path.basename(hook_file).replace(".json", "")
dhcp_options[name] = json.loads(util.load_file((hook_file)))
except ValueError as e:
raise ValueError(
- '{_file} is not valid JSON data'.format(_file=hook_file)
+ "{_file} is not valid JSON data".format(_file=hook_file)
) from e
return dhcp_options
@@ -825,7 +909,7 @@ class WALinuxAgentShim:
        # the MS endpoint server is given to us as DHCP option 245
_value = None
for interface in dhcp_options:
- _value = dhcp_options[interface].get('unknown_245', None)
+ _value = dhcp_options[interface].get("unknown_245", None)
if _value is not None:
LOG.debug("Endpoint server found in dhclient options")
break
@@ -855,63 +939,73 @@ class WALinuxAgentShim:
LOG.debug("Using Azure Endpoint from dhcp options")
if value is None:
report_diagnostic_event(
- 'No Azure endpoint from dhcp options. '
- 'Finding Azure endpoint from networkd...',
- logger_func=LOG.debug)
+ "No Azure endpoint from dhcp options. "
+ "Finding Azure endpoint from networkd...",
+ logger_func=LOG.debug,
+ )
value = WALinuxAgentShim._networkd_get_value_from_leases()
if value is None:
# Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json
# a dhclient exit hook that calls cloud-init-dhclient-hook
report_diagnostic_event(
- 'No Azure endpoint from networkd. '
- 'Finding Azure endpoint from hook json...',
- logger_func=LOG.debug)
+ "No Azure endpoint from networkd. "
+ "Finding Azure endpoint from hook json...",
+ logger_func=LOG.debug,
+ )
dhcp_options = WALinuxAgentShim._load_dhclient_json()
value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options)
if value is None:
# Fallback and check the leases file if unsuccessful
report_diagnostic_event(
- 'No Azure endpoint from dhclient logs. '
- 'Unable to find endpoint in dhclient logs. '
- 'Falling back to check lease files',
- logger_func=LOG.debug)
+ "No Azure endpoint from dhclient logs. "
+ "Unable to find endpoint in dhclient logs. "
+ "Falling back to check lease files",
+ logger_func=LOG.debug,
+ )
if fallback_lease_file is None:
report_diagnostic_event(
- 'No fallback lease file was specified.',
- logger_func=LOG.warning)
+ "No fallback lease file was specified.",
+ logger_func=LOG.warning,
+ )
value = None
else:
report_diagnostic_event(
- 'Looking for endpoint in lease file %s'
- % fallback_lease_file, logger_func=LOG.debug)
+ "Looking for endpoint in lease file %s"
+ % fallback_lease_file,
+ logger_func=LOG.debug,
+ )
value = WALinuxAgentShim._get_value_from_leases_file(
- fallback_lease_file)
+ fallback_lease_file
+ )
if value is None:
value = DEFAULT_WIRESERVER_ENDPOINT
report_diagnostic_event(
- 'No lease found; using default endpoint: %s' % value,
- logger_func=LOG.warning)
+ "No lease found; using default endpoint: %s" % value,
+ logger_func=LOG.warning,
+ )
endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
report_diagnostic_event(
- 'Azure endpoint found at %s' % endpoint_ip_address,
- logger_func=LOG.debug)
+ "Azure endpoint found at %s" % endpoint_ip_address,
+ logger_func=LOG.debug,
+ )
return endpoint_ip_address
@azure_ds_telemetry_reporter
def eject_iso(self, iso_dev) -> None:
try:
LOG.debug("Ejecting the provisioning iso")
- subp.subp(['eject', iso_dev])
+ subp.subp(["eject", iso_dev])
except Exception as e:
report_diagnostic_event(
"Failed ejecting the provisioning iso: %s" % e,
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
@azure_ds_telemetry_reporter
- def register_with_azure_and_fetch_data(self,
- pubkey_info=None,
- iso_dev=None) -> dict:
+ def register_with_azure_and_fetch_data(
+ self, pubkey_info=None, iso_dev=None
+ ) -> dict:
"""Gets the VM's GoalState from Azure, uses the GoalState information
to report ready/send the ready signal/provisioning complete signal to
Azure, and then uses pubkey_info to filter and obtain the user's
@@ -928,7 +1022,8 @@ class WALinuxAgentShim:
http_client_certificate = self.openssl_manager.certificate
if self.azure_endpoint_client is None:
self.azure_endpoint_client = AzureEndpointHttpClient(
- http_client_certificate)
+ http_client_certificate
+ )
goal_state = self._fetch_goal_state_from_azure(
need_certificate=http_client_certificate is not None
)
@@ -936,13 +1031,14 @@ class WALinuxAgentShim:
if pubkey_info is not None:
ssh_keys = self._get_user_pubkeys(goal_state, pubkey_info)
health_reporter = GoalStateHealthReporter(
- goal_state, self.azure_endpoint_client, self.endpoint)
+ goal_state, self.azure_endpoint_client, self.endpoint
+ )
if iso_dev is not None:
self.eject_iso(iso_dev)
health_reporter.send_ready_signal()
- return {'public-keys': ssh_keys}
+ return {"public-keys": ssh_keys}
@azure_ds_telemetry_reporter
def register_with_azure_and_report_failure(self, description: str) -> None:
@@ -955,13 +1051,14 @@ class WALinuxAgentShim:
self.azure_endpoint_client = AzureEndpointHttpClient(None)
goal_state = self._fetch_goal_state_from_azure(need_certificate=False)
health_reporter = GoalStateHealthReporter(
- goal_state, self.azure_endpoint_client, self.endpoint)
+ goal_state, self.azure_endpoint_client, self.endpoint
+ )
health_reporter.send_failure_signal(description=description)
@azure_ds_telemetry_reporter
def _fetch_goal_state_from_azure(
- self,
- need_certificate: bool) -> GoalState:
+ self, need_certificate: bool
+ ) -> GoalState:
"""Fetches the GoalState XML from the Azure endpoint, parses the XML,
and returns a GoalState object.
@@ -970,8 +1067,7 @@ class WALinuxAgentShim:
"""
unparsed_goal_state_xml = self._get_raw_goal_state_xml_from_azure()
return self._parse_raw_goal_state_xml(
- unparsed_goal_state_xml,
- need_certificate
+ unparsed_goal_state_xml, need_certificate
)
@azure_ds_telemetry_reporter
@@ -982,27 +1078,29 @@ class WALinuxAgentShim:
@return: GoalState XML string
"""
- LOG.info('Registering with Azure...')
- url = 'http://{}/machine/?comp=goalstate'.format(self.endpoint)
+ LOG.info("Registering with Azure...")
+ url = "http://{}/machine/?comp=goalstate".format(self.endpoint)
try:
with events.ReportEventStack(
- name="goalstate-retrieval",
- description="retrieve goalstate",
- parent=azure_ds_reporter):
+ name="goalstate-retrieval",
+ description="retrieve goalstate",
+ parent=azure_ds_reporter,
+ ):
response = self.azure_endpoint_client.get(url)
except Exception as e:
report_diagnostic_event(
- 'failed to register with Azure and fetch GoalState XML: %s'
- % e, logger_func=LOG.warning)
+ "failed to register with Azure and fetch GoalState XML: %s"
+ % e,
+ logger_func=LOG.warning,
+ )
raise
- LOG.debug('Successfully fetched GoalState XML.')
+ LOG.debug("Successfully fetched GoalState XML.")
return response.contents
@azure_ds_telemetry_reporter
def _parse_raw_goal_state_xml(
- self,
- unparsed_goal_state_xml: str,
- need_certificate: bool) -> GoalState:
+ self, unparsed_goal_state_xml: str, need_certificate: bool
+ ) -> GoalState:
"""Parses a GoalState XML string and returns a GoalState object.
@param unparsed_goal_state_xml: GoalState XML string
@@ -1013,23 +1111,28 @@ class WALinuxAgentShim:
goal_state = GoalState(
unparsed_goal_state_xml,
self.azure_endpoint_client,
- need_certificate
+ need_certificate,
)
except Exception as e:
report_diagnostic_event(
- 'Error processing GoalState XML: %s' % e,
- logger_func=LOG.warning)
+ "Error processing GoalState XML: %s" % e,
+ logger_func=LOG.warning,
+ )
raise
- msg = ', '.join([
- 'GoalState XML container id: %s' % goal_state.container_id,
- 'GoalState XML instance id: %s' % goal_state.instance_id,
- 'GoalState XML incarnation: %s' % goal_state.incarnation])
+ msg = ", ".join(
+ [
+ "GoalState XML container id: %s" % goal_state.container_id,
+ "GoalState XML instance id: %s" % goal_state.instance_id,
+ "GoalState XML incarnation: %s" % goal_state.incarnation,
+ ]
+ )
report_diagnostic_event(msg, logger_func=LOG.debug)
return goal_state
@azure_ds_telemetry_reporter
def _get_user_pubkeys(
- self, goal_state: GoalState, pubkey_info: list) -> list:
+ self, goal_state: GoalState, pubkey_info: list
+ ) -> list:
"""Gets and filters the VM admin user's authorized pubkeys.
The admin user in this case is the username specified as "admin"
@@ -1057,15 +1160,16 @@ class WALinuxAgentShim:
"""
ssh_keys = []
if goal_state.certificates_xml is not None and pubkey_info is not None:
- LOG.debug('Certificate XML found; parsing out public keys.')
+ LOG.debug("Certificate XML found; parsing out public keys.")
keys_by_fingerprint = self.openssl_manager.parse_certificates(
- goal_state.certificates_xml)
+ goal_state.certificates_xml
+ )
ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info)
return ssh_keys
@staticmethod
def _filter_pubkeys(keys_by_fingerprint: dict, pubkey_info: list) -> list:
- """ Filter and return only the user's actual pubkeys.
+ """Filter and return only the user's actual pubkeys.
@param keys_by_fingerprint: pubkey fingerprint -> pubkey value dict
that was obtained from GoalState Certificates XML. May contain
@@ -1078,71 +1182,84 @@ class WALinuxAgentShim:
"""
keys = []
for pubkey in pubkey_info:
- if 'value' in pubkey and pubkey['value']:
- keys.append(pubkey['value'])
- elif 'fingerprint' in pubkey and pubkey['fingerprint']:
- fingerprint = pubkey['fingerprint']
+ if "value" in pubkey and pubkey["value"]:
+ keys.append(pubkey["value"])
+ elif "fingerprint" in pubkey and pubkey["fingerprint"]:
+ fingerprint = pubkey["fingerprint"]
if fingerprint in keys_by_fingerprint:
keys.append(keys_by_fingerprint[fingerprint])
else:
- LOG.warning("ovf-env.xml specified PublicKey fingerprint "
- "%s not found in goalstate XML", fingerprint)
+ LOG.warning(
+ "ovf-env.xml specified PublicKey fingerprint "
+ "%s not found in goalstate XML",
+ fingerprint,
+ )
else:
- LOG.warning("ovf-env.xml specified PublicKey with neither "
- "value nor fingerprint: %s", pubkey)
+ LOG.warning(
+ "ovf-env.xml specified PublicKey with neither "
+ "value nor fingerprint: %s",
+ pubkey,
+ )
return keys
@azure_ds_telemetry_reporter
-def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
- pubkey_info=None, iso_dev=None):
- shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
- dhcp_options=dhcp_opts)
+def get_metadata_from_fabric(
+ fallback_lease_file=None, dhcp_opts=None, pubkey_info=None, iso_dev=None
+):
+ shim = WALinuxAgentShim(
+ fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts
+ )
try:
return shim.register_with_azure_and_fetch_data(
- pubkey_info=pubkey_info, iso_dev=iso_dev)
+ pubkey_info=pubkey_info, iso_dev=iso_dev
+ )
finally:
shim.clean_up()
@azure_ds_telemetry_reporter
-def report_failure_to_fabric(fallback_lease_file=None, dhcp_opts=None,
- description=None):
- shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
- dhcp_options=dhcp_opts)
+def report_failure_to_fabric(
+ fallback_lease_file=None, dhcp_opts=None, description=None
+):
+ shim = WALinuxAgentShim(
+ fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts
+ )
if not description:
description = DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
try:
- shim.register_with_azure_and_report_failure(
- description=description)
+ shim.register_with_azure_and_report_failure(description=description)
finally:
shim.clean_up()
def dhcp_log_cb(out, err):
report_diagnostic_event(
- "dhclient output stream: %s" % out, logger_func=LOG.debug)
+ "dhclient output stream: %s" % out, logger_func=LOG.debug
+ )
report_diagnostic_event(
- "dhclient error stream: %s" % err, logger_func=LOG.debug)
+ "dhclient error stream: %s" % err, logger_func=LOG.debug
+ )
class EphemeralDHCPv4WithReporting:
def __init__(self, reporter, nic=None):
self.reporter = reporter
self.ephemeralDHCPv4 = EphemeralDHCPv4(
- iface=nic, dhcp_log_func=dhcp_log_cb)
+ iface=nic, dhcp_log_func=dhcp_log_cb
+ )
def __enter__(self):
with events.ReportEventStack(
- name="obtain-dhcp-lease",
- description="obtain dhcp lease",
- parent=self.reporter):
+ name="obtain-dhcp-lease",
+ description="obtain dhcp lease",
+ parent=self.reporter,
+ ):
return self.ephemeralDHCPv4.__enter__()
def __exit__(self, excp_type, excp_value, excp_traceback):
- self.ephemeralDHCPv4.__exit__(
- excp_type, excp_value, excp_traceback)
+ self.ephemeralDHCPv4.__exit__(excp_type, excp_value, excp_traceback)
# vi: ts=4 expandtab
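
For context on WALinuxAgentShim.get_ip_from_lease_value() above: the wireserver address arrives as a colon-separated hex string in DHCP option 245 (for example the DEFAULT_WIRESERVER_ENDPOINT value "a8:3f:81:10"). A minimal sketch of the same decoding, with the short-value fallback omitted and a hypothetical helper name:

    import socket
    import struct

    def option_245_to_ip(value):
        hex_string = "".join(pair.zfill(2) for pair in value.split(":"))
        return socket.inet_ntoa(struct.pack(">L", int(hex_string, 16)))

    print(option_245_to_ip("a8:3f:81:10"))  # 168.63.129.16
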
diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py
index f9be4ecb..72515caf 100644
--- a/cloudinit/sources/helpers/digitalocean.py
+++ b/cloudinit/sources/helpers/digitalocean.py
@@ -8,20 +8,18 @@ import random
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import net as cloudnet
-from cloudinit import url_helper
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, url_helper, util
-NIC_MAP = {'public': 'eth0', 'private': 'eth1'}
+NIC_MAP = {"public": "eth0", "private": "eth1"}
LOG = logging.getLogger(__name__)
def assign_ipv4_link_local(distro, nic=None):
- """Bring up NIC using an address using link-local (ip4LL) IPs. On
- DigitalOcean, the link-local domain is per-droplet routed, so there
- is no risk of collisions. However, to be more safe, the ip4LL
- address is random.
+ """Bring up NIC using an address using link-local (ip4LL) IPs.
+ On DigitalOcean, the link-local domain is per-droplet routed, so there
+ is no risk of collisions. However, to be more safe, the ip4LL
+ address is random.
"""
if not nic:
@@ -29,18 +27,22 @@ def assign_ipv4_link_local(distro, nic=None):
LOG.debug("selected interface '%s' for reading metadata", nic)
if not nic:
- raise RuntimeError("unable to find interfaces to access the"
- "meta-data server. This droplet is broken.")
+ raise RuntimeError(
+ "unable to find interfaces to access the"
+ "meta-data server. This droplet is broken."
+ )
- addr = "169.254.{0}.{1}/16".format(random.randint(1, 168),
- random.randint(0, 255))
+ addr = "169.254.{0}.{1}/16".format(
+ random.randint(1, 168), random.randint(0, 255)
+ )
- ip_addr_cmd = ['ip', 'addr', 'add', addr, 'dev', nic]
- ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'up']
+ ip_addr_cmd = ["ip", "addr", "add", addr, "dev", nic]
+ ip_link_cmd = ["ip", "link", "set", "dev", nic, "up"]
- if not subp.which('ip'):
- raise RuntimeError("No 'ip' command available to configure ip4LL "
- "address")
+ if not subp.which("ip"):
+ raise RuntimeError(
+ "No 'ip' command available to configure ip4LL address"
+ )
try:
subp.subp(ip_addr_cmd)
@@ -48,8 +50,13 @@ def assign_ipv4_link_local(distro, nic=None):
subp.subp(ip_link_cmd)
LOG.debug("brought device '%s' up", nic)
except Exception:
- util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed."
- " Droplet networking will be broken", addr, nic)
+ util.logexc(
+ LOG,
+ "ip4LL address assignment of '%s' to '%s' failed."
+ " Droplet networking will be broken",
+ addr,
+ nic,
+ )
raise
return nic
@@ -63,21 +70,23 @@ def get_link_local_nic(distro):
]
if not nics:
return None
- return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, 'ifindex'))
+ return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, "ifindex"))
def del_ipv4_link_local(nic=None):
"""Remove the ip4LL address. While this is not necessary, the ip4LL
- address is extraneous and confusing to users.
+ address is extraneous and confusing to users.
"""
if not nic:
- LOG.debug("no link_local address interface defined, skipping link "
- "local address cleanup")
+ LOG.debug(
+ "no link_local address interface defined, skipping link "
+ "local address cleanup"
+ )
return
LOG.debug("cleaning up ipv4LL address")
- ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic]
+ ip_addr_cmd = ["ip", "addr", "flush", "dev", nic]
try:
subp.subp(ip_addr_cmd)
@@ -89,44 +98,47 @@ def del_ipv4_link_local(nic=None):
def convert_network_configuration(config, dns_servers):
"""Convert the DigitalOcean Network description into Cloud-init's netconfig
- format.
-
- Example JSON:
- {'public': [
- {'mac': '04:01:58:27:7f:01',
- 'ipv4': {'gateway': '45.55.32.1',
- 'netmask': '255.255.224.0',
- 'ip_address': '45.55.50.93'},
- 'anchor_ipv4': {
- 'gateway': '10.17.0.1',
- 'netmask': '255.255.0.0',
- 'ip_address': '10.17.0.9'},
- 'type': 'public',
- 'ipv6': {'gateway': '....',
- 'ip_address': '....',
- 'cidr': 64}}
- ],
- 'private': [
- {'mac': '04:01:58:27:7f:02',
- 'ipv4': {'gateway': '10.132.0.1',
- 'netmask': '255.255.0.0',
- 'ip_address': '10.132.75.35'},
- 'type': 'private'}
- ]
- }
+ format.
+
+ Example JSON:
+ {'public': [
+ {'mac': '04:01:58:27:7f:01',
+ 'ipv4': {'gateway': '45.55.32.1',
+ 'netmask': '255.255.224.0',
+ 'ip_address': '45.55.50.93'},
+ 'anchor_ipv4': {
+ 'gateway': '10.17.0.1',
+ 'netmask': '255.255.0.0',
+ 'ip_address': '10.17.0.9'},
+ 'type': 'public',
+ 'ipv6': {'gateway': '....',
+ 'ip_address': '....',
+ 'cidr': 64}}
+ ],
+ 'private': [
+ {'mac': '04:01:58:27:7f:02',
+ 'ipv4': {'gateway': '10.132.0.1',
+ 'netmask': '255.255.0.0',
+ 'ip_address': '10.132.75.35'},
+ 'type': 'private'}
+ ]
+ }
"""
def _get_subnet_part(pcfg):
- subpart = {'type': 'static',
- 'control': 'auto',
- 'address': pcfg.get('ip_address'),
- 'gateway': pcfg.get('gateway')}
-
- if ":" in pcfg.get('ip_address'):
- subpart['address'] = "{0}/{1}".format(pcfg.get('ip_address'),
- pcfg.get('cidr'))
+ subpart = {
+ "type": "static",
+ "control": "auto",
+ "address": pcfg.get("ip_address"),
+ "gateway": pcfg.get("gateway"),
+ }
+
+ if ":" in pcfg.get("ip_address"):
+ subpart["address"] = "{0}/{1}".format(
+ pcfg.get("ip_address"), pcfg.get("cidr")
+ )
else:
- subpart['netmask'] = pcfg.get('netmask')
+ subpart["netmask"] = pcfg.get("netmask")
return subpart
@@ -138,54 +150,66 @@ def convert_network_configuration(config, dns_servers):
nic = config[n][0]
LOG.debug("considering %s", nic)
- mac_address = nic.get('mac')
+ mac_address = nic.get("mac")
if mac_address not in macs_to_nics:
- raise RuntimeError("Did not find network interface on system "
- "with mac '%s'. Cannot apply configuration: %s"
- % (mac_address, nic))
+ raise RuntimeError(
+ "Did not find network interface on system "
+ "with mac '%s'. Cannot apply configuration: %s"
+ % (mac_address, nic)
+ )
sysfs_name = macs_to_nics.get(mac_address)
- nic_type = nic.get('type', 'unknown')
+ nic_type = nic.get("type", "unknown")
if_name = NIC_MAP.get(nic_type, sysfs_name)
if if_name != sysfs_name:
- LOG.debug("Found %s interface '%s' on '%s', assigned name of '%s'",
- nic_type, mac_address, sysfs_name, if_name)
+ LOG.debug(
+ "Found %s interface '%s' on '%s', assigned name of '%s'",
+ nic_type,
+ mac_address,
+ sysfs_name,
+ if_name,
+ )
else:
- msg = ("Found interface '%s' on '%s', which is not a public "
- "or private interface. Using default system naming.")
+ msg = (
+ "Found interface '%s' on '%s', which is not a public "
+ "or private interface. Using default system naming."
+ )
LOG.debug(msg, mac_address, sysfs_name)
- ncfg = {'type': 'physical',
- 'mac_address': mac_address,
- 'name': if_name}
+ ncfg = {
+ "type": "physical",
+ "mac_address": mac_address,
+ "name": if_name,
+ }
subnets = []
- for netdef in ('ipv4', 'ipv6', 'anchor_ipv4', 'anchor_ipv6'):
+ for netdef in ("ipv4", "ipv6", "anchor_ipv4", "anchor_ipv6"):
raw_subnet = nic.get(netdef, None)
if not raw_subnet:
continue
sub_part = _get_subnet_part(raw_subnet)
if nic_type != "public" or "anchor" in netdef:
- del sub_part['gateway']
+ del sub_part["gateway"]
subnets.append(sub_part)
- ncfg['subnets'] = subnets
+ ncfg["subnets"] = subnets
nic_configs.append(ncfg)
LOG.debug("nic '%s' configuration: %s", if_name, ncfg)
if dns_servers:
LOG.debug("added dns servers: %s", dns_servers)
- nic_configs.append({'type': 'nameserver', 'address': dns_servers})
+ nic_configs.append({"type": "nameserver", "address": dns_servers})
- return {'version': 1, 'config': nic_configs}
+ return {"version": 1, "config": nic_configs}
def read_metadata(url, timeout=2, sec_between=2, retries=30):
- response = url_helper.readurl(url, timeout=timeout,
- sec_between=sec_between, retries=retries)
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
if not response.ok():
raise RuntimeError("unable to read metadata at %s" % url)
return json.loads(response.contents.decode())
@@ -202,16 +226,21 @@ def read_sysinfo():
droplet_id = dmi.read_dmi_data("system-serial-number")
if droplet_id:
- LOG.debug("system identified via SMBIOS as DigitalOcean Droplet: %s",
- droplet_id)
+ LOG.debug(
+ "system identified via SMBIOS as DigitalOcean Droplet: %s",
+ droplet_id,
+ )
else:
- msg = ("system identified via SMBIOS as a DigitalOcean "
- "Droplet, but did not provide an ID. Please file a "
- "support ticket at: "
- "https://cloud.digitalocean.com/support/tickets/new")
+ msg = (
+ "system identified via SMBIOS as a DigitalOcean "
+ "Droplet, but did not provide an ID. Please file a "
+ "support ticket at: "
+ "https://cloud.digitalocean.com/support/tickets/new"
+ )
LOG.critical(msg)
raise RuntimeError(msg)
return (True, droplet_id)
+
# vi: ts=4 expandtab
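
To illustrate the _get_subnet_part() logic reshaped above: IPv4 entries keep a separate netmask, while IPv6 entries fold the CIDR prefix into the address. A standalone sketch using the public IPv4 values from the docstring's example JSON:

    def subnet_part(pcfg):
        part = {"type": "static", "control": "auto",
                "address": pcfg.get("ip_address"), "gateway": pcfg.get("gateway")}
        if ":" in pcfg.get("ip_address"):
            part["address"] = "{0}/{1}".format(pcfg.get("ip_address"), pcfg.get("cidr"))
        else:
            part["netmask"] = pcfg.get("netmask")
        return part

    print(subnet_part({"ip_address": "45.55.50.93", "netmask": "255.255.224.0",
                       "gateway": "45.55.32.1"}))
    # {'type': 'static', 'control': 'auto', 'address': '45.55.50.93',
    #  'gateway': '45.55.32.1', 'netmask': '255.255.224.0'}
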
diff --git a/cloudinit/sources/helpers/hetzner.py b/cloudinit/sources/helpers/hetzner.py
index 33dc4c53..592ae80b 100644
--- a/cloudinit/sources/helpers/hetzner.py
+++ b/cloudinit/sources/helpers/hetzner.py
@@ -3,24 +3,25 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import url_helper
-from cloudinit import util
-
import base64
import binascii
+from cloudinit import url_helper, util
+
def read_metadata(url, timeout=2, sec_between=2, retries=30):
- response = url_helper.readurl(url, timeout=timeout,
- sec_between=sec_between, retries=retries)
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
if not response.ok():
raise RuntimeError("unable to read metadata at %s" % url)
return util.load_yaml(response.contents.decode())
def read_userdata(url, timeout=2, sec_between=2, retries=30):
- response = url_helper.readurl(url, timeout=timeout,
- sec_between=sec_between, retries=retries)
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
if not response.ok():
raise RuntimeError("unable to read userdata at %s" % url)
return response.contents
diff --git a/cloudinit/sources/helpers/netlink.py b/cloudinit/sources/helpers/netlink.py
index e13d6834..2953e858 100644
--- a/cloudinit/sources/helpers/netlink.py
+++ b/cloudinit/sources/helpers/netlink.py
@@ -2,14 +2,14 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import log as logging
-from cloudinit import util
-from collections import namedtuple
-
import os
import select
import socket
import struct
+from collections import namedtuple
+
+from cloudinit import log as logging
+from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -47,29 +47,30 @@ OPER_TESTING = 4
OPER_DORMANT = 5
OPER_UP = 6
-RTAAttr = namedtuple('RTAAttr', ['length', 'rta_type', 'data'])
-InterfaceOperstate = namedtuple('InterfaceOperstate', ['ifname', 'operstate'])
-NetlinkHeader = namedtuple('NetlinkHeader', ['length', 'type', 'flags', 'seq',
- 'pid'])
+RTAAttr = namedtuple("RTAAttr", ["length", "rta_type", "data"])
+InterfaceOperstate = namedtuple("InterfaceOperstate", ["ifname", "operstate"])
+NetlinkHeader = namedtuple(
+ "NetlinkHeader", ["length", "type", "flags", "seq", "pid"]
+)
class NetlinkCreateSocketError(RuntimeError):
- '''Raised if netlink socket fails during create or bind.'''
+ """Raised if netlink socket fails during create or bind."""
def create_bound_netlink_socket():
- '''Creates netlink socket and bind on netlink group to catch interface
+ """Creates netlink socket and bind on netlink group to catch interface
down/up events. The socket will bound only on RTMGRP_LINK (which only
includes RTM_NEWLINK/RTM_DELLINK/RTM_GETLINK events). The socket is set to
non-blocking mode since we're only receiving messages.
:returns: netlink socket in non-blocking mode
:raises: NetlinkCreateSocketError
- '''
+ """
try:
- netlink_socket = socket.socket(socket.AF_NETLINK,
- socket.SOCK_RAW,
- socket.NETLINK_ROUTE)
+ netlink_socket = socket.socket(
+ socket.AF_NETLINK, socket.SOCK_RAW, socket.NETLINK_ROUTE
+ )
netlink_socket.bind((os.getpid(), RTMGRP_LINK))
netlink_socket.setblocking(0)
except socket.error as e:
@@ -80,7 +81,7 @@ def create_bound_netlink_socket():
def get_netlink_msg_header(data):
- '''Gets netlink message type and length
+ """Gets netlink message type and length
:param: data read from netlink socket
:returns: netlink message type
@@ -92,18 +93,20 @@ def get_netlink_msg_header(data):
__u32 nlmsg_seq; /* Sequence number */
__u32 nlmsg_pid; /* Sender port ID */
};
- '''
- assert (data is not None), ("data is none")
- assert (len(data) >= NLMSGHDR_SIZE), (
- "data is smaller than netlink message header")
- msg_len, msg_type, flags, seq, pid = struct.unpack(NLMSGHDR_FMT,
- data[:MSG_TYPE_OFFSET])
+ """
+ assert data is not None, "data is none"
+ assert (
+ len(data) >= NLMSGHDR_SIZE
+ ), "data is smaller than netlink message header"
+ msg_len, msg_type, flags, seq, pid = struct.unpack(
+ NLMSGHDR_FMT, data[:MSG_TYPE_OFFSET]
+ )
LOG.debug("Got netlink msg of type %d", msg_type)
return NetlinkHeader(msg_len, msg_type, flags, seq, pid)
def read_netlink_socket(netlink_socket, timeout=None):
- '''Select and read from the netlink socket if ready.
+ """Select and read from the netlink socket if ready.
:param: netlink_socket: specify which socket object to read from
:param: timeout: specify a timeout value (integer) to wait while reading,
@@ -111,8 +114,8 @@ def read_netlink_socket(netlink_socket, timeout=None):
:returns: string of data read (max length = <MAX_SIZE>) from socket,
if no data read, returns None
:raises: AssertionError if netlink_socket is None
- '''
- assert (netlink_socket is not None), ("netlink socket is none")
+ """
+ assert netlink_socket is not None, "netlink socket is none"
read_set, _, _ = select.select([netlink_socket], [], [], timeout)
    # In case of timeout, read_set doesn't contain netlink socket.
# just return from this function
@@ -126,32 +129,33 @@ def read_netlink_socket(netlink_socket, timeout=None):
def unpack_rta_attr(data, offset):
- '''Unpack a single rta attribute.
+ """Unpack a single rta attribute.
:param: data: string of data read from netlink socket
:param: offset: starting offset of RTA Attribute
:return: RTAAttr object with length, type and data. On error, return None.
:raises: AssertionError if data is None or offset is not integer.
- '''
- assert (data is not None), ("data is none")
- assert (type(offset) == int), ("offset is not integer")
- assert (offset >= RTATTR_START_OFFSET), (
- "rta offset is less than expected length")
+ """
+ assert data is not None, "data is none"
+ assert type(offset) == int, "offset is not integer"
+ assert (
+ offset >= RTATTR_START_OFFSET
+ ), "rta offset is less than expected length"
length = rta_type = 0
attr_data = None
try:
length = struct.unpack_from("H", data, offset=offset)[0]
- rta_type = struct.unpack_from("H", data, offset=offset+2)[0]
+ rta_type = struct.unpack_from("H", data, offset=offset + 2)[0]
except struct.error:
return None # Should mean our offset is >= remaining data
# Unpack just the attribute's data. Offset by 4 to skip length/type header
- attr_data = data[offset+RTA_DATA_START_OFFSET:offset+length]
+ attr_data = data[offset + RTA_DATA_START_OFFSET : offset + length]
return RTAAttr(length, rta_type, attr_data)
def read_rta_oper_state(data):
- '''Reads Interface name and operational state from RTA Data.
+ """Reads Interface name and operational state from RTA Data.
:param: data: string of data read from netlink socket
:returns: InterfaceOperstate object containing if_name and oper_state.
@@ -159,10 +163,11 @@ def read_rta_oper_state(data):
IFLA_IFNAME messages.
:raises: AssertionError if data is None or length of data is
smaller than RTATTR_START_OFFSET.
- '''
- assert (data is not None), ("data is none")
- assert (len(data) > RTATTR_START_OFFSET), (
- "length of data is smaller than RTATTR_START_OFFSET")
+ """
+ assert data is not None, "data is none"
+ assert (
+ len(data) > RTATTR_START_OFFSET
+ ), "length of data is smaller than RTATTR_START_OFFSET"
ifname = operstate = None
offset = RTATTR_START_OFFSET
while offset <= len(data):
@@ -170,15 +175,16 @@ def read_rta_oper_state(data):
if not attr or attr.length == 0:
break
# Each attribute is 4-byte aligned. Determine pad length.
- padlen = (PAD_ALIGNMENT -
- (attr.length % PAD_ALIGNMENT)) % PAD_ALIGNMENT
+ padlen = (
+ PAD_ALIGNMENT - (attr.length % PAD_ALIGNMENT)
+ ) % PAD_ALIGNMENT
offset += attr.length + padlen
if attr.rta_type == IFLA_OPERSTATE:
operstate = ord(attr.data)
elif attr.rta_type == IFLA_IFNAME:
- interface_name = util.decode_binary(attr.data, 'utf-8')
- ifname = interface_name.strip('\0')
+ interface_name = util.decode_binary(attr.data, "utf-8")
+ ifname = interface_name.strip("\0")
if not ifname or operstate is None:
return None
LOG.debug("rta attrs: ifname %s operstate %d", ifname, operstate)
@@ -186,12 +192,12 @@ def read_rta_oper_state(data):
def wait_for_nic_attach_event(netlink_socket, existing_nics):
- '''Block until a single nic is attached.
+ """Block until a single nic is attached.
:param: netlink_socket: netlink_socket to receive events
:param: existing_nics: List of existing nics so that we can skip them.
:raises: AssertionError if netlink_socket is none.
- '''
+ """
LOG.debug("Preparing to wait for nic attach.")
ifname = None
@@ -204,19 +210,21 @@ def wait_for_nic_attach_event(netlink_socket, existing_nics):
# We can return even if the operational state of the new nic is DOWN
# because we set it to UP before doing dhcp.
- read_netlink_messages(netlink_socket,
- None,
- [RTM_NEWLINK],
- [OPER_UP, OPER_DOWN],
- should_continue_cb)
+ read_netlink_messages(
+ netlink_socket,
+ None,
+ [RTM_NEWLINK],
+ [OPER_UP, OPER_DOWN],
+ should_continue_cb,
+ )
return ifname
def wait_for_nic_detach_event(netlink_socket):
- '''Block until a single nic is detached and its operational state is down.
+ """Block until a single nic is detached and its operational state is down.
:param: netlink_socket: netlink_socket to receive events.
- '''
+ """
LOG.debug("Preparing to wait for nic detach.")
ifname = None
@@ -225,16 +233,14 @@ def wait_for_nic_detach_event(netlink_socket):
ifname = iname
return False
- read_netlink_messages(netlink_socket,
- None,
- [RTM_DELLINK],
- [OPER_DOWN],
- should_continue_cb)
+ read_netlink_messages(
+ netlink_socket, None, [RTM_DELLINK], [OPER_DOWN], should_continue_cb
+ )
return ifname
def wait_for_media_disconnect_connect(netlink_socket, ifname):
- '''Block until media disconnect and connect has happened on an interface.
+ """Block until media disconnect and connect has happened on an interface.
Listens on netlink socket to receive netlink events and when the carrier
changes from 0 to 1, it considers event has happened and
return from this function
@@ -242,10 +248,10 @@ def wait_for_media_disconnect_connect(netlink_socket, ifname):
:param: netlink_socket: netlink_socket to receive events
:param: ifname: Interface name to lookout for netlink events
:raises: AssertionError if netlink_socket is None or ifname is None.
- '''
- assert (netlink_socket is not None), ("netlink socket is none")
- assert (ifname is not None), ("interface name is none")
- assert (len(ifname) > 0), ("interface name cannot be empty")
+ """
+ assert netlink_socket is not None, "netlink socket is none"
+ assert ifname is not None, "interface name is none"
+ assert len(ifname) > 0, "interface name cannot be empty"
def should_continue_cb(iname, carrier, prevCarrier):
# check for carrier down, up sequence
@@ -256,19 +262,23 @@ def wait_for_media_disconnect_connect(netlink_socket, ifname):
return True
LOG.debug("Wait for media disconnect and reconnect to happen")
- read_netlink_messages(netlink_socket,
- ifname,
- [RTM_NEWLINK, RTM_DELLINK],
- [OPER_UP, OPER_DOWN],
- should_continue_cb)
-
-
-def read_netlink_messages(netlink_socket,
- ifname_filter,
- rtm_types,
- operstates,
- should_continue_callback):
- ''' Reads from the netlink socket until the condition specified by
+ read_netlink_messages(
+ netlink_socket,
+ ifname,
+ [RTM_NEWLINK, RTM_DELLINK],
+ [OPER_UP, OPER_DOWN],
+ should_continue_cb,
+ )
+
+
+def read_netlink_messages(
+ netlink_socket,
+ ifname_filter,
+ rtm_types,
+ operstates,
+ should_continue_callback,
+):
+ """Reads from the netlink socket until the condition specified by
the continuation callback is met.
:param: netlink_socket: netlink_socket to receive events.
@@ -276,7 +286,7 @@ def read_netlink_messages(netlink_socket,
:param: rtm_types: Type of netlink events to listen for.
:param: operstates: Operational states to listen.
:param: should_continue_callback: Specifies when to stop listening.
- '''
+ """
if netlink_socket is None:
raise RuntimeError("Netlink socket is none")
data = bytes()
@@ -286,9 +296,9 @@ def read_netlink_messages(netlink_socket,
recv_data = read_netlink_socket(netlink_socket, SELECT_TIMEOUT)
if recv_data is None:
continue
- LOG.debug('read %d bytes from socket', len(recv_data))
+ LOG.debug("read %d bytes from socket", len(recv_data))
data += recv_data
- LOG.debug('Length of data after concat %d', len(data))
+ LOG.debug("Length of data after concat %d", len(data))
offset = 0
datalen = len(data)
while offset < datalen:
@@ -300,30 +310,37 @@ def read_netlink_messages(netlink_socket,
if len(nl_msg) < nlheader.length:
LOG.debug("Partial data. Smaller than netlink message")
break
- padlen = (nlheader.length+PAD_ALIGNMENT-1) & ~(PAD_ALIGNMENT-1)
+ padlen = (nlheader.length + PAD_ALIGNMENT - 1) & ~(
+ PAD_ALIGNMENT - 1
+ )
offset = offset + padlen
- LOG.debug('offset to next netlink message: %d', offset)
+ LOG.debug("offset to next netlink message: %d", offset)
# Continue if we are not interested in this message.
if nlheader.type not in rtm_types:
continue
interface_state = read_rta_oper_state(nl_msg)
if interface_state is None:
- LOG.debug('Failed to read rta attributes: %s', interface_state)
+ LOG.debug("Failed to read rta attributes: %s", interface_state)
continue
- if (ifname_filter is not None and
- interface_state.ifname != ifname_filter):
+ if (
+ ifname_filter is not None
+ and interface_state.ifname != ifname_filter
+ ):
LOG.debug(
"Ignored netlink event on interface %s. Waiting for %s.",
- interface_state.ifname, ifname_filter)
+ interface_state.ifname,
+ ifname_filter,
+ )
continue
if interface_state.operstate not in operstates:
continue
prevCarrier = carrier
carrier = interface_state.operstate
- if not should_continue_callback(interface_state.ifname,
- carrier,
- prevCarrier):
+ if not should_continue_callback(
+ interface_state.ifname, carrier, prevCarrier
+ ):
return
data = data[offset:]
+
# vi: ts=4 expandtab
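
A minimal sketch of how a caller drives the reflowed read_netlink_messages()
loop, mirroring wait_for_media_disconnect_connect above. The socket setup
helper create_bound_netlink_socket() is assumed to exist elsewhere in
netlink.py; the interface name and callback logic are illustrative only:

    def wait_for_carrier_flap(ifname="eth0"):
        # Illustrative only; not part of this change.
        netlink_socket = create_bound_netlink_socket()  # assumed helper
        try:
            def should_continue_cb(iname, carrier, prev_carrier):
                # Keep reading until OPER_DOWN is followed by OPER_UP.
                return not (prev_carrier == OPER_DOWN and carrier == OPER_UP)

            read_netlink_messages(
                netlink_socket,
                ifname,                      # only react to this interface
                [RTM_NEWLINK, RTM_DELLINK],  # link-level netlink messages
                [OPER_UP, OPER_DOWN],        # operational states of interest
                should_continue_cb,
            )
        finally:
            netlink_socket.close()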
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 4f566e64..a42543e4 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -14,11 +14,7 @@ import os
from cloudinit import ec2_utils
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import net, sources, subp, url_helper, util
from cloudinit.sources import BrokenMetadata
# See https://docs.openstack.org/user-guide/cli-config-drive.html
@@ -27,30 +23,30 @@ LOG = logging.getLogger(__name__)
FILES_V1 = {
# Path <-> (metadata key name, translator function, default value)
- 'etc/network/interfaces': ('network_config', lambda x: x, ''),
- 'meta.js': ('meta_js', util.load_json, {}),
- "root/.ssh/authorized_keys": ('authorized_keys', lambda x: x, ''),
+ "etc/network/interfaces": ("network_config", lambda x: x, ""),
+ "meta.js": ("meta_js", util.load_json, {}),
+ "root/.ssh/authorized_keys": ("authorized_keys", lambda x: x, ""),
}
KEY_COPIES = (
# Cloud-init metadata names <-> (metadata key, is required)
- ('local-hostname', 'hostname', False),
- ('instance-id', 'uuid', True),
+ ("local-hostname", "hostname", False),
+ ("instance-id", "uuid", True),
)
# Versions and names taken from nova source nova/api/metadata/base.py
-OS_LATEST = 'latest'
-OS_FOLSOM = '2012-08-10'
-OS_GRIZZLY = '2013-04-04'
-OS_HAVANA = '2013-10-17'
-OS_LIBERTY = '2015-10-15'
+OS_LATEST = "latest"
+OS_FOLSOM = "2012-08-10"
+OS_GRIZZLY = "2013-04-04"
+OS_HAVANA = "2013-10-17"
+OS_LIBERTY = "2015-10-15"
# NEWTON_ONE adds 'devices' to md (sriov-pf-passthrough-neutron-port-vlan)
-OS_NEWTON_ONE = '2016-06-30'
+OS_NEWTON_ONE = "2016-06-30"
# NEWTON_TWO adds vendor_data2.json (vendordata-reboot)
-OS_NEWTON_TWO = '2016-10-06'
+OS_NEWTON_TWO = "2016-10-06"
# OS_OCATA adds 'vif' field to devices (sriov-pf-passthrough-neutron-port-vlan)
-OS_OCATA = '2017-02-22'
+OS_OCATA = "2017-02-22"
# OS_ROCKY adds a vf_trusted field to devices (sriov-trusted-vfs)
-OS_ROCKY = '2018-08-27'
+OS_ROCKY = "2018-08-27"
# keep this in chronological order. new supported versions go at the end.
@@ -67,18 +63,18 @@ OS_VERSIONS = (
KNOWN_PHYSICAL_TYPES = (
None,
- 'bgpovs', # not present in OpenStack upstream but used on OVH cloud.
- 'bridge',
- 'cascading', # not present in OpenStack upstream, used on OpenTelekomCloud
- 'dvs',
- 'ethernet',
- 'hw_veb',
- 'hyperv',
- 'ovs',
- 'phy',
- 'tap',
- 'vhostuser',
- 'vif',
+ "bgpovs", # not present in OpenStack upstream but used on OVH cloud.
+ "bridge",
+ "cascading", # not present in OpenStack upstream, used on OpenTelekomCloud
+ "dvs",
+ "ethernet",
+ "hw_veb",
+ "hyperv",
+ "ovs",
+ "phy",
+ "tap",
+ "vhostuser",
+ "vif",
)
@@ -90,7 +86,7 @@ class SourceMixin(object):
def _ec2_name_to_device(self, name):
if not self.ec2_metadata:
return None
- bdm = self.ec2_metadata.get('block-device-mapping', {})
+ bdm = self.ec2_metadata.get("block-device-mapping", {})
for (ent_name, device) in bdm.items():
if name == ent_name:
return device
@@ -105,9 +101,9 @@ class SourceMixin(object):
def _os_name_to_device(self, name):
device = None
try:
- criteria = 'LABEL=%s' % (name)
- if name == 'swap':
- criteria = 'TYPE=%s' % (name)
+ criteria = "LABEL=%s" % (name)
+ if name == "swap":
+ criteria = "TYPE=%s" % (name)
dev_entries = util.find_devs_with(criteria)
if dev_entries:
device = dev_entries[0]
@@ -135,10 +131,10 @@ class SourceMixin(object):
return None
# Try the ec2 mapping first
names = [name]
- if name == 'root':
- names.insert(0, 'ami')
- if name == 'ami':
- names.append('root')
+ if name == "root":
+ names.insert(0, "ami")
+ if name == "ami":
+ names.append("root")
device = None
LOG.debug("Using ec2 style lookup to find device %s", names)
for n in names:
@@ -163,7 +159,6 @@ class SourceMixin(object):
class BaseReader(metaclass=abc.ABCMeta):
-
def __init__(self, base_path):
self.base_path = base_path
@@ -187,8 +182,11 @@ class BaseReader(metaclass=abc.ABCMeta):
try:
versions_available = self._fetch_available_versions()
except Exception as e:
- LOG.debug("Unable to read openstack versions from %s due to: %s",
- self.base_path, e)
+ LOG.debug(
+ "Unable to read openstack versions from %s due to: %s",
+ self.base_path,
+ e,
+ )
versions_available = []
# openstack.OS_VERSIONS is stored in chronological order, so
@@ -202,12 +200,15 @@ class BaseReader(metaclass=abc.ABCMeta):
selected_version = potential_version
break
- LOG.debug("Selected version '%s' from %s", selected_version,
- versions_available)
+ LOG.debug(
+ "Selected version '%s' from %s",
+ selected_version,
+ versions_available,
+ )
return selected_version
def _read_content_path(self, item, decode=False):
- path = item.get('content_path', '').lstrip("/")
+ path = item.get("content_path", "").lstrip("/")
path_pieces = path.split("/")
valid_pieces = [p for p in path_pieces if len(p)]
if not valid_pieces:
@@ -225,43 +226,44 @@ class BaseReader(metaclass=abc.ABCMeta):
"""
load_json_anytype = functools.partial(
- util.load_json, root_types=(dict, list, str))
+ util.load_json, root_types=(dict, list, str)
+ )
def datafiles(version):
files = {}
- files['metadata'] = (
+ files["metadata"] = (
# File path to read
- self._path_join("openstack", version, 'meta_data.json'),
+ self._path_join("openstack", version, "meta_data.json"),
# Is it required?
True,
# Translator function (applied after loading)
util.load_json,
)
- files['userdata'] = (
- self._path_join("openstack", version, 'user_data'),
+ files["userdata"] = (
+ self._path_join("openstack", version, "user_data"),
False,
lambda x: x,
)
- files['vendordata'] = (
- self._path_join("openstack", version, 'vendor_data.json'),
+ files["vendordata"] = (
+ self._path_join("openstack", version, "vendor_data.json"),
False,
load_json_anytype,
)
- files['vendordata2'] = (
- self._path_join("openstack", version, 'vendor_data2.json'),
+ files["vendordata2"] = (
+ self._path_join("openstack", version, "vendor_data2.json"),
False,
load_json_anytype,
)
- files['networkdata'] = (
- self._path_join("openstack", version, 'network_data.json'),
+ files["networkdata"] = (
+ self._path_join("openstack", version, "network_data.json"),
False,
load_json_anytype,
)
return files
results = {
- 'userdata': '',
- 'version': 2,
+ "userdata": "",
+ "version": 2,
}
data = datafiles(self._find_working_version())
for (name, (path, required, translator)) in data.items():
@@ -272,11 +274,13 @@ class BaseReader(metaclass=abc.ABCMeta):
data = self._path_read(path)
except IOError as e:
if not required:
- LOG.debug("Failed reading optional path %s due"
- " to: %s", path, e)
+ LOG.debug(
+ "Failed reading optional path %s due to: %s", path, e
+ )
else:
- LOG.debug("Failed reading mandatory path %s due"
- " to: %s", path, e)
+ LOG.debug(
+ "Failed reading mandatory path %s due to: %s", path, e
+ )
else:
found = True
if required and not found:
@@ -291,11 +295,11 @@ class BaseReader(metaclass=abc.ABCMeta):
if found:
results[name] = data
- metadata = results['metadata']
- if 'random_seed' in metadata:
- random_seed = metadata['random_seed']
+ metadata = results["metadata"]
+ if "random_seed" in metadata:
+ random_seed = metadata["random_seed"]
try:
- metadata['random_seed'] = base64.b64decode(random_seed)
+ metadata["random_seed"] = base64.b64decode(random_seed)
except (ValueError, TypeError) as e:
raise BrokenMetadata(
"Badly formatted metadata random_seed entry: %s" % e
@@ -303,18 +307,18 @@ class BaseReader(metaclass=abc.ABCMeta):
# load any files that were provided
files = {}
- metadata_files = metadata.get('files', [])
+ metadata_files = metadata.get("files", [])
for item in metadata_files:
- if 'path' not in item:
+ if "path" not in item:
continue
- path = item['path']
+ path = item["path"]
try:
files[path] = self._read_content_path(item)
except Exception as e:
raise BrokenMetadata(
"Failed to read provided file %s: %s" % (path, e)
) from e
- results['files'] = files
+ results["files"] = files
# The 'network_config' item in metadata is a content pointer
# to the network config that should be applied. It is just a
@@ -323,7 +327,7 @@ class BaseReader(metaclass=abc.ABCMeta):
if net_item:
try:
content = self._read_content_path(net_item, decode=True)
- results['network_config'] = content
+ results["network_config"] = content
except IOError as e:
raise BrokenMetadata(
"Failed to read network configuration: %s" % (e)
@@ -334,12 +338,12 @@ class BaseReader(metaclass=abc.ABCMeta):
# if they specify 'dsmode' they're indicating the mode that they intend
# for this datasource to operate in.
try:
- results['dsmode'] = metadata['meta']['dsmode']
+ results["dsmode"] = metadata["meta"]["dsmode"]
except KeyError:
pass
# Read any ec2-metadata (if applicable)
- results['ec2-metadata'] = self._read_ec2_metadata()
+ results["ec2-metadata"] = self._read_ec2_metadata()
# Perform some misc. metadata key renames...
for (target_key, source_key, is_required) in KEY_COPIES:
@@ -364,15 +368,19 @@ class ConfigDriveReader(BaseReader):
def _fetch_available_versions(self):
if self._versions is None:
- path = self._path_join(self.base_path, 'openstack')
- found = [d for d in os.listdir(path)
- if os.path.isdir(os.path.join(path))]
+ path = self._path_join(self.base_path, "openstack")
+ found = [
+ d
+ for d in os.listdir(path)
+ if os.path.isdir(os.path.join(path))
+ ]
self._versions = sorted(found)
return self._versions
def _read_ec2_metadata(self):
- path = self._path_join(self.base_path,
- 'ec2', 'latest', 'meta-data.json')
+ path = self._path_join(
+ self.base_path, "ec2", "latest", "meta-data.json"
+ )
if not os.path.exists(path):
return {}
else:
@@ -419,14 +427,14 @@ class ConfigDriveReader(BaseReader):
else:
md[key] = copy.deepcopy(default)
- keydata = md['authorized_keys']
- meta_js = md['meta_js']
+ keydata = md["authorized_keys"]
+ meta_js = md["meta_js"]
# keydata in meta_js is preferred over "injected"
- keydata = meta_js.get('public-keys', keydata)
+ keydata = meta_js.get("public-keys", keydata)
if keydata:
lines = keydata.splitlines()
- md['public-keys'] = [
+ md["public-keys"] = [
line
for line in lines
if len(line) and not line.startswith("#")
@@ -434,25 +442,25 @@ class ConfigDriveReader(BaseReader):
# config-drive-v1 has no way for openstack to provide the instance-id
# so we copy that into metadata from the user input
- if 'instance-id' in meta_js:
- md['instance-id'] = meta_js['instance-id']
+ if "instance-id" in meta_js:
+ md["instance-id"] = meta_js["instance-id"]
results = {
- 'version': 1,
- 'metadata': md,
+ "version": 1,
+ "metadata": md,
}
# allow the user to specify 'dsmode' in a meta tag
- if 'dsmode' in meta_js:
- results['dsmode'] = meta_js['dsmode']
+ if "dsmode" in meta_js:
+ results["dsmode"] = meta_js["dsmode"]
# config-drive-v1 has no way of specifying user-data, so the user has
# to cheat and stuff it in a meta tag also.
- results['userdata'] = meta_js.get('user-data', '')
+ results["userdata"] = meta_js.get("user-data", "")
# this implementation does not support files other than
# network/interfaces and authorized_keys...
- results['files'] = {}
+ results["files"] = {}
return results
@@ -481,7 +489,6 @@ class MetadataReader(BaseReader):
return self._versions
def _path_read(self, path, decode=False):
-
def should_retry_cb(_request_args, cause):
try:
code = int(cause.code)
@@ -492,11 +499,13 @@ class MetadataReader(BaseReader):
pass
return True
- response = url_helper.readurl(path,
- retries=self.retries,
- ssl_details=self.ssl_details,
- timeout=self.timeout,
- exception_cb=should_retry_cb)
+ response = url_helper.readurl(
+ path,
+ retries=self.retries,
+ ssl_details=self.ssl_details,
+ timeout=self.timeout,
+ exception_cb=should_retry_cb,
+ )
if decode:
return response.contents.decode()
else:
@@ -506,9 +515,11 @@ class MetadataReader(BaseReader):
return url_helper.combine_url(base, *add_ons)
def _read_ec2_metadata(self):
- return ec2_utils.get_instance_metadata(ssl_details=self.ssl_details,
- timeout=self.timeout,
- retries=self.retries)
+ return ec2_utils.get_instance_metadata(
+ ssl_details=self.ssl_details,
+ timeout=self.timeout,
+ retries=self.retries,
+ )
# Convert OpenStack ConfigDrive NetworkData json to network_config yaml
@@ -544,32 +555,32 @@ def convert_net_json(network_json=None, known_macs=None):
# dict of network_config key for filtering network_json
valid_keys = {
- 'physical': [
- 'name',
- 'type',
- 'mac_address',
- 'subnets',
- 'params',
- 'mtu',
+ "physical": [
+ "name",
+ "type",
+ "mac_address",
+ "subnets",
+ "params",
+ "mtu",
],
- 'subnet': [
- 'type',
- 'address',
- 'netmask',
- 'broadcast',
- 'metric',
- 'gateway',
- 'pointopoint',
- 'scope',
- 'dns_nameservers',
- 'dns_search',
- 'routes',
+ "subnet": [
+ "type",
+ "address",
+ "netmask",
+ "broadcast",
+ "metric",
+ "gateway",
+ "pointopoint",
+ "scope",
+ "dns_nameservers",
+ "dns_search",
+ "routes",
],
}
- links = network_json.get('links', [])
- networks = network_json.get('networks', [])
- services = network_json.get('services', [])
+ links = network_json.get("links", [])
+ networks = network_json.get("networks", [])
+ services = network_json.get("services", [])
link_updates = []
link_id_info = {}
@@ -578,65 +589,77 @@ def convert_net_json(network_json=None, known_macs=None):
config = []
for link in links:
subnets = []
- cfg = dict((k, v) for k, v in link.items()
- if k in valid_keys['physical'])
+ cfg = dict(
+ (k, v) for k, v in link.items() if k in valid_keys["physical"]
+ )
# 'name' is not in openstack spec yet, but we will support it if it is
# present. The 'id' in the spec is currently implemented as the host
# nic's name, meaning something like 'tap-adfasdffd'. We do not want
# to name guest devices with such ugly names.
- if 'name' in link:
- cfg['name'] = link['name']
+ if "name" in link:
+ cfg["name"] = link["name"]
link_mac_addr = None
- if link.get('ethernet_mac_address'):
- link_mac_addr = link.get('ethernet_mac_address').lower()
- link_id_info[link['id']] = link_mac_addr
-
- curinfo = {'name': cfg.get('name'), 'mac': link_mac_addr,
- 'id': link['id'], 'type': link['type']}
-
- for network in [n for n in networks
- if n['link'] == link['id']]:
- subnet = dict((k, v) for k, v in network.items()
- if k in valid_keys['subnet'])
-
- if network['type'] == 'ipv4_dhcp':
- subnet.update({'type': 'dhcp4'})
- elif network['type'] == 'ipv6_dhcp':
- subnet.update({'type': 'dhcp6'})
- elif network['type'] in ['ipv6_slaac', 'ipv6_dhcpv6-stateless',
- 'ipv6_dhcpv6-stateful']:
- subnet.update({'type': network['type']})
- elif network['type'] in ['ipv4', 'static']:
- subnet.update({
- 'type': 'static',
- 'address': network.get('ip_address'),
- })
- elif network['type'] in ['ipv6', 'static6']:
- cfg.update({'accept-ra': False})
- subnet.update({
- 'type': 'static6',
- 'address': network.get('ip_address'),
- })
+ if link.get("ethernet_mac_address"):
+ link_mac_addr = link.get("ethernet_mac_address").lower()
+ link_id_info[link["id"]] = link_mac_addr
+
+ curinfo = {
+ "name": cfg.get("name"),
+ "mac": link_mac_addr,
+ "id": link["id"],
+ "type": link["type"],
+ }
+
+ for network in [n for n in networks if n["link"] == link["id"]]:
+ subnet = dict(
+ (k, v) for k, v in network.items() if k in valid_keys["subnet"]
+ )
+
+ if network["type"] == "ipv4_dhcp":
+ subnet.update({"type": "dhcp4"})
+ elif network["type"] == "ipv6_dhcp":
+ subnet.update({"type": "dhcp6"})
+ elif network["type"] in [
+ "ipv6_slaac",
+ "ipv6_dhcpv6-stateless",
+ "ipv6_dhcpv6-stateful",
+ ]:
+ subnet.update({"type": network["type"]})
+ elif network["type"] in ["ipv4", "static"]:
+ subnet.update(
+ {
+ "type": "static",
+ "address": network.get("ip_address"),
+ }
+ )
+ elif network["type"] in ["ipv6", "static6"]:
+ cfg.update({"accept-ra": False})
+ subnet.update(
+ {
+ "type": "static6",
+ "address": network.get("ip_address"),
+ }
+ )
# Enable accept_ra for stateful and legacy ipv6_dhcp types
- if network['type'] in ['ipv6_dhcpv6-stateful', 'ipv6_dhcp']:
- cfg.update({'accept-ra': True})
+ if network["type"] in ["ipv6_dhcpv6-stateful", "ipv6_dhcp"]:
+ cfg.update({"accept-ra": True})
- if network['type'] == 'ipv4':
- subnet['ipv4'] = True
- if network['type'] == 'ipv6':
- subnet['ipv6'] = True
+ if network["type"] == "ipv4":
+ subnet["ipv4"] = True
+ if network["type"] == "ipv6":
+ subnet["ipv6"] = True
subnets.append(subnet)
- cfg.update({'subnets': subnets})
- if link['type'] in ['bond']:
+ cfg.update({"subnets": subnets})
+ if link["type"] in ["bond"]:
params = {}
if link_mac_addr:
- params['mac_address'] = link_mac_addr
+ params["mac_address"] = link_mac_addr
for k, v in link.items():
- if k == 'bond_links':
+ if k == "bond_links":
continue
- elif k.startswith('bond'):
+ elif k.startswith("bond"):
params.update({k: v})
# openstack does not provide a name for the bond.
@@ -649,35 +672,45 @@ def convert_net_json(network_json=None, known_macs=None):
# to the network config by their nic name.
# store that in bond_links_needed, and update these later.
link_updates.append(
- (cfg, 'bond_interfaces', '%s',
- copy.deepcopy(link['bond_links']))
+ (
+ cfg,
+ "bond_interfaces",
+ "%s",
+ copy.deepcopy(link["bond_links"]),
+ )
+ )
+ cfg.update({"params": params, "name": link_name})
+
+ curinfo["name"] = link_name
+ elif link["type"] in ["vlan"]:
+ name = "%s.%s" % (link["vlan_link"], link["vlan_id"])
+ cfg.update(
+ {
+ "name": name,
+ "vlan_id": link["vlan_id"],
+ "mac_address": link["vlan_mac_address"],
+ }
+ )
+ link_updates.append((cfg, "vlan_link", "%s", link["vlan_link"]))
+ link_updates.append(
+ (cfg, "name", "%%s.%s" % link["vlan_id"], link["vlan_link"])
)
- cfg.update({'params': params, 'name': link_name})
-
- curinfo['name'] = link_name
- elif link['type'] in ['vlan']:
- name = "%s.%s" % (link['vlan_link'], link['vlan_id'])
- cfg.update({
- 'name': name,
- 'vlan_id': link['vlan_id'],
- 'mac_address': link['vlan_mac_address'],
- })
- link_updates.append((cfg, 'vlan_link', '%s', link['vlan_link']))
- link_updates.append((cfg, 'name', "%%s.%s" % link['vlan_id'],
- link['vlan_link']))
- curinfo.update({'mac': link['vlan_mac_address'],
- 'name': name})
+ curinfo.update({"mac": link["vlan_mac_address"], "name": name})
else:
- if link['type'] not in KNOWN_PHYSICAL_TYPES:
- LOG.warning('Unknown network_data link type (%s); treating as'
- ' physical', link['type'])
- cfg.update({'type': 'physical', 'mac_address': link_mac_addr})
+ if link["type"] not in KNOWN_PHYSICAL_TYPES:
+ LOG.warning(
+ "Unknown network_data link type (%s); treating as"
+ " physical",
+ link["type"],
+ )
+ cfg.update({"type": "physical", "mac_address": link_mac_addr})
config.append(cfg)
- link_id_info[curinfo['id']] = curinfo
+ link_id_info[curinfo["id"]] = curinfo
- need_names = [d for d in config
- if d.get('type') == 'physical' and 'name' not in d]
+ need_names = [
+ d for d in config if d.get("type") == "physical" and "name" not in d
+ ]
if need_names or link_updates:
if known_macs is None:
@@ -685,26 +718,26 @@ def convert_net_json(network_json=None, known_macs=None):
# go through and fill out the link_id_info with names
for _link_id, info in link_id_info.items():
- if info.get('name'):
+ if info.get("name"):
continue
- if info.get('mac') in known_macs:
- info['name'] = known_macs[info['mac']]
+ if info.get("mac") in known_macs:
+ info["name"] = known_macs[info["mac"]]
for d in need_names:
- mac = d.get('mac_address')
+ mac = d.get("mac_address")
if not mac:
raise ValueError("No mac_address or name entry for %s" % d)
if mac not in known_macs:
raise ValueError("Unable to find a system nic for %s" % d)
- d['name'] = known_macs[mac]
+ d["name"] = known_macs[mac]
for cfg, key, fmt, targets in link_updates:
if isinstance(targets, (list, tuple)):
cfg[key] = [
- fmt % link_id_info[target]['name'] for target in targets
+ fmt % link_id_info[target]["name"] for target in targets
]
else:
- cfg[key] = fmt % link_id_info[targets]['name']
+ cfg[key] = fmt % link_id_info[targets]["name"]
# Infiniband interfaces may be referenced in network_data.json by a 6 byte
# Ethernet MAC-style address, and we use that address to look up the
@@ -713,15 +746,16 @@ def convert_net_json(network_json=None, known_macs=None):
ib_known_hwaddrs = net.get_ib_hwaddrs_by_interface()
if ib_known_hwaddrs:
for cfg in config:
- if cfg['name'] in ib_known_hwaddrs:
- cfg['mac_address'] = ib_known_hwaddrs[cfg['name']]
- cfg['type'] = 'infiniband'
+ if cfg["name"] in ib_known_hwaddrs:
+ cfg["mac_address"] = ib_known_hwaddrs[cfg["name"]]
+ cfg["type"] = "infiniband"
for service in services:
cfg = service
- cfg.update({'type': 'nameserver'})
+ cfg.update({"type": "nameserver"})
config.append(cfg)
- return {'version': 1, 'config': config}
+ return {"version": 1, "config": config}
+
# vi: ts=4 expandtab
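
The reflowed convert_net_json() keeps its behaviour of mapping an OpenStack
network_data.json document to a version 1 network config. A sketch with
invented values, passing known_macs so no host NIC lookup is needed:

    network_json = {
        "links": [
            {"id": "tap-1", "type": "phy",
             "ethernet_mac_address": "FA:16:3E:AA:BB:CC", "mtu": 1500},
        ],
        "networks": [
            {"link": "tap-1", "type": "ipv4",
             "ip_address": "10.0.0.5", "netmask": "255.255.255.0"},
        ],
        "services": [{"type": "dns", "address": "10.0.0.2"}],
    }
    cfg = convert_net_json(
        network_json, known_macs={"fa:16:3e:aa:bb:cc": "eth0"}
    )
    # cfg == {"version": 1, "config": [...]}: one physical "eth0" entry with
    # a static subnet for 10.0.0.5, followed by a nameserver entry.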
diff --git a/cloudinit/sources/helpers/upcloud.py b/cloudinit/sources/helpers/upcloud.py
index 199baa58..e7b95a5e 100644
--- a/cloudinit/sources/helpers/upcloud.py
+++ b/cloudinit/sources/helpers/upcloud.py
@@ -169,7 +169,7 @@ def convert_to_network_config_v1(config):
interface = {
"type": "physical",
"name": sysfs_name,
- "mac_address": mac_address
+ "mac_address": mac_address,
}
subnets = []
@@ -182,10 +182,9 @@ def convert_to_network_config_v1(config):
if config.get("dns"):
LOG.debug("Setting DNS nameservers to %s", config.get("dns"))
- nic_configs.append({
- "type": "nameserver",
- "address": config.get("dns")
- })
+ nic_configs.append(
+ {"type": "nameserver", "address": config.get("dns")}
+ )
return {"version": 1, "config": nic_configs}
@@ -216,8 +215,7 @@ def read_sysinfo():
server_uuid = dmi.read_dmi_data("system-uuid")
if server_uuid:
LOG.debug(
- "system identified via SMBIOS as UpCloud server: %s",
- server_uuid
+ "system identified via SMBIOS as UpCloud server: %s", server_uuid
)
else:
msg = (
diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
index 9a07eafa..a5c67bb7 100644
--- a/cloudinit/sources/helpers/vmware/imc/boot_proto.py
+++ b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
@@ -9,7 +9,8 @@
class BootProtoEnum(object):
"""Specifies the NIC Boot Settings."""
- DHCP = 'dhcp'
- STATIC = 'static'
+ DHCP = "dhcp"
+ STATIC = "static"
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index bdfab5a0..39dacee0 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -15,20 +15,20 @@ class Config(object):
Specification file.
"""
- CUSTOM_SCRIPT = 'CUSTOM-SCRIPT|SCRIPT-NAME'
- DNS = 'DNS|NAMESERVER|'
- DOMAINNAME = 'NETWORK|DOMAINNAME'
- HOSTNAME = 'NETWORK|HOSTNAME'
- MARKERID = 'MISC|MARKER-ID'
- PASS = 'PASSWORD|-PASS'
- RESETPASS = 'PASSWORD|RESET'
- SUFFIX = 'DNS|SUFFIX|'
- TIMEZONE = 'DATETIME|TIMEZONE'
- UTC = 'DATETIME|UTC'
- POST_GC_STATUS = 'MISC|POST-GC-STATUS'
- DEFAULT_RUN_POST_SCRIPT = 'MISC|DEFAULT-RUN-POST-CUST-SCRIPT'
- CLOUDINIT_META_DATA = 'CLOUDINIT|METADATA'
- CLOUDINIT_USER_DATA = 'CLOUDINIT|USERDATA'
+ CUSTOM_SCRIPT = "CUSTOM-SCRIPT|SCRIPT-NAME"
+ DNS = "DNS|NAMESERVER|"
+ DOMAINNAME = "NETWORK|DOMAINNAME"
+ HOSTNAME = "NETWORK|HOSTNAME"
+ MARKERID = "MISC|MARKER-ID"
+ PASS = "PASSWORD|-PASS"
+ RESETPASS = "PASSWORD|RESET"
+ SUFFIX = "DNS|SUFFIX|"
+ TIMEZONE = "DATETIME|TIMEZONE"
+ UTC = "DATETIME|UTC"
+ POST_GC_STATUS = "MISC|POST-GC-STATUS"
+ DEFAULT_RUN_POST_SCRIPT = "MISC|DEFAULT-RUN-POST-CUST-SCRIPT"
+ CLOUDINIT_META_DATA = "CLOUDINIT|METADATA"
+ CLOUDINIT_USER_DATA = "CLOUDINIT|USERDATA"
def __init__(self, configFile):
self._configFile = configFile
@@ -84,8 +84,8 @@ class Config(object):
def nics(self):
"""Return the list of associated NICs."""
res = []
- nics = self._configFile['NIC-CONFIG|NICS']
- for nic in nics.split(','):
+ nics = self._configFile["NIC-CONFIG|NICS"]
+ for nic in nics.split(","):
res.append(Nic(nic, self._configFile))
return res
@@ -93,11 +93,11 @@ class Config(object):
@property
def reset_password(self):
"""Retreives if the root password needs to be reset."""
- resetPass = self._configFile.get(Config.RESETPASS, 'no')
+ resetPass = self._configFile.get(Config.RESETPASS, "no")
resetPass = resetPass.lower()
- if resetPass not in ('yes', 'no'):
- raise ValueError('ResetPassword value should be yes/no')
- return resetPass == 'yes'
+ if resetPass not in ("yes", "no"):
+ raise ValueError("ResetPassword value should be yes/no")
+ return resetPass == "yes"
@property
def marker_id(self):
@@ -112,11 +112,11 @@ class Config(object):
@property
def post_gc_status(self):
"""Return whether to post guestinfo.gc.status VMX property."""
- postGcStatus = self._configFile.get(Config.POST_GC_STATUS, 'no')
+ postGcStatus = self._configFile.get(Config.POST_GC_STATUS, "no")
postGcStatus = postGcStatus.lower()
- if postGcStatus not in ('yes', 'no'):
- raise ValueError('PostGcStatus value should be yes/no')
- return postGcStatus == 'yes'
+ if postGcStatus not in ("yes", "no"):
+ raise ValueError("PostGcStatus value should be yes/no")
+ return postGcStatus == "yes"
@property
def default_run_post_script(self):
@@ -125,12 +125,12 @@ class Config(object):
is absent in VM Tools configuration
"""
defaultRunPostScript = self._configFile.get(
- Config.DEFAULT_RUN_POST_SCRIPT,
- 'no')
+ Config.DEFAULT_RUN_POST_SCRIPT, "no"
+ )
defaultRunPostScript = defaultRunPostScript.lower()
- if defaultRunPostScript not in ('yes', 'no'):
- raise ValueError('defaultRunPostScript value should be yes/no')
- return defaultRunPostScript == 'yes'
+ if defaultRunPostScript not in ("yes", "no"):
+ raise ValueError("defaultRunPostScript value should be yes/no")
+ return defaultRunPostScript == "yes"
@property
def meta_data_name(self):
@@ -142,4 +142,5 @@ class Config(object):
"""Return the name of cloud-init user data."""
return self._configFile.get(Config.CLOUDINIT_USER_DATA, None)
+
# vi: ts=4 expandtab
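
A small illustration of the yes/no handling these properties preserve. The
spec path and contents are hypothetical, and ConfigFile (see config_file.py
below) is assumed to take the path to the customization spec:

    # cust.cfg (hypothetical) contains:
    #   [PASSWORD]
    #   RESET = yes
    conf = Config(ConfigFile("cust.cfg"))
    if conf.reset_password:  # "yes"/"no" -> True/False; anything else raises
        print("root password will be reset")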
diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
index 2ab22de9..8240ea8f 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
@@ -9,8 +9,7 @@ import logging
import os
import stat
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
@@ -24,8 +23,7 @@ class CustomScriptConstant(object):
# The user defined custom script
CUSTOM_SCRIPT_NAME = "customize.sh"
- CUSTOM_SCRIPT = os.path.join(CUSTOM_TMP_DIR,
- CUSTOM_SCRIPT_NAME)
+ CUSTOM_SCRIPT = os.path.join(CUSTOM_TMP_DIR, CUSTOM_SCRIPT_NAME)
POST_CUSTOM_PENDING_MARKER = "/.guest-customization-post-reboot-pending"
# The cc_scripts_per_instance script to launch custom script
POST_CUSTOM_SCRIPT_NAME = "post-customize-guest.sh"
@@ -39,22 +37,25 @@ class RunCustomScript(object):
def prepare_script(self):
if not os.path.exists(self.scriptpath):
- raise CustomScriptNotFound("Script %s not found!! "
- "Cannot execute custom script!"
- % self.scriptpath)
+ raise CustomScriptNotFound(
+ "Script %s not found!! Cannot execute custom script!"
+ % self.scriptpath
+ )
util.ensure_dir(CustomScriptConstant.CUSTOM_TMP_DIR)
- LOG.debug("Copying custom script to %s",
- CustomScriptConstant.CUSTOM_SCRIPT)
+ LOG.debug(
+ "Copying custom script to %s", CustomScriptConstant.CUSTOM_SCRIPT
+ )
util.copy(self.scriptpath, CustomScriptConstant.CUSTOM_SCRIPT)
# Strip any CR characters from the decoded script
- content = util.load_file(
- CustomScriptConstant.CUSTOM_SCRIPT).replace("\r", "")
- util.write_file(CustomScriptConstant.CUSTOM_SCRIPT,
- content,
- mode=0o544)
+ content = util.load_file(CustomScriptConstant.CUSTOM_SCRIPT).replace(
+ "\r", ""
+ )
+ util.write_file(
+ CustomScriptConstant.CUSTOM_SCRIPT, content, mode=0o544
+ )
class PreCustomScript(RunCustomScript):
@@ -70,8 +71,8 @@ class PostCustomScript(RunCustomScript):
super(PostCustomScript, self).__init__(scriptname, directory)
self.ccScriptsDir = ccScriptsDir
self.ccScriptPath = os.path.join(
- ccScriptsDir,
- CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME)
+ ccScriptsDir, CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME
+ )
def execute(self):
"""
@@ -81,15 +82,17 @@ class PostCustomScript(RunCustomScript):
"""
self.prepare_script()
- LOG.debug("Copying post customize run script to %s",
- self.ccScriptPath)
+ LOG.debug("Copying post customize run script to %s", self.ccScriptPath)
util.copy(
- os.path.join(self.directory,
- CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME),
- self.ccScriptPath)
+ os.path.join(
+ self.directory, CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME
+ ),
+ self.ccScriptPath,
+ )
st = os.stat(self.ccScriptPath)
os.chmod(self.ccScriptPath, st.st_mode | stat.S_IEXEC)
LOG.info("Creating post customization pending marker")
util.ensure_file(CustomScriptConstant.POST_CUSTOM_PENDING_MARKER)
+
# vi: ts=4 expandtab
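
A usage sketch for the reflowed script classes; every path below is a
placeholder, not something introduced by this change:

    # Install the user's post-customization hook (illustrative paths only).
    PostCustomScript(
        "customize.sh",                         # script named in the spec
        "/var/run/vmware-imc",                  # where the spec was unpacked
        "/var/lib/cloud/scripts/per-instance",  # cloud-init scripts dir
    ).execute()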
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index fc034c95..845294ec 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -35,7 +35,7 @@ class ConfigFile(ConfigSource, dict):
key = key.strip()
val = val.strip()
- if key.startswith('-') or '|-' in key:
+ if key.startswith("-") or "|-" in key:
canLog = False
else:
canLog = True
@@ -59,7 +59,7 @@ class ConfigFile(ConfigSource, dict):
Keyword arguments:
filename - The full path to the config file.
"""
- logger.info('Parsing the config file %s.', filename)
+ logger.info("Parsing the config file %s.", filename)
config = configparser.ConfigParser()
config.optionxform = str
@@ -71,7 +71,7 @@ class ConfigFile(ConfigSource, dict):
logger.debug("FOUND CATEGORY = '%s'", category)
for (key, value) in config.items(category):
- self._insertKey(category + '|' + key, value)
+ self._insertKey(category + "|" + key, value)
def should_keep_current_value(self, key):
"""
@@ -115,4 +115,5 @@ class ConfigFile(ConfigSource, dict):
"""
return len([key for key in self if key.startswith(prefix)])
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
index 5899d8f7..3b3b2d5a 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_namespace.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
@@ -11,4 +11,5 @@ from .config_source import ConfigSource
class ConfigNamespace(ConfigSource):
"""Specifies the Config Namespace."""
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index f5a0ebe4..df621f20 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -9,9 +9,8 @@ import logging
import os
import re
+from cloudinit import subp, util
from cloudinit.net.network_state import mask_to_net_prefix
-from cloudinit import subp
-from cloudinit import util
logger = logging.getLogger(__name__)
@@ -63,8 +62,10 @@ class NicConfigurator(object):
if not primary_nics:
return None
elif len(primary_nics) > 1:
- raise Exception('There can only be one primary nic',
- [nic.mac for nic in primary_nics])
+ raise Exception(
+ "There can only be one primary nic",
+ [nic.mac for nic in primary_nics],
+ )
else:
return primary_nics[0]
@@ -73,17 +74,17 @@ class NicConfigurator(object):
Create the mac2Name dictionary
The mac address(es) are in the lower case
"""
- cmd = ['ip', 'addr', 'show']
+ cmd = ["ip", "addr", "show"]
output, _err = subp.subp(cmd)
- sections = re.split(r'\n\d+: ', '\n' + output)[1:]
+ sections = re.split(r"\n\d+: ", "\n" + output)[1:]
- macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
+ macPat = r"link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))"
for section in sections:
match = re.search(macPat, section)
if not match: # Only keep info about nics
continue
mac = match.group(1).lower()
- name = section.split(':', 1)[0]
+ name = section.split(":", 1)[0]
self.mac2Name[mac] = name
def gen_one_nic(self, nic):
@@ -95,11 +96,11 @@ class NicConfigurator(object):
mac = nic.mac.lower()
name = self.mac2Name.get(mac)
if not name:
- raise ValueError('No known device has MACADDR: %s' % nic.mac)
+ raise ValueError("No known device has MACADDR: %s" % nic.mac)
nics_cfg_list = []
- cfg = {'type': 'physical', 'name': name, 'mac_address': mac}
+ cfg = {"type": "physical", "name": name, "mac_address": mac}
subnet_list = []
route_list = []
@@ -114,7 +115,7 @@ class NicConfigurator(object):
subnet_list.extend(subnets)
route_list.extend(routes)
- cfg.update({'subnets': subnet_list})
+ cfg.update({"subnets": subnet_list})
nics_cfg_list.append(cfg)
if route_list:
@@ -135,17 +136,17 @@ class NicConfigurator(object):
route_list = []
if nic.onboot:
- subnet.update({'control': 'auto'})
+ subnet.update({"control": "auto"})
bootproto = nic.bootProto.lower()
- if nic.ipv4_mode.lower() == 'disabled':
- bootproto = 'manual'
+ if nic.ipv4_mode.lower() == "disabled":
+ bootproto = "manual"
- if bootproto != 'static':
- subnet.update({'type': 'dhcp'})
+ if bootproto != "static":
+ subnet.update({"type": "dhcp"})
return ([subnet], route_list)
else:
- subnet.update({'type': 'static'})
+ subnet.update({"type": "static"})
# Static Ipv4
addrs = nic.staticIpv4
@@ -154,20 +155,21 @@ class NicConfigurator(object):
v4 = addrs[0]
if v4.ip:
- subnet.update({'address': v4.ip})
+ subnet.update({"address": v4.ip})
if v4.netmask:
- subnet.update({'netmask': v4.netmask})
+ subnet.update({"netmask": v4.netmask})
# Add the primary gateway
if nic.primary and v4.gateways:
self.ipv4PrimaryGateway = v4.gateways[0]
- subnet.update({'gateway': self.ipv4PrimaryGateway})
+ subnet.update({"gateway": self.ipv4PrimaryGateway})
return ([subnet], route_list)
# Add routes if there is no primary nic
if not self._primaryNic and v4.gateways:
subnet.update(
- {'routes': self.gen_ipv4_route(nic, v4.gateways, v4.netmask)})
+ {"routes": self.gen_ipv4_route(nic, v4.gateways, v4.netmask)}
+ )
return ([subnet], route_list)
@@ -184,10 +186,14 @@ class NicConfigurator(object):
for gateway in gateways:
destination = "%s/%d" % (gen_subnet(gateway, netmask), cidr)
- route_list.append({'destination': destination,
- 'type': 'route',
- 'gateway': gateway,
- 'metric': 10000})
+ route_list.append(
+ {
+ "destination": destination,
+ "type": "route",
+ "gateway": gateway,
+ "metric": 10000,
+ }
+ )
return route_list
@@ -208,9 +214,11 @@ class NicConfigurator(object):
addrs = nic.staticIpv6
for addr in addrs:
- subnet = {'type': 'static6',
- 'address': addr.ip,
- 'netmask': addr.netmask}
+ subnet = {
+ "type": "static6",
+ "address": addr.ip,
+ "netmask": addr.netmask,
+ }
subnet_list.append(subnet)
# TODO: Add the primary gateway
@@ -226,9 +234,9 @@ class NicConfigurator(object):
route_list = []
for addr in addrs:
- route_list.append({'type': 'route',
- 'gateway': addr.gateway,
- 'metric': 10000})
+ route_list.append(
+ {"type": "route", "gateway": addr.gateway, "metric": 10000}
+ )
return route_list
@@ -246,7 +254,7 @@ class NicConfigurator(object):
return nics_cfg_list
def clear_dhcp(self):
- logger.info('Clearing DHCP leases')
+ logger.info("Clearing DHCP leases")
# Ignore the return code 1.
subp.subp(["pkill", "dhclient"], rcs=[0, 1])
@@ -262,11 +270,12 @@ class NicConfigurator(object):
logger.info("Debian OS not detected. Skipping the configure step")
return
- containingDir = '/etc/network'
+ containingDir = "/etc/network"
- interfaceFile = os.path.join(containingDir, 'interfaces')
- originalFile = os.path.join(containingDir,
- 'interfaces.before_vmware_customization')
+ interfaceFile = os.path.join(containingDir, "interfaces")
+ originalFile = os.path.join(
+ containingDir, "interfaces.before_vmware_customization"
+ )
if not os.path.exists(originalFile) and os.path.exists(interfaceFile):
os.rename(interfaceFile, originalFile)
@@ -278,8 +287,9 @@ class NicConfigurator(object):
"source-directory /etc/network/interfaces.d",
]
- util.write_file(interfaceFile, content='\n'.join(lines))
+ util.write_file(interfaceFile, content="\n".join(lines))
self.clear_dhcp()
+
# vi: ts=4 expandtab
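
For context, the route entries built above keep their shape after reflowing;
a single gateway on a /24 yields one dict like the following (addresses are
invented):

    # One entry appended per gateway by gen_ipv4_route().
    {
        "destination": "192.168.1.0/24",  # gen_subnet(gateway, netmask)/cidr
        "type": "route",
        "gateway": "192.168.1.1",
        "metric": 10000,
    }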
diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
index d16a7690..4d3967a1 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_passwd.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
@@ -9,8 +9,7 @@
import logging
import os
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
@@ -20,6 +19,7 @@ class PasswordConfigurator(object):
Class for changing configurations related to passwords in a VM. Includes
setting and expiring passwords.
"""
+
def configure(self, passwd, resetPasswd, distro):
"""
Main method to perform all functionalities based on configuration file
@@ -28,25 +28,25 @@ class PasswordConfigurator(object):
@param resetPasswd: boolean to determine if password needs to be reset.
@return cfg: dict to be used by cloud-init set_passwd code.
"""
- LOG.info('Starting password configuration')
+ LOG.info("Starting password configuration")
if passwd:
passwd = util.b64d(passwd)
allRootUsers = []
- for line in open('/etc/passwd', 'r'):
- if line.split(':')[2] == '0':
- allRootUsers.append(line.split(':')[0])
+ for line in open("/etc/passwd", "r"):
+ if line.split(":")[2] == "0":
+ allRootUsers.append(line.split(":")[0])
# read shadow file and check for each user, if its uid0 or root.
uidUsersList = []
- for line in open('/etc/shadow', 'r'):
- user = line.split(':')[0]
+ for line in open("/etc/shadow", "r"):
+ user = line.split(":")[0]
if user in allRootUsers:
uidUsersList.append(user)
if passwd:
- LOG.info('Setting admin password')
- distro.set_passwd('root', passwd)
+ LOG.info("Setting admin password")
+ distro.set_passwd("root", passwd)
if resetPasswd:
self.reset_password(uidUsersList)
- LOG.info('Configure Password completed!')
+ LOG.info("Configure Password completed!")
def reset_password(self, uidUserList):
"""
@@ -54,15 +54,19 @@ class PasswordConfigurator(object):
not succeeded using passwd command. Log failure message otherwise.
@param: list of users for which to expire password.
"""
- LOG.info('Expiring password.')
+ LOG.info("Expiring password.")
for user in uidUserList:
try:
- subp.subp(['passwd', '--expire', user])
+ subp.subp(["passwd", "--expire", user])
except subp.ProcessExecutionError as e:
- if os.path.exists('/usr/bin/chage'):
- subp.subp(['chage', '-d', '0', user])
+ if os.path.exists("/usr/bin/chage"):
+ subp.subp(["chage", "-d", "0", user])
else:
- LOG.warning('Failed to expire password for %s with error: '
- '%s', user, e)
+ LOG.warning(
+ "Failed to expire password for %s with error: %s",
+ user,
+ e,
+ )
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py
index 7ec06a9c..e99f9b43 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_source.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_source.py
@@ -9,4 +9,5 @@
class ConfigSource(object):
"""Specifies a source for the Config Content."""
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
index 96d839b8..eda84cfb 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
@@ -13,4 +13,5 @@ class GuestCustErrorEnum(object):
GUESTCUST_ERROR_SCRIPT_DISABLED = 6
GUESTCUST_ERROR_WRONG_META_FORMAT = 9
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
index e84c1cb0..33169a7e 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
@@ -14,4 +14,5 @@ class GuestCustEventEnum(object):
GUESTCUST_EVENT_ENABLE_NICS = 103
GUESTCUST_EVENT_QUERY_NICS = 104
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
index a8211dea..c74fbc8b 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
@@ -12,4 +12,5 @@ class GuestCustStateEnum(object):
GUESTCUST_STATE_RUNNING = 4
GUESTCUST_STATE_DONE = 5
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index d919f693..08763e62 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -73,7 +73,7 @@ def get_nics_to_enable(nicsfilepath):
if not os.path.exists(nicsfilepath):
return None
- with open(nicsfilepath, 'r') as fp:
+ with open(nicsfilepath, "r") as fp:
nics = fp.read(NICS_SIZE)
return nics
@@ -95,7 +95,8 @@ def enable_nics(nics):
(out, _err) = set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS,
- nics)
+ nics,
+ )
if not out:
time.sleep(enableNicsWaitCount * enableNicsWaitSeconds)
continue
@@ -108,32 +109,36 @@ def enable_nics(nics):
(out, _err) = set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS,
- nics)
+ nics,
+ )
if out and out == NICS_STATUS_CONNECTED:
logger.info("NICS are connected on %d second", count)
return
time.sleep(enableNicsWaitSeconds)
- logger.warning("Can't connect network interfaces after %d attempts",
- enableNicsWaitRetries)
+ logger.warning(
+ "Can't connect network interfaces after %d attempts",
+ enableNicsWaitRetries,
+ )
def get_tools_config(section, key, defaultVal):
- """ Return the value of [section] key from VMTools configuration.
+ """Return the value of [section] key from VMTools configuration.
- @param section: String of section to read from VMTools config
- @returns: String value from key in [section] or defaultVal if
- [section] is not present or vmware-toolbox-cmd is
- not installed.
+ @param section: String of section to read from VMTools config
+ @returns: String value from key in [section] or defaultVal if
+ [section] is not present or vmware-toolbox-cmd is
+ not installed.
"""
- if not subp.which('vmware-toolbox-cmd'):
+ if not subp.which("vmware-toolbox-cmd"):
logger.debug(
- 'vmware-toolbox-cmd not installed, returning default value')
+ "vmware-toolbox-cmd not installed, returning default value"
+ )
return defaultVal
- cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key]
+ cmd = ["vmware-toolbox-cmd", "config", "get", section, key]
try:
(outText, _) = subp.subp(cmd)
@@ -141,22 +146,27 @@ def get_tools_config(section, key, defaultVal):
if e.exit_code == 69:
logger.debug(
"vmware-toolbox-cmd returned 69 (unavailable) for cmd: %s."
- " Return default value: %s", " ".join(cmd), defaultVal)
+ " Return default value: %s",
+ " ".join(cmd),
+ defaultVal,
+ )
else:
logger.error("Failed running %s[%s]", cmd, e.exit_code)
logger.exception(e)
return defaultVal
retValue = defaultVal
- m = re.match(r'([^=]+)=(.*)', outText)
+ m = re.match(r"([^=]+)=(.*)", outText)
if m:
retValue = m.group(2).strip()
- logger.debug("Get tools config: [%s] %s = %s",
- section, key, retValue)
+ logger.debug("Get tools config: [%s] %s = %s", section, key, retValue)
else:
logger.debug(
"Tools config: [%s] %s is not found, return default value: %s",
- section, key, retValue)
+ section,
+ key,
+ retValue,
+ )
return retValue
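
A usage sketch for the reflowed get_tools_config(); the section and key below
are examples only, not part of this change. The helper shells out to
vmware-toolbox-cmd and parses a single "key = value" line, falling back to the
supplied default:

    enabled = get_tools_config("deploypkg", "enable-custom-scripts", "false")
    if enabled.lower() == "true":
        run_post_customization_script()  # hypothetical caller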
diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
index d793bdeb..673204a0 100644
--- a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
+++ b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
@@ -18,18 +18,19 @@ class Ipv4ModeEnum(object):
# The legacy mode which only allows dhcp/static based on whether IPv4
# addresses list is empty or not
- IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE'
+ IPV4_MODE_BACKWARDS_COMPATIBLE = "BACKWARDS_COMPATIBLE"
# IPv4 must use static address. Reserved for future use
- IPV4_MODE_STATIC = 'STATIC'
+ IPV4_MODE_STATIC = "STATIC"
# IPv4 must use DHCPv4. Reserved for future use
- IPV4_MODE_DHCP = 'DHCP'
+ IPV4_MODE_DHCP = "DHCP"
# IPv4 must be disabled
- IPV4_MODE_DISABLED = 'DISABLED'
+ IPV4_MODE_DISABLED = "DISABLED"
# IPv4 settings should be left untouched. Reserved for future use
- IPV4_MODE_AS_IS = 'AS_IS'
+ IPV4_MODE_AS_IS = "AS_IS"
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py
index ef8f87f7..7b742d0f 100644
--- a/cloudinit/sources/helpers/vmware/imc/nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/nic.py
@@ -20,7 +20,7 @@ class Nic(NicBase):
self._configFile = configFile
def _get(self, what):
- return self._configFile.get(self.name + '|' + what, None)
+ return self._configFile.get(self.name + "|" + what, None)
def _get_count_with_prefix(self, prefix):
return self._configFile.get_count_with_prefix(self.name + prefix)
@@ -31,29 +31,29 @@ class Nic(NicBase):
@property
def mac(self):
- return self._get('MACADDR').lower()
+ return self._get("MACADDR").lower()
@property
def primary(self):
- value = self._get('PRIMARY')
+ value = self._get("PRIMARY")
if value:
value = value.lower()
- return value == 'yes' or value == 'true'
+ return value == "yes" or value == "true"
else:
return False
@property
def onboot(self):
- value = self._get('ONBOOT')
+ value = self._get("ONBOOT")
if value:
value = value.lower()
- return value == 'yes' or value == 'true'
+ return value == "yes" or value == "true"
else:
return False
@property
def bootProto(self):
- value = self._get('BOOTPROTO')
+ value = self._get("BOOTPROTO")
if value:
return value.lower()
else:
@@ -61,7 +61,7 @@ class Nic(NicBase):
@property
def ipv4_mode(self):
- value = self._get('IPv4_MODE')
+ value = self._get("IPv4_MODE")
if value:
return value.lower()
else:
@@ -80,7 +80,7 @@ class Nic(NicBase):
@property
def staticIpv6(self):
- cnt = self._get_count_with_prefix('|IPv6ADDR|')
+ cnt = self._get_count_with_prefix("|IPv6ADDR|")
if not cnt:
return None
@@ -100,17 +100,17 @@ class StaticIpv4Addr(StaticIpv4Base):
@property
def ip(self):
- return self._nic._get('IPADDR')
+ return self._nic._get("IPADDR")
@property
def netmask(self):
- return self._nic._get('NETMASK')
+ return self._nic._get("NETMASK")
@property
def gateways(self):
- value = self._nic._get('GATEWAY')
+ value = self._nic._get("GATEWAY")
if value:
- return [x.strip() for x in value.split(',')]
+ return [x.strip() for x in value.split(",")]
else:
return None
@@ -124,14 +124,15 @@ class StaticIpv6Addr(StaticIpv6Base):
@property
def ip(self):
- return self._nic._get('IPv6ADDR|' + str(self._index))
+ return self._nic._get("IPv6ADDR|" + str(self._index))
@property
def netmask(self):
- return self._nic._get('IPv6NETMASK|' + str(self._index))
+ return self._nic._get("IPv6NETMASK|" + str(self._index))
@property
def gateway(self):
- return self._nic._get('IPv6GATEWAY|' + str(self._index))
+ return self._nic._get("IPv6GATEWAY|" + str(self._index))
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/nic_base.py b/cloudinit/sources/helpers/vmware/imc/nic_base.py
index de7b866d..37d9602f 100644
--- a/cloudinit/sources/helpers/vmware/imc/nic_base.py
+++ b/cloudinit/sources/helpers/vmware/imc/nic_base.py
@@ -18,7 +18,7 @@ class NicBase(object):
Retrieves the mac address of the nic
@return (str) : the MACADDR setting
"""
- raise NotImplementedError('MACADDR')
+ raise NotImplementedError("MACADDR")
@property
def primary(self):
@@ -29,7 +29,7 @@ class NicBase(object):
be set.
@return (bool): the PRIMARY setting
"""
- raise NotImplementedError('PRIMARY')
+ raise NotImplementedError("PRIMARY")
@property
def onboot(self):
@@ -37,7 +37,7 @@ class NicBase(object):
        Retrieves whether the nic should be up at boot time
@return (bool) : the ONBOOT setting
"""
- raise NotImplementedError('ONBOOT')
+ raise NotImplementedError("ONBOOT")
@property
def bootProto(self):
@@ -45,7 +45,7 @@ class NicBase(object):
Retrieves the boot protocol of the nic
@return (str): the BOOTPROTO setting, valid values: dhcp and static.
"""
- raise NotImplementedError('BOOTPROTO')
+ raise NotImplementedError("BOOTPROTO")
@property
def ipv4_mode(self):
@@ -54,7 +54,7 @@ class NicBase(object):
@return (str): the IPv4_MODE setting, valid values:
backwards_compatible, static, dhcp, disabled, as_is
"""
- raise NotImplementedError('IPv4_MODE')
+ raise NotImplementedError("IPv4_MODE")
@property
def staticIpv4(self):
@@ -62,7 +62,7 @@ class NicBase(object):
Retrieves the static IPv4 configuration of the nic
@return (StaticIpv4Base list): the static ipv4 setting
"""
- raise NotImplementedError('Static IPv4')
+ raise NotImplementedError("Static IPv4")
@property
def staticIpv6(self):
@@ -70,7 +70,7 @@ class NicBase(object):
Retrieves the IPv6 configuration of the nic
@return (StaticIpv6Base list): the static ipv6 setting
"""
- raise NotImplementedError('Static Ipv6')
+ raise NotImplementedError("Static Ipv6")
def validate(self):
"""
@@ -78,7 +78,7 @@ class NicBase(object):
For example, the staticIpv4 property is required and should not be
empty when ipv4Mode is STATIC
"""
- raise NotImplementedError('Check constraints on properties')
+ raise NotImplementedError("Check constraints on properties")
class StaticIpv4Base(object):
@@ -93,7 +93,7 @@ class StaticIpv4Base(object):
Retrieves the Ipv4 address
@return (str): the IPADDR setting
"""
- raise NotImplementedError('Ipv4 Address')
+ raise NotImplementedError("Ipv4 Address")
@property
def netmask(self):
@@ -101,7 +101,7 @@ class StaticIpv4Base(object):
Retrieves the Ipv4 NETMASK setting
@return (str): the NETMASK setting
"""
- raise NotImplementedError('Ipv4 NETMASK')
+ raise NotImplementedError("Ipv4 NETMASK")
@property
def gateways(self):
@@ -109,7 +109,7 @@ class StaticIpv4Base(object):
Retrieves the gateways on this Ipv4 subnet
@return (str list): the GATEWAY setting
"""
- raise NotImplementedError('Ipv4 GATEWAY')
+ raise NotImplementedError("Ipv4 GATEWAY")
class StaticIpv6Base(object):
@@ -123,7 +123,7 @@ class StaticIpv6Base(object):
Retrieves the Ipv6 address
@return (str): the IPv6ADDR setting
"""
- raise NotImplementedError('Ipv6 Address')
+ raise NotImplementedError("Ipv6 Address")
@property
def netmask(self):
@@ -131,7 +131,7 @@ class StaticIpv6Base(object):
Retrieves the Ipv6 NETMASK setting
@return (str): the IPv6NETMASK setting
"""
- raise NotImplementedError('Ipv6 NETMASK')
+ raise NotImplementedError("Ipv6 NETMASK")
@property
def gateway(self):
@@ -139,6 +139,7 @@ class StaticIpv6Base(object):
Retrieves the Ipv6 GATEWAY setting
@return (str): the IPv6GATEWAY setting
"""
- raise NotImplementedError('Ipv6 GATEWAY')
+ raise NotImplementedError("Ipv6 GATEWAY")
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py
index ad347bea..eb504eba 100644
--- a/cloudinit/sources/helpers/vultr.py
+++ b/cloudinit/sources/helpers/vultr.py
@@ -3,16 +3,12 @@
# This file is part of cloud-init. See LICENSE file for license information.
import json
+from functools import lru_cache
-from cloudinit import log as log
-from cloudinit import url_helper
from cloudinit import dmi
-from cloudinit import util
-from cloudinit import net
-from cloudinit import netinfo
-from cloudinit import subp
+from cloudinit import log as log
+from cloudinit import net, netinfo, subp, url_helper, util
from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-from functools import lru_cache
# Get LOG
LOG = log.getLogger(__name__)
@@ -41,21 +37,21 @@ def set_route():
routes = netinfo.route_info()
    # If no tools exist, an empty dict is returned
- if 'ipv4' not in routes:
+ if "ipv4" not in routes:
return
# We only care about IPv4
- routes = routes['ipv4']
+ routes = routes["ipv4"]
# Searchable list
dests = []
# Parse each route into a more searchable format
for route in routes:
- dests.append(route['destination'])
+ dests.append(route["destination"])
- gw_present = '100.64.0.0' in dests or '100.64.0.0/10' in dests
- dest_present = '169.254.169.254' in dests
+ gw_present = "100.64.0.0" in dests or "100.64.0.0/10" in dests
+ dest_present = "169.254.169.254" in dests
# If not IPv6 only (No link local)
# or the route is already present
@@ -63,36 +59,32 @@ def set_route():
return
# Set metadata route
- if subp.which('ip'):
- subp.subp([
- 'ip',
- 'route',
- 'add',
- '169.254.169.254/32',
- 'dev',
- net.find_fallback_nic()
- ])
- elif subp.which('route'):
- subp.subp([
- 'route',
- 'add',
- '-net',
- '169.254.169.254/32',
- '100.64.0.1'
- ])
+ if subp.which("ip"):
+ subp.subp(
+ [
+ "ip",
+ "route",
+ "add",
+ "169.254.169.254/32",
+ "dev",
+ net.find_fallback_nic(),
+ ]
+ )
+ elif subp.which("route"):
+ subp.subp(["route", "add", "-net", "169.254.169.254/32", "100.64.0.1"])
# Read the system information from SMBIOS
def get_sysinfo():
return {
- 'manufacturer': dmi.read_dmi_data("system-manufacturer"),
- 'subid': dmi.read_dmi_data("system-serial-number")
+ "manufacturer": dmi.read_dmi_data("system-manufacturer"),
+ "subid": dmi.read_dmi_data("system-serial-number"),
}
# Assumes is_vultr() has already been checked
def is_baremetal():
- if get_sysinfo()['manufacturer'] != "Vultr":
+ if get_sysinfo()["manufacturer"] != "Vultr":
return True
return False
@@ -102,7 +94,7 @@ def is_vultr():
# VC2, VDC, and HFC use DMI
sysinfo = get_sysinfo()
- if sysinfo['manufacturer'] == "Vultr":
+ if sysinfo["manufacturer"] == "Vultr":
return True
# Baremetal requires a kernel parameter
@@ -118,20 +110,20 @@ def read_metadata(url, timeout, retries, sec_between, agent):
    # Announce OS details so we can handle non-Vultr origin
    # images and provide correct vendordata generation.
- headers = {
- 'Metadata-Token': 'cloudinit',
- 'User-Agent': agent
- }
+ headers = {"Metadata-Token": "cloudinit", "User-Agent": agent}
- response = url_helper.readurl(url,
- timeout=timeout,
- retries=retries,
- headers=headers,
- sec_between=sec_between)
+ response = url_helper.readurl(
+ url,
+ timeout=timeout,
+ retries=retries,
+ headers=headers,
+ sec_between=sec_between,
+ )
if not response.ok():
- raise RuntimeError("Failed to connect to %s: Code: %s" %
- url, response.code)
+ raise RuntimeError(
+            "Failed to connect to %s: Code: %s" % (url, response.code)
+ )
return response.contents.decode()
@@ -156,95 +148,82 @@ def get_interface_name(mac):
def generate_network_config(interfaces):
network = {
"version": 1,
- "config": [
- {
- "type": "nameserver",
- "address": [
- "108.61.10.10"
- ]
- }
- ]
+ "config": [{"type": "nameserver", "address": ["108.61.10.10"]}],
}
# Prepare interface 0, public
if len(interfaces) > 0:
public = generate_public_network_interface(interfaces[0])
- network['config'].append(public)
+ network["config"].append(public)
# Prepare additional interfaces, private
for i in range(1, len(interfaces)):
private = generate_private_network_interface(interfaces[i])
- network['config'].append(private)
+ network["config"].append(private)
return network
# Input Metadata and generate public network config part
def generate_public_network_interface(interface):
- interface_name = get_interface_name(interface['mac'])
+ interface_name = get_interface_name(interface["mac"])
if not interface_name:
raise RuntimeError(
- "Interface: %s could not be found on the system" %
- interface['mac'])
+ "Interface: %s could not be found on the system" % interface["mac"]
+ )
netcfg = {
"name": interface_name,
"type": "physical",
- "mac_address": interface['mac'],
+ "mac_address": interface["mac"],
"accept-ra": 1,
"subnets": [
- {
- "type": "dhcp",
- "control": "auto"
- },
- {
- "type": "ipv6_slaac",
- "control": "auto"
- },
- ]
+ {"type": "dhcp", "control": "auto"},
+ {"type": "ipv6_slaac", "control": "auto"},
+ ],
}
# Options that may or may not be used
if "mtu" in interface:
- netcfg['mtu'] = interface['mtu']
+ netcfg["mtu"] = interface["mtu"]
if "accept-ra" in interface:
- netcfg['accept-ra'] = interface['accept-ra']
+ netcfg["accept-ra"] = interface["accept-ra"]
if "routes" in interface:
- netcfg['subnets'][0]['routes'] = interface['routes']
+ netcfg["subnets"][0]["routes"] = interface["routes"]
    # Check for additional IPv4 addresses
- additional_count = len(interface['ipv4']['additional'])
+    additional_count = len(interface.get("ipv4", {}).get("additional", []))
if "ipv4" in interface and additional_count > 0:
- for additional in interface['ipv4']['additional']:
+ for additional in interface["ipv4"]["additional"]:
add = {
"type": "static",
"control": "auto",
- "address": additional['address'],
- "netmask": additional['netmask']
+ "address": additional["address"],
+ "netmask": additional["netmask"],
}
if "routes" in additional:
- add['routes'] = additional['routes']
+ add["routes"] = additional["routes"]
- netcfg['subnets'].append(add)
+ netcfg["subnets"].append(add)
    # Check for additional IPv6 addresses
- additional_count = len(interface['ipv6']['additional'])
+    additional_count = len(interface.get("ipv6", {}).get("additional", []))
if "ipv6" in interface and additional_count > 0:
- for additional in interface['ipv6']['additional']:
+ for additional in interface["ipv6"]["additional"]:
add = {
"type": "static6",
"control": "auto",
- "address": additional['address'],
- "netmask": additional['netmask']
+ "address": additional["address"],
+ "netmask": additional["netmask"],
}
if "routes" in additional:
- add['routes'] = additional['routes']
+ add["routes"] = additional["routes"]
- netcfg['subnets'].append(add)
+ netcfg["subnets"].append(add)
# Add config to template
return netcfg
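For reference, the shape of the dict this returns for a public interface; "enp1s0" is an assumed device name rather than one resolved from the system:

# {
#     "name": "enp1s0",
#     "type": "physical",
#     "mac_address": "56:00:00:00:00:01",
#     "accept-ra": 1,
#     "subnets": [
#         {"type": "dhcp", "control": "auto"},
#         {"type": "ipv6_slaac", "control": "auto"},
#     ],
# }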
@@ -252,35 +231,35 @@ def generate_public_network_interface(interface):
# Input Metadata and generate private network config part
def generate_private_network_interface(interface):
- interface_name = get_interface_name(interface['mac'])
+ interface_name = get_interface_name(interface["mac"])
if not interface_name:
raise RuntimeError(
- "Interface: %s could not be found on the system" %
- interface['mac'])
+ "Interface: %s could not be found on the system" % interface["mac"]
+ )
netcfg = {
"name": interface_name,
"type": "physical",
- "mac_address": interface['mac'],
+ "mac_address": interface["mac"],
"subnets": [
{
"type": "static",
"control": "auto",
- "address": interface['ipv4']['address'],
- "netmask": interface['ipv4']['netmask']
+ "address": interface["ipv4"]["address"],
+ "netmask": interface["ipv4"]["netmask"],
}
- ]
+ ],
}
# Options that may or may not be used
if "mtu" in interface:
- netcfg['mtu'] = interface['mtu']
+ netcfg["mtu"] = interface["mtu"]
if "accept-ra" in interface:
- netcfg['accept-ra'] = interface['accept-ra']
+ netcfg["accept-ra"] = interface["accept-ra"]
if "routes" in interface:
- netcfg['subnets'][0]['routes'] = interface['routes']
+ netcfg["subnets"][0]["routes"] = interface["routes"]
return netcfg
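The private variant differs only in its subnets: a single static IPv4 entry, with no DHCP or SLAAC defaults. A comparable sketch with fabricated values:

# {
#     "name": "enp8s0",             # assumed device name
#     "type": "physical",
#     "mac_address": "5a:00:00:00:00:02",
#     "subnets": [
#         {"type": "static", "control": "auto",
#          "address": "10.1.96.5", "netmask": "255.255.224.0"},
#     ],
# }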
@@ -288,12 +267,13 @@ def generate_private_network_interface(interface):
# Make required adjustments to the network configs provided
def add_interface_names(interfaces):
for interface in interfaces:
- interface_name = get_interface_name(interface['mac'])
+ interface_name = get_interface_name(interface["mac"])
if not interface_name:
raise RuntimeError(
- "Interface: %s could not be found on the system" %
- interface['mac'])
- interface['name'] = interface_name
+ "Interface: %s could not be found on the system"
+ % interface["mac"]
+ )
+ interface["name"] = interface_name
return interfaces
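A hedged usage sketch: add_interface_names() annotates each metadata entry in place with its resolved device name. The MAC below is fabricated, so the call is shown commented out; it only succeeds when that MAC exists on the running system:

interfaces = [{"mac": "56:00:00:00:00:01"}]
# interfaces = add_interface_names(interfaces)
# interfaces[0]["name"] -> e.g. "enp1s0"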