# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser
# Author: Juerg Haefliger
# Author: Joshua Harlow
#
# This file is part of cloud-init. See LICENSE file for license information.

import contextlib
import copy as obj_copy
import email
import functools
import glob
import grp
import gzip
import hashlib
import io
import json
import os
import os.path
import platform
import pwd
import random
import re
import shlex
import shutil
import socket
import stat
import string
import subprocess
import sys
import time
from base64 import b64decode, b64encode
from collections import deque, namedtuple
from contextlib import suppress
from errno import EACCES, ENOENT
from functools import lru_cache, total_ordering
from pathlib import Path
from typing import Callable, Deque, Dict, List, Optional, TypeVar
from urllib import parse

from cloudinit import features, importer
from cloudinit import log as logging
from cloudinit import (
    mergers,
    net,
    safeyaml,
    subp,
    temp_utils,
    type_utils,
    url_helper,
    version,
)
from cloudinit.settings import CFG_BUILTIN

_DNS_REDIRECT_IP = None
LOG = logging.getLogger(__name__)

# Helps cleanup filenames to ensure they aren't FS incompatible
FN_REPLACEMENTS = {
    os.sep: "_",
}
FN_ALLOWED = "_-.()" + string.digits + string.ascii_letters

TRUE_STRINGS = ("true", "1", "on", "yes")
FALSE_STRINGS = ("off", "0", "no", "false")


def kernel_version():
    return tuple(map(int, os.uname().release.split(".")[:2]))


@lru_cache()
def get_dpkg_architecture(target=None):
    """Return the sanitized string output by `dpkg --print-architecture`.

    N.B. This function is wrapped in functools.lru_cache, so repeated calls
    won't shell out every time.
    """
    out = subp.subp(
        ["dpkg", "--print-architecture"], capture=True, target=target
    )
    return out.stdout.strip()


@lru_cache()
def lsb_release(target=None):
    fmap = {
        "Codename": "codename",
        "Description": "description",
        "Distributor ID": "id",
        "Release": "release",
    }
    data = {}
    try:
        out = subp.subp(["lsb_release", "--all"], capture=True, target=target)
        for line in out.stdout.splitlines():
            fname, _, val = line.partition(":")
            if fname in fmap:
                data[fmap[fname]] = val.strip()
        missing = [k for k in fmap.values() if k not in data]
        if len(missing):
            LOG.warning(
                "Missing fields in lsb_release --all output: %s",
                ",".join(missing),
            )
    except subp.ProcessExecutionError as err:
        LOG.warning("Unable to get lsb_release --all: %s", err)
        data = dict((v, "UNAVAILABLE") for v in fmap.values())
    return data


def decode_binary(blob, encoding="utf-8"):
    # Converts a binary type into a text type using given encoding.
    if isinstance(blob, str):
        return blob
    return blob.decode(encoding)


def encode_text(text, encoding="utf-8"):
    # Converts a text string into a binary type using given encoding.
    if isinstance(text, bytes):
        return text
    return text.encode(encoding)


def b64d(source):
    # Base64 decode some data, accepting bytes or unicode/str, and returning
    # str/unicode if the result is utf-8 compatible, otherwise returning bytes.
    decoded = b64decode(source)
    try:
        return decoded.decode("utf-8")
    except UnicodeDecodeError:
        return decoded


def b64e(source):
    # Base64 encode some data, accepting bytes or unicode/str; the encoded
    # payload is always returned as a utf-8 str.
    if not isinstance(source, bytes):
        source = source.encode("utf-8")
    return b64encode(source).decode("utf-8")
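
# Illustrative round-trip of the two base64 helpers above (a hedged sketch,
# not part of the module API):
#
#   b64e("hello") == "aGVsbG8="              # str input is utf-8 encoded first
#   b64d("aGVsbG8=") == "hello"              # utf-8 decodable results come back as str
#   b64d(b64e(b"\xff\x00")) == b"\xff\x00"   # non-utf-8 payloads stay bytes
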
def fully_decoded_payload(part):
    # In Python 3, decoding the payload will ironically hand us a bytes
    # object.  'decode' means to decode according to
    # Content-Transfer-Encoding, not according to any charset in the
    # Content-Type.  So, if we end up with bytes, first try to decode to str
    # via CT charset, and failing that, try utf-8 using surrogate escapes.
    cte_payload = part.get_payload(decode=True)
    if part.get_content_maintype() == "text" and isinstance(
        cte_payload, bytes
    ):
        charset = part.get_charset()
        if charset and charset.input_codec:
            encoding = charset.input_codec
        else:
            encoding = "utf-8"
        return cte_payload.decode(encoding, "surrogateescape")
    return cte_payload


class SeLinuxGuard:
    def __init__(self, path, recursive=False):
        # Late import since it might not always
        # be possible to use this
        try:
            self.selinux = importer.import_module("selinux")
        except ImportError:
            self.selinux = None
        self.path = path
        self.recursive = recursive

    def __enter__(self):
        if self.selinux and self.selinux.is_selinux_enabled():
            return True
        else:
            return False

    def __exit__(self, excp_type, excp_value, excp_traceback):
        if not self.selinux or not self.selinux.is_selinux_enabled():
            return
        if not os.path.lexists(self.path):
            return

        path = os.path.realpath(self.path)
        try:
            stats = os.lstat(path)
            self.selinux.matchpathcon(path, stats[stat.ST_MODE])
        except OSError:
            return

        LOG.debug(
            "Restoring selinux mode for %s (recursive=%s)",
            path,
            self.recursive,
        )
        try:
            self.selinux.restorecon(path, recursive=self.recursive)
        except OSError as e:
            LOG.warning(
                "restorecon failed on %s,%s maybe badness? %s",
                path,
                self.recursive,
                e,
            )


class MountFailedError(Exception):
    pass


class DecompressionError(Exception):
    pass


def fork_cb(child_cb, *args, **kwargs):
    fid = os.fork()
    if fid == 0:
        try:
            child_cb(*args, **kwargs)
            os._exit(0)
        except Exception:
            logexc(
                LOG,
                "Failed forking and calling callback %s",
                type_utils.obj_name(child_cb),
            )
            os._exit(1)
    else:
        LOG.debug(
            "Forked child %s who will run callback %s",
            fid,
            type_utils.obj_name(child_cb),
        )


def is_true(val, addons=None):
    if isinstance(val, bool):
        return val is True
    check_set = TRUE_STRINGS
    if addons:
        check_set = list(check_set) + addons
    if str(val).lower().strip() in check_set:
        return True
    return False


def is_false(val, addons=None):
    if isinstance(val, bool):
        return val is False
    check_set = FALSE_STRINGS
    if addons:
        check_set = list(check_set) + addons
    if str(val).lower().strip() in check_set:
        return True
    return False


def translate_bool(val, addons=None):
    if not val:
        # This handles empty lists and false and
        # other things that python believes are false
        return False
    # If it's already a boolean, skip
    if isinstance(val, bool):
        return val
    return is_true(val, addons)


def rand_str(strlen=32, select_from=None):
    r = random.SystemRandom()
    if not select_from:
        select_from = string.ascii_letters + string.digits
    return "".join([r.choice(select_from) for _x in range(0, strlen)])


def rand_dict_key(dictionary, postfix=None):
    if not postfix:
        postfix = ""
    while True:
        newkey = rand_str(strlen=8) + "_" + postfix
        if newkey not in dictionary:
            break
    return newkey


def read_conf(fname, *, instance_data_file=None) -> Dict:
    """Read a yaml config with optional template, and convert to dict"""
    # Avoid circular import
    from cloudinit.handlers.jinja_template import (
        JinjaLoadError,
        NotJinjaError,
        render_jinja_payload_from_file,
    )

    try:
        config_file = load_file(fname)
    except IOError as e:
        if e.errno == ENOENT:
            return {}
        else:
            raise

    if instance_data_file and os.path.exists(instance_data_file):
        try:
            config_file = render_jinja_payload_from_file(
                config_file,
                fname,
                instance_data_file,
            )
            LOG.debug(
                "Applied instance data in '%s' to "
                "configuration loaded from '%s'",
                instance_data_file,
                fname,
            )
        except NotJinjaError:
            # A log isn't appropriate here as we generally expect most
            # cloud.cfgs to not be templated. The other path is logged
            pass
        except JinjaLoadError as e:
            LOG.warning(
                "Could not apply Jinja template '%s' to '%s'. "
                "Exception: %s",
                instance_data_file,
                config_file,
                repr(e),
            )
    if config_file is None:
        return {}
    return load_yaml(config_file, default={})  # pyright: ignore
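
# A minimal usage sketch for read_conf (paths are illustrative): a config
# marked "## template: jinja" is rendered against previously-written
# instance data before being parsed as yaml.
#
#   cfg = read_conf(
#       "/etc/cloud/cloud.cfg",
#       instance_data_file="/run/cloud-init/instance-data.json",
#   )
#   cfg.get("hostname")  # value after any jinja substitution, or None
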
# Merges X lists, and then keeps the
# unique ones, but orders by sort order
# instead of by the original order
def uniq_merge_sorted(*lists):
    return sorted(uniq_merge(*lists))


# Merges X lists and then iterates over those
# and only keeps the unique items (order preserving)
# and returns that merged and uniqued list as the
# final result.
#
# Note: if any entry is a string it will be
# split on commas and empty entries will be
# evicted and merged in accordingly.
def uniq_merge(*lists):
    combined_list = []
    for a_list in lists:
        if isinstance(a_list, str):
            a_list = a_list.strip().split(",")
            # Kick out the empty ones
            a_list = [a for a in a_list if a]
        combined_list.extend(a_list)
    return uniq_list(combined_list)


def clean_filename(fn):
    for (k, v) in FN_REPLACEMENTS.items():
        fn = fn.replace(k, v)
    removals = []
    for k in fn:
        if k not in FN_ALLOWED:
            removals.append(k)
    for k in removals:
        fn = fn.replace(k, "")
    fn = fn.strip()
    return fn


def decomp_gzip(data, quiet=True, decode=True):
    try:
        buf = io.BytesIO(encode_text(data))
        with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh:
            # E1101 is https://github.com/PyCQA/pylint/issues/1444
            if decode:
                return decode_binary(gh.read())  # pylint: disable=E1101
            else:
                return gh.read()  # pylint: disable=E1101
    except Exception as e:
        if quiet:
            return data
        else:
            raise DecompressionError(str(e)) from e


def extract_usergroup(ug_pair):
    if not ug_pair:
        return (None, None)
    ug_parted = ug_pair.split(":", 1)
    u = ug_parted[0].strip()
    if len(ug_parted) == 2:
        g = ug_parted[1].strip()
    else:
        g = None
    if not u or u == "-1" or u.lower() == "none":
        u = None
    if not g or g == "-1" or g.lower() == "none":
        g = None
    return (u, g)


def get_modules_from_dir(root_dir: str) -> dict:
    entries = dict()
    for fname in glob.glob(os.path.join(root_dir, "*.py")):
        if not os.path.isfile(fname):
            continue
        modname = os.path.basename(fname)[0:-3]
        modname = modname.strip()
        if modname and modname.find(".") == -1:
            entries[fname] = modname
    return entries


def write_to_console(conpath, text):
    with open(conpath, "w") as wfh:
        wfh.write(text)
        wfh.flush()


def multi_log(
    text,
    console=True,
    stderr=True,
    log=None,
    log_level=logging.DEBUG,
    fallback_to_stdout=True,
):
    if stderr:
        sys.stderr.write(text)
    if console:
        conpath = "/dev/console"
        writing_to_console_worked = False
        if os.path.exists(conpath):
            try:
                write_to_console(conpath, text)
                writing_to_console_worked = True
            except OSError:
                console_error = "Failed to write to /dev/console"
                sys.stdout.write(f"{console_error}\n")
                if log:
                    log.log(logging.WARNING, console_error)

        if fallback_to_stdout and not writing_to_console_worked:
            # A container may lack /dev/console (arguably a container bug).
            # Additionally, /dev/console may not be writable on a VM (again
            # likely a VM bug or virtualization bug).
            #
            # If either of these is the case, then write output to stdout.
            # This will result in duplicate stderr and stdout messages if
            # stderr was True.
            #
            # even though systemd might have set up output to go to
            # /dev/console, the user may have configured elsewhere via
            # cloud-config 'output'.  If there is /dev/console, messages will
            # still get there.
            sys.stdout.write(text)
    if log:
        if text[-1] == "\n":
            log.log(log_level, text[:-1])
        else:
            log.log(log_level, text)
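
# A hedged usage sketch for multi_log: fan a single message out to stderr,
# the system console when available, and a logger at WARNING level.
#
#   multi_log(
#       "failed to mount configured device\n",
#       console=True,
#       stderr=True,
#       log=LOG,
#       log_level=logging.WARNING,
#   )
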
@lru_cache()
def is_Linux():
    return "Linux" in platform.system()


@lru_cache()
def is_BSD():
    if "BSD" in platform.system():
        return True
    if platform.system() == "DragonFly":
        return True
    return False


@lru_cache()
def is_FreeBSD():
    return system_info()["variant"] == "freebsd"


@lru_cache()
def is_DragonFlyBSD():
    return system_info()["variant"] == "dragonfly"


@lru_cache()
def is_NetBSD():
    return system_info()["variant"] == "netbsd"


@lru_cache()
def is_OpenBSD():
    return system_info()["variant"] == "openbsd"


def get_cfg_option_bool(yobj, key, default=False):
    if key not in yobj:
        return default
    return translate_bool(yobj[key])


def get_cfg_option_str(yobj, key, default=None):
    if key not in yobj:
        return default
    val = yobj[key]
    if not isinstance(val, str):
        val = str(val)
    return val


def get_cfg_option_int(yobj, key, default=0):
    return int(get_cfg_option_str(yobj, key, default=default))


def _parse_redhat_release(release_file=None):
    """Return a dictionary of distro info fields from /etc/redhat-release.

    Dict keys will align with /etc/os-release keys:
        ID, VERSION_ID, VERSION_CODENAME
    """
    if not release_file:
        release_file = "/etc/redhat-release"
    if not os.path.exists(release_file):
        return {}
    redhat_release = load_file(release_file)
    redhat_regex = (
        r"(?P<name>.+) release (?P<version>[\d\.]+) "
        r"\((?P<codename>[^)]+)\)"
    )

    # Virtuozzo deviates here
    if "Virtuozzo" in redhat_release:
        redhat_regex = r"(?P<name>.+) release (?P<version>[\d\.]+)"

    match = re.match(redhat_regex, redhat_release)
    if match:
        group = match.groupdict()

        # Virtuozzo has no codename in this file
        if "Virtuozzo" in group["name"]:
            group["codename"] = group["name"]

        group["name"] = group["name"].lower().partition(" linux")[0]
        if group["name"] == "red hat enterprise":
            group["name"] = "redhat"
        return {
            "ID": group["name"],
            "VERSION_ID": group["version"],
            "VERSION_CODENAME": group["codename"],
        }
    return {}
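
# Example of the mapping performed above (illustrative /etc/redhat-release
# content):
#
#   "CentOS Linux release 7.9.2009 (Core)" parses to
#   {"ID": "centos", "VERSION_ID": "7.9.2009", "VERSION_CODENAME": "Core"}
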
@lru_cache()
def get_linux_distro():
    distro_name = ""
    distro_version = ""
    flavor = ""
    os_release = {}
    os_release_rhel = False
    if os.path.exists("/etc/os-release"):
        os_release = load_shell_content(load_file("/etc/os-release"))
    if not os_release:
        os_release_rhel = True
        os_release = _parse_redhat_release()
    if os_release:
        distro_name = os_release.get("ID", "")
        distro_version = os_release.get("VERSION_ID", "")
        if "sles" in distro_name or "suse" in distro_name:
            # RELEASE_BLOCKER: We will drop this sles divergent behavior in
            # the future so that get_linux_distro returns a named tuple
            # which will include both version codename and architecture
            # on all distributions.
            flavor = platform.machine()
        elif distro_name == "photon":
            flavor = os_release.get("PRETTY_NAME", "")
        elif distro_name == "virtuozzo" and not os_release_rhel:
            # Only use this if the redhat file is not parsed
            flavor = os_release.get("PRETTY_NAME", "")
        else:
            flavor = os_release.get("VERSION_CODENAME", "")
            if not flavor:
                match = re.match(
                    r"[^ ]+ \((?P<codename>[^)]+)\)",
                    os_release.get("VERSION", ""),
                )
                if match:
                    flavor = match.groupdict()["codename"]
        if distro_name == "rhel":
            distro_name = "redhat"
    elif is_BSD():
        distro_name = platform.system().lower()
        distro_version = platform.release()
    else:
        dist = ("", "", "")
        try:
            # Was removed in 3.8
            dist = platform.dist()  # pylint: disable=W1505,E1101
        except Exception:
            pass
        finally:
            found = None
            for entry in dist:
                if entry:
                    found = 1
            if not found:
                LOG.warning(
                    "Unable to determine distribution, template "
                    "expansion may have unexpected results"
                )
        return dist

    return (distro_name, distro_version, flavor)


def _get_variant(info):
    system = info["system"].lower()
    variant = "unknown"
    if system == "linux":
        linux_dist = info["dist"][0].lower()
        if linux_dist in (
            "almalinux",
            "alpine",
            "arch",
            "centos",
            "cloudlinux",
            "debian",
            "eurolinux",
            "fedora",
            "mariner",
            "miraclelinux",
            "openeuler",
            "opencloudos",
            "openmandriva",
            "photon",
            "rhel",
            "rocky",
            "suse",
            "tencentos",
            "virtuozzo",
        ):
            variant = linux_dist
        elif linux_dist in ("ubuntu", "linuxmint", "mint"):
            variant = "ubuntu"
        elif linux_dist == "redhat":
            variant = "rhel"
        elif linux_dist in (
            "opensuse",
            "opensuse-leap",
            "opensuse-microos",
            "opensuse-tumbleweed",
            "sle_hpc",
            "sle-micro",
            "sles",
        ):
            variant = "suse"
        else:
            variant = "linux"
    elif system in (
        "windows",
        "darwin",
        "freebsd",
        "netbsd",
        "openbsd",
        "dragonfly",
    ):
        variant = system
    return variant


@lru_cache()
def system_info():
    info = {
        "platform": platform.platform(),
        "system": platform.system(),
        "release": platform.release(),
        "python": platform.python_version(),
        "uname": list(platform.uname()),
        "dist": get_linux_distro(),
    }
    info["variant"] = _get_variant(info)
    return info


def get_cfg_option_list(yobj, key, default=None):
    """
    Gets the C{key} config option from C{yobj} as a list of strings. If the
    key is present as a single string it will be returned as a list with one
    string arg.

    @param yobj: The configuration object.
    @param key: The configuration key to get.
    @param default: The default to return if key is not found.
    @return: The configuration option as a list of strings or default if key
        is not found.
    """
    if key not in yobj:
        return default
    if yobj[key] is None:
        return []
    val = yobj[key]
    if isinstance(val, list):
        cval = [v for v in val]
        return cval
    if not isinstance(val, str):
        val = str(val)
    return [val]
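
# Behavior sketch for get_cfg_option_list (values illustrative):
#
#   get_cfg_option_list({"groups": "adm"}, "groups") == ["adm"]
#   get_cfg_option_list({"groups": ["adm", "wheel"]}, "groups") == ["adm", "wheel"]
#   get_cfg_option_list({"groups": None}, "groups") == []
#   get_cfg_option_list({}, "groups", default=None) is None
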
# get a cfg entry by its path array
# for f['a']['b']: get_cfg_by_path(mycfg,('a','b'))
def get_cfg_by_path(yobj, keyp, default=None):
    """Return the value of the item at path C{keyp} in C{yobj}.

    example:
      get_cfg_by_path({'a': {'b': {'num': 4}}}, 'a/b/num') == 4
      get_cfg_by_path({'a': {'b': {'num': 4}}}, 'c/d') == None

    @param yobj: A dictionary.
    @param keyp: A path inside yobj.  it can be a '/' delimited string,
                 or an iterable.
    @param default: The default to return if the path does not exist.
    @return: The value of the item at keyp, or default if the path
        is not found."""
    if isinstance(keyp, str):
        keyp = keyp.split("/")
    cur = yobj
    for tok in keyp:
        if tok not in cur:
            return default
        cur = cur[tok]
    return cur


def fixup_output(cfg, mode):
    (outfmt, errfmt) = get_output_cfg(cfg, mode)
    redirect_output(outfmt, errfmt)
    return (outfmt, errfmt)


# redirect_output(outfmt, errfmt, orig_out, orig_err)
#  replace orig_out and orig_err with filehandles specified in outfmt or
#  errfmt.  fmt can be:
#   > FILEPATH
#   >> FILEPATH
#   | program [ arg1 [ arg2 [ ... ] ] ]
#
#  with a '|', arguments are passed to shell, so one level of
#  shell escape is required.
#
#  if _CLOUD_INIT_SAVE_STDOUT is set in environment to a non empty and true
#  value then output will not be redirected (useful for debugging).
#
def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
    if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDOUT")):
        LOG.debug("Not redirecting output due to _CLOUD_INIT_SAVE_STDOUT")
        return

    if not o_out:
        o_out = sys.stdout
    if not o_err:
        o_err = sys.stderr

    # pylint: disable=subprocess-popen-preexec-fn
    def set_subprocess_umask_and_gid():
        """Reconfigure umask and group ID to create output files securely.

        This is passed to subprocess.Popen as preexec_fn, so it is executed in
        the context of the newly-created process.  It:

        * sets the umask of the process so created files aren't world-readable
        * if an adm group exists in the system, sets that as the process' GID
          (so that the created file(s) are owned by root:adm)
        """
        os.umask(0o037)
        try:
            group_id = grp.getgrnam("adm").gr_gid
        except KeyError:
            # No adm group, don't set a group
            pass
        else:
            os.setgid(group_id)

    if outfmt:
        LOG.debug("Redirecting %s to %s", o_out, outfmt)
        (mode, arg) = outfmt.split(" ", 1)
        if mode == ">" or mode == ">>":
            owith = "ab"
            if mode == ">":
                owith = "wb"
            new_fp = open(arg, owith)
        elif mode == "|":
            proc = subprocess.Popen(
                arg,
                shell=True,
                stdin=subprocess.PIPE,
                preexec_fn=set_subprocess_umask_and_gid,
            )
            new_fp = proc.stdin
        else:
            raise TypeError("Invalid type for output format: %s" % outfmt)

        if o_out:
            os.dup2(new_fp.fileno(), o_out.fileno())

        if errfmt == outfmt:
            LOG.debug("Redirecting %s to %s", o_err, outfmt)
            os.dup2(new_fp.fileno(), o_err.fileno())
            return

    if errfmt:
        LOG.debug("Redirecting %s to %s", o_err, errfmt)
        (mode, arg) = errfmt.split(" ", 1)
        if mode == ">" or mode == ">>":
            owith = "ab"
            if mode == ">":
                owith = "wb"
            new_fp = open(arg, owith)
        elif mode == "|":
            proc = subprocess.Popen(
                arg,
                shell=True,
                stdin=subprocess.PIPE,
                preexec_fn=set_subprocess_umask_and_gid,
            )
            new_fp = proc.stdin
        else:
            raise TypeError("Invalid type for error format: %s" % errfmt)

        if o_err:
            os.dup2(new_fp.fileno(), o_err.fileno())


def mergemanydict(srcs, reverse=False) -> dict:
    if reverse:
        srcs = reversed(srcs)
    merged_cfg: dict = {}
    for cfg in srcs:
        if cfg:
            # Figure out which mergers to apply...
            mergers_to_apply = mergers.dict_extract_mergers(cfg)
            if not mergers_to_apply:
                mergers_to_apply = mergers.default_mergers()
            merger = mergers.construct(mergers_to_apply)
            merged_cfg = merger.merge(merged_cfg, cfg)
    return merged_cfg
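
# Precedence sketch for mergemanydict with the default (no-replace) mergers:
# for a key present in several sources, the earliest entry in srcs wins.
#
#   mergemanydict([{"a": 1, "b": 2}, {"b": 3, "c": 4}])
#   # -> {"a": 1, "b": 2, "c": 4}
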
@contextlib.contextmanager
def chdir(ndir):
    curr = os.getcwd()
    try:
        os.chdir(ndir)
        yield ndir
    finally:
        os.chdir(curr)


@contextlib.contextmanager
def umask(n_msk):
    old = os.umask(n_msk)
    try:
        yield old
    finally:
        os.umask(old)


def center(text, fill, max_len):
    return "{0:{fill}{align}{size}}".format(
        text, fill=fill, align="^", size=max_len
    )


def del_dir(path):
    LOG.debug("Recursively deleting %s", path)
    shutil.rmtree(path)


# read_optional_seed
# returns boolean indicating success or failure (presence of files)
# if files are present, populates 'fill' dictionary with 'user-data',
# 'vendor-data' and 'meta-data' entries
def read_optional_seed(fill, base="", ext="", timeout=5):
    try:
        (md, ud, vd) = read_seeded(base, ext, timeout)
        fill["user-data"] = ud
        fill["vendor-data"] = vd
        fill["meta-data"] = md
        return True
    except url_helper.UrlError as e:
        if e.code == url_helper.NOT_FOUND:
            return False
        raise


def fetch_ssl_details(paths=None):
    ssl_details = {}
    # Lookup in these locations for ssl key/cert files
    if not paths:
        ssl_cert_paths = [
            "/var/lib/cloud/data/ssl",
            "/var/lib/cloud/instance/data/ssl",
        ]
    else:
        ssl_cert_paths = [
            os.path.join(paths.get_ipath_cur("data"), "ssl"),
            os.path.join(paths.get_cpath("data"), "ssl"),
        ]

    ssl_cert_paths = uniq_merge(ssl_cert_paths)
    ssl_cert_paths = [d for d in ssl_cert_paths if d and os.path.isdir(d)]

    cert_file = None
    for d in ssl_cert_paths:
        if os.path.isfile(os.path.join(d, "cert.pem")):
            cert_file = os.path.join(d, "cert.pem")
            break

    key_file = None
    for d in ssl_cert_paths:
        if os.path.isfile(os.path.join(d, "key.pem")):
            key_file = os.path.join(d, "key.pem")
            break

    if cert_file and key_file:
        ssl_details["cert_file"] = cert_file
        ssl_details["key_file"] = key_file
    elif cert_file:
        ssl_details["cert_file"] = cert_file

    return ssl_details


def load_yaml(blob, default=None, allowed=(dict,)):
    loaded = default
    blob = decode_binary(blob)
    try:
        LOG.debug(
            "Attempting to load yaml from string "
            "of length %s with allowed root types %s",
            len(blob),
            allowed,
        )
        converted = safeyaml.load(blob)
        if converted is None:
            LOG.debug("loaded blob returned None, returning default.")
            converted = default
        elif not isinstance(converted, allowed):
            # Yes this will just be caught, but that's ok for now...
            raise TypeError(
                "Yaml load allows %s root types, but got %s instead"
                % (allowed, type_utils.obj_name(converted))
            )
        loaded = converted
    except (safeyaml.YAMLError, TypeError, ValueError) as e:
        msg = "Failed loading yaml blob"
        mark = None
        if hasattr(e, "context_mark") and getattr(e, "context_mark"):
            mark = getattr(e, "context_mark")
        elif hasattr(e, "problem_mark") and getattr(e, "problem_mark"):
            mark = getattr(e, "problem_mark")
        if mark:
            msg += (
                '. Invalid format at line {line} column {col}: "{err}"'.format(
                    line=mark.line + 1, col=mark.column + 1, err=e
                )
            )
        else:
            msg += ". {err}".format(err=e)
        LOG.warning(msg)
    return loaded
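
# Behavior sketch for load_yaml (values illustrative):
#
#   load_yaml("a: 1") == {"a": 1}
#   load_yaml("- 1\n- 2", allowed=(list,)) == [1, 2]
#   load_yaml("just a string", default={}) == {}  # non-dict root rejected
#   load_yaml("", default={}) == {}               # empty blob -> default
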
def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
    if base.find("%s") >= 0:
        ud_url = base.replace("%s", "user-data" + ext)
        vd_url = base.replace("%s", "vendor-data" + ext)
        md_url = base.replace("%s", "meta-data" + ext)
    else:
        if features.NOCLOUD_SEED_URL_APPEND_FORWARD_SLASH:
            if base[-1] != "/" and parse.urlparse(base).query == "":
                # Append fwd slash when no query string and no %s
                base += "/"
        ud_url = "%s%s%s" % (base, "user-data", ext)
        vd_url = "%s%s%s" % (base, "vendor-data", ext)
        md_url = "%s%s%s" % (base, "meta-data", ext)

    md_resp = url_helper.read_file_or_url(
        md_url, timeout=timeout, retries=retries
    )
    md = None
    if md_resp.ok():
        md = load_yaml(decode_binary(md_resp.contents), default={})

    ud_resp = url_helper.read_file_or_url(
        ud_url, timeout=timeout, retries=retries
    )
    ud = None
    if ud_resp.ok():
        ud = ud_resp.contents

    vd = None
    try:
        vd_resp = url_helper.read_file_or_url(
            vd_url, timeout=timeout, retries=retries
        )
    except url_helper.UrlError as e:
        LOG.debug("Error in vendor-data response: %s", e)
    else:
        if vd_resp.ok():
            vd = vd_resp.contents
        else:
            LOG.debug("Error in vendor-data response")

    return (md, ud, vd)


def read_conf_d(confd, *, instance_data_file=None) -> dict:
    """Read configuration directory."""
    # Get reverse sorted list (alphabetically later files take precedence
    # during the merge below)
    confs = sorted(os.listdir(confd), reverse=True)

    # Remove anything not ending in '.cfg'
    confs = [f for f in confs if f.endswith(".cfg")]

    # Remove anything not a file
    confs = [f for f in confs if os.path.isfile(os.path.join(confd, f))]

    # Load them all so that they can be merged
    cfgs = []
    for fn in confs:
        try:
            cfgs.append(
                read_conf(
                    os.path.join(confd, fn),
                    instance_data_file=instance_data_file,
                )
            )
        except OSError as e:
            if e.errno == EACCES:
                LOG.warning(
                    "REDACTED config part %s/%s for non-root user", confd, fn
                )

    return mergemanydict(cfgs)


def read_conf_with_confd(cfgfile, *, instance_data_file=None) -> dict:
    """Read yaml file along with optional ".d" directory, return merged config

    Given a yaml file, load the file as a dictionary. Additionally, if there
    exists a same-named directory with .d extension, read all files from
    that directory in order and return the merged config. The template
    file is optional and will be applied to any applicable jinja file
    in the configs.

    For example, this function can read both /etc/cloud/cloud.cfg and all
    files in /etc/cloud/cloud.cfg.d and merge all configs into a single dict.
    """
    cfgs: Deque[Dict] = deque()
    cfg: dict = {}
    try:
        cfg = read_conf(cfgfile, instance_data_file=instance_data_file)
    except OSError as e:
        if e.errno == EACCES:
            LOG.warning("REDACTED config part %s for non-root user", cfgfile)
    else:
        cfgs.append(cfg)

    confd = ""
    if "conf_d" in cfg:
        confd = cfg["conf_d"]
        if confd:
            if not isinstance(confd, str):
                raise TypeError(
                    "Config file %s contains 'conf_d' with non-string type %s"
                    % (cfgfile, type_utils.obj_name(confd))
                )
            else:
                confd = str(confd).strip()
    elif os.path.isdir(f"{cfgfile}.d"):
        confd = f"{cfgfile}.d"

    if confd and os.path.isdir(confd):
        # Conf.d settings override input configuration
        confd_cfg = read_conf_d(confd, instance_data_file=instance_data_file)
        cfgs.appendleft(confd_cfg)

    return mergemanydict(cfgs)
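
# A hedged sketch of the merge order: conf.d fragments are prepended, so a
# key set in /etc/cloud/cloud.cfg.d/99-local.cfg (hypothetical file name)
# overrides the same key in /etc/cloud/cloud.cfg.
#
#   merged = read_conf_with_confd("/etc/cloud/cloud.cfg")
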
def read_conf_from_cmdline(cmdline=None):
    # return a dictionary of config on the cmdline or None
    return load_yaml(read_cc_from_cmdline(cmdline=cmdline))


def read_cc_from_cmdline(cmdline=None):
    # this should support reading cloud-config information from
    # the kernel command line.  It is intended to support content of the
    # format:
    #  cc: <yaml content here|urlencoded yaml content> [end_cc]
    # this would include:
    # cc: ssh_import_id: [smoser, kirkland]\\n
    # cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
    # cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc
    # cc:ssh_import_id: %5Bsmoser%5D end_cc
    if cmdline is None:
        cmdline = get_cmdline()

    tag_begin = "cc:"
    tag_end = "end_cc"
    begin_l = len(tag_begin)
    end_l = len(tag_end)
    clen = len(cmdline)
    tokens = []
    begin = cmdline.find(tag_begin)
    while begin >= 0:
        end = cmdline.find(tag_end, begin + begin_l)
        if end < 0:
            end = clen
        tokens.append(
            parse.unquote(cmdline[begin + begin_l : end].lstrip()).replace(
                "\\n", "\n"
            )
        )
        begin = cmdline.find(tag_begin, end + end_l)

    return "\n".join(tokens)
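
# Worked example for read_cc_from_cmdline (kernel command line illustrative):
#
#   read_cc_from_cmdline("root=/dev/vda1 cc: ssh_import_id: [smoser] end_cc")
#   # -> "ssh_import_id: [smoser] "  (the text between the cc:/end_cc tags)
#
# read_conf_from_cmdline would then parse that into
# {"ssh_import_id": ["smoser"]}.
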
""" fqdn = None try: for line in load_file(filename).splitlines(): hashpos = line.find("#") if hashpos >= 0: line = line[0:hashpos] line = line.strip() if not line: continue # If there there is less than 3 entries # (IP_address, canonical_hostname, alias) # then ignore this line toks = line.split() if len(toks) < 3: continue if hostname in toks[2:]: fqdn = toks[1] break except IOError: pass return fqdn def is_resolvable(url) -> bool: """determine if a url's network address is resolvable, return a boolean This also attempts to be resilent against dns redirection. Note, that normal nsswitch resolution is used here. So in order to avoid any utilization of 'search' entries in /etc/resolv.conf we have to append '.'. The top level 'invalid' domain is invalid per RFC. And example.com should also not exist. The '__cloud_init_expected_not_found__' entry will be resolved inside the search list. """ global _DNS_REDIRECT_IP parsed_url = parse.urlparse(url) name = parsed_url.hostname if _DNS_REDIRECT_IP is None: badips = set() badnames = ( "does-not-exist.example.com.", "example.invalid.", "__cloud_init_expected_not_found__", ) badresults: dict = {} for iname in badnames: try: result = socket.getaddrinfo( iname, None, 0, 0, socket.SOCK_STREAM, socket.AI_CANONNAME ) badresults[iname] = [] for (_fam, _stype, _proto, cname, sockaddr) in result: badresults[iname].append("%s: %s" % (cname, sockaddr[0])) badips.add(sockaddr[0]) except (socket.gaierror, socket.error): pass _DNS_REDIRECT_IP = badips if badresults: LOG.debug("detected dns redirection: %s", badresults) try: # ip addresses need no resolution with suppress(ValueError): if net.is_ip_address(parsed_url.netloc.strip("[]")): return True result = socket.getaddrinfo(name, None) # check first result's sockaddr field addr = result[0][4][0] return addr not in _DNS_REDIRECT_IP except (socket.gaierror, socket.error): return False def get_hostname(): hostname = socket.gethostname() return hostname def gethostbyaddr(ip): try: return socket.gethostbyaddr(ip)[0] except socket.herror: return None def is_resolvable_url(url): """determine if this url is resolvable (existing or ip).""" return log_time( logfunc=LOG.debug, msg="Resolving URL: " + url, func=is_resolvable, args=(url,), ) def search_for_mirror(candidates): """ Search through a list of mirror urls for one that works This needs to return quickly. """ if candidates is None: return None LOG.debug("search for mirror in candidates: '%s'", candidates) for cand in candidates: try: if is_resolvable_url(cand): LOG.debug("found working mirror: '%s'", cand) return cand except Exception: pass return None def close_stdin(): """ reopen stdin as /dev/null so even subprocesses or other os level things get /dev/null as input. if _CLOUD_INIT_SAVE_STDIN is set in environment to a non empty and true value then input will not be closed (useful for debugging). 
""" if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDIN")): return with open(os.devnull) as fp: os.dup2(fp.fileno(), sys.stdin.fileno()) def find_devs_with_freebsd( criteria=None, oformat="device", tag=None, no_cache=False, path=None ): devlist = [] if not criteria: return glob.glob("/dev/msdosfs/*") + glob.glob("/dev/iso9660/*") if criteria.startswith("LABEL="): label = criteria.lstrip("LABEL=") devlist = [ p for p in ["/dev/msdosfs/" + label, "/dev/iso9660/" + label] if os.path.exists(p) ] elif criteria == "TYPE=vfat": devlist = glob.glob("/dev/msdosfs/*") elif criteria == "TYPE=iso9660": devlist = glob.glob("/dev/iso9660/*") return devlist def find_devs_with_netbsd( criteria=None, oformat="device", tag=None, no_cache=False, path=None ): devlist = [] label = None _type = None if criteria: if criteria.startswith("LABEL="): label = criteria.lstrip("LABEL=") if criteria.startswith("TYPE="): _type = criteria.lstrip("TYPE=") out = subp.subp(["sysctl", "-n", "hw.disknames"], rcs=[0]) for dev in out.stdout.split(): if label or _type: mscdlabel_out, _ = subp.subp(["mscdlabel", dev], rcs=[0, 1]) if label and not ('label "%s"' % label) in mscdlabel_out: continue if _type == "iso9660" and "ISO filesystem" not in mscdlabel_out: continue if _type == "vfat" and "ISO filesystem" in mscdlabel_out: continue devlist.append("/dev/" + dev) return devlist def find_devs_with_openbsd( criteria=None, oformat="device", tag=None, no_cache=False, path=None ): out = subp.subp(["sysctl", "-n", "hw.disknames"], rcs=[0]) devlist = [] for entry in out.stdout.rstrip().split(","): if not entry.endswith(":"): # ffs partition with a serial, not a config-drive continue if entry == "fd0:": continue devlist.append(entry[:-1] + "a") if not entry.startswith("cd"): devlist.append(entry[:-1] + "i") return ["/dev/" + i for i in devlist] def find_devs_with_dragonflybsd( criteria=None, oformat="device", tag=None, no_cache=False, path=None ): out = subp.subp(["sysctl", "-n", "kern.disks"], rcs=[0]) devlist = [ i for i in sorted(out.stdout.split(), reverse=True) if not i.startswith("md") and not i.startswith("vn") ] if criteria == "TYPE=iso9660": devlist = [ i for i in devlist if i.startswith("cd") or i.startswith("acd") ] elif criteria in ["LABEL=CONFIG-2", "TYPE=vfat"]: devlist = [ i for i in devlist if not (i.startswith("cd") or i.startswith("acd")) ] elif criteria: LOG.debug("Unexpected criteria: %s", criteria) return ["/dev/" + i for i in devlist] def find_devs_with( criteria=None, oformat="device", tag=None, no_cache=False, path=None ): """ find devices matching given criteria (via blkid) criteria can be *one* of: TYPE= LABEL=