Diffstat (limited to 'cloudinit/config')
-rw-r--r--  cloudinit/config/__init__.py | 58
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 319
-rw-r--r--  cloudinit/config/cc_apt_pipelining.py | 57
-rw-r--r--  cloudinit/config/cc_bootcmd.py | 54
-rw-r--r--  cloudinit/config/cc_byobu.py | 80
-rw-r--r--  cloudinit/config/cc_ca_certs.py | 104
-rw-r--r--  cloudinit/config/cc_chef.py | 342
-rw-r--r--  cloudinit/config/cc_debug.py | 109
-rw-r--r--  cloudinit/config/cc_disable_ec2_metadata.py | 36
-rw-r--r--  cloudinit/config/cc_disk_setup.py | 863
-rw-r--r--  cloudinit/config/cc_emit_upstart.py | 69
-rw-r--r--  cloudinit/config/cc_fan.py | 101
-rw-r--r--  cloudinit/config/cc_final_message.py | 73
-rw-r--r--  cloudinit/config/cc_foo.py | 52
-rw-r--r--  cloudinit/config/cc_growpart.py | 300
-rw-r--r--  cloudinit/config/cc_grub_dpkg.py | 73
-rw-r--r--  cloudinit/config/cc_keys_to_console.py | 62
-rw-r--r--  cloudinit/config/cc_landscape.py | 99
-rw-r--r--  cloudinit/config/cc_locale.py | 37
-rw-r--r--  cloudinit/config/cc_lxd.py | 177
-rw-r--r--  cloudinit/config/cc_mcollective.py | 106
-rw-r--r--  cloudinit/config/cc_migrator.py | 85
-rw-r--r--  cloudinit/config/cc_mounts.py | 405
-rw-r--r--  cloudinit/config/cc_package_update_upgrade_install.py | 99
-rw-r--r--  cloudinit/config/cc_phone_home.py | 122
-rw-r--r--  cloudinit/config/cc_power_state_change.py | 223
-rw-r--r--  cloudinit/config/cc_puppet.py | 118
-rw-r--r--  cloudinit/config/cc_resizefs.py | 185
-rw-r--r--  cloudinit/config/cc_resolv_conf.py | 116
-rw-r--r--  cloudinit/config/cc_rh_subscription.py | 408
-rw-r--r--  cloudinit/config/cc_rightscale_userdata.py | 102
-rw-r--r--  cloudinit/config/cc_rsyslog.py | 366
-rw-r--r--  cloudinit/config/cc_runcmd.py | 38
-rw-r--r--  cloudinit/config/cc_salt_minion.py | 59
-rw-r--r--  cloudinit/config/cc_scripts_per_boot.py | 41
-rw-r--r--  cloudinit/config/cc_scripts_per_instance.py | 41
-rw-r--r--  cloudinit/config/cc_scripts_per_once.py | 41
-rw-r--r--  cloudinit/config/cc_scripts_user.py | 42
-rw-r--r--  cloudinit/config/cc_scripts_vendor.py | 43
-rw-r--r--  cloudinit/config/cc_seed_random.py | 94
-rw-r--r--  cloudinit/config/cc_set_hostname.py | 37
-rw-r--r--  cloudinit/config/cc_set_passwords.py | 167
-rw-r--r--  cloudinit/config/cc_snappy.py | 304
-rw-r--r--  cloudinit/config/cc_ssh.py | 142
-rw-r--r--  cloudinit/config/cc_ssh_authkey_fingerprints.py | 105
-rw-r--r--  cloudinit/config/cc_ssh_import_id.py | 99
-rw-r--r--  cloudinit/config/cc_timezone.py | 39
-rw-r--r--  cloudinit/config/cc_ubuntu_init_switch.py | 162
-rw-r--r--  cloudinit/config/cc_update_etc_hosts.py | 60
-rw-r--r--  cloudinit/config/cc_update_hostname.py | 43
-rw-r--r--  cloudinit/config/cc_users_groups.py | 34
-rw-r--r--  cloudinit/config/cc_write_files.py | 105
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py | 107
53 files changed, 0 insertions, 7203 deletions
diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py
deleted file mode 100644
index d57453be..00000000
--- a/cloudinit/config/__init__.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2008-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Chuck Short <chuck.short@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-
-from cloudinit.settings import (PER_INSTANCE, FREQUENCIES)
-
-from cloudinit import log as logging
-
-LOG = logging.getLogger(__name__)
-
-# This prefix is used to reduce the chance that, when importing,
-# we will find something else with the same
-# name in the lookup path...
-MOD_PREFIX = "cc_"
-
-
-def form_module_name(name):
- canon_name = name.replace("-", "_")
- if canon_name.lower().endswith(".py"):
- canon_name = canon_name[0:(len(canon_name) - 3)]
- canon_name = canon_name.strip()
- if not canon_name:
- return None
- if not canon_name.startswith(MOD_PREFIX):
- canon_name = '%s%s' % (MOD_PREFIX, canon_name)
- return canon_name
-
-
-def fixup_module(mod, def_freq=PER_INSTANCE):
- if not hasattr(mod, 'frequency'):
- setattr(mod, 'frequency', def_freq)
- else:
- freq = mod.frequency
- if freq and freq not in FREQUENCIES:
- LOG.warn("Module %s has an unknown frequency %s", mod, freq)
- if not hasattr(mod, 'distros'):
- setattr(mod, 'distros', [])
- if not hasattr(mod, 'osfamilies'):
- setattr(mod, 'osfamilies', [])
- return mod
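
For reference, form_module_name() above canonicalizes a config module name before import: dashes become underscores, a trailing '.py' is dropped, and the 'cc_' prefix is added when missing. A minimal usage sketch with made-up inputs, assuming the pre-removal cloudinit.config package is importable:

    from cloudinit.config import form_module_name

    form_module_name("apt-configure.py")  # -> "cc_apt_configure"
    form_module_name("cc_chef")           # -> "cc_chef"
    form_module_name("")                  # -> None
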
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
deleted file mode 100644
index 05ad4b03..00000000
--- a/cloudinit/config/cc_apt_configure.py
+++ /dev/null
@@ -1,319 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import glob
-import os
-import re
-
-from cloudinit import gpg
-from cloudinit import templater
-from cloudinit import util
-
-distros = ['ubuntu', 'debian']
-
-PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n"
-APT_CONFIG_FN = "/etc/apt/apt.conf.d/94cloud-init-config"
-APT_PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy"
-
-# this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar')
-ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
-
-
-def handle(name, cfg, cloud, log, _args):
- if util.is_false(cfg.get('apt_configure_enabled', True)):
- log.debug("Skipping module named %s, disabled by config.", name)
- return
-
- release = get_release()
- mirrors = find_apt_mirror_info(cloud, cfg)
- if not mirrors or "primary" not in mirrors:
- log.debug(("Skipping module named %s,"
- " no package 'mirror' located"), name)
- return
-
- # backwards compatibility
- mirror = mirrors["primary"]
- mirrors["mirror"] = mirror
-
- log.debug("Mirror info: %s" % mirrors)
-
- if not util.get_cfg_option_bool(cfg,
- 'apt_preserve_sources_list', False):
- generate_sources_list(cfg, release, mirrors, cloud, log)
- old_mirrors = cfg.get('apt_old_mirrors',
- {"primary": "archive.ubuntu.com/ubuntu",
- "security": "security.ubuntu.com/ubuntu"})
- rename_apt_lists(old_mirrors, mirrors)
-
- try:
- apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
- except Exception as e:
- log.warn("failed to proxy or apt config info: %s", e)
-
- # Process 'apt_sources'
- if 'apt_sources' in cfg:
- params = mirrors
- params['RELEASE'] = release
- params['MIRROR'] = mirror
-
- matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH)
- if matchcfg:
- matcher = re.compile(matchcfg).search
- else:
- def matcher(x):
- return False
-
- errors = add_apt_sources(cfg['apt_sources'], params,
- aa_repo_match=matcher)
- for e in errors:
- log.warn("Add source error: %s", ':'.join(e))
-
- dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False)
- if dconf_sel:
- log.debug("Setting debconf selections per cloud config")
- try:
- util.subp(('debconf-set-selections', '-'), dconf_sel)
- except Exception:
- util.logexc(log, "Failed to run debconf-set-selections")
-
-
-def mirrorurl_to_apt_fileprefix(mirror):
- string = mirror
- # take off http:// or ftp://
- if string.endswith("/"):
- string = string[0:-1]
- pos = string.find("://")
- if pos >= 0:
- string = string[pos + 3:]
- string = string.replace("/", "_")
- return string
-
-
-def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"):
- for (name, omirror) in old_mirrors.items():
- nmirror = new_mirrors.get(name)
- if not nmirror:
- continue
- oprefix = os.path.join(lists_d, mirrorurl_to_apt_fileprefix(omirror))
- nprefix = os.path.join(lists_d, mirrorurl_to_apt_fileprefix(nmirror))
- if oprefix == nprefix:
- continue
- olen = len(oprefix)
- for filename in glob.glob("%s_*" % oprefix):
- util.rename(filename, "%s%s" % (nprefix, filename[olen:]))
-
-
-def get_release():
- (stdout, _stderr) = util.subp(['lsb_release', '-cs'])
- return stdout.strip()
-
-
-def generate_sources_list(cfg, codename, mirrors, cloud, log):
- params = {'codename': codename}
- for k in mirrors:
- params[k] = mirrors[k]
-
- custtmpl = cfg.get('apt_custom_sources_list', None)
- if custtmpl is not None:
- templater.render_string_to_file(custtmpl,
- '/etc/apt/sources.list', params)
- return
-
- template_fn = cloud.get_template_filename('sources.list.%s' %
- (cloud.distro.name))
- if not template_fn:
- template_fn = cloud.get_template_filename('sources.list')
- if not template_fn:
- log.warn("No template found, not rendering /etc/apt/sources.list")
- return
-
- templater.render_to_file(template_fn, '/etc/apt/sources.list', params)
-
-
-def add_apt_key_raw(key):
- """
- Add the raw key given in the 'key' argument to the system keyring.
- """
- try:
- util.subp(('apt-key', 'add', '-'), key)
- except util.ProcessExecutionError:
- raise ValueError('failed to add apt GPG Key to apt keyring')
-
-
-def add_apt_key(ent):
- """
- Add a key to the system as defined in ent (if any);
- supports raw keys or key ids.
- The latter will first fetch the raw key from a keyserver.
- """
- if 'keyid' in ent and 'key' not in ent:
- keyserver = "keyserver.ubuntu.com"
- if 'keyserver' in ent:
- keyserver = ent['keyserver']
- ent['key'] = gpg.get_key_by_id(ent['keyid'], keyserver)
-
- if 'key' in ent:
- add_apt_key_raw(ent['key'])
-
-
-def convert_to_new_format(srclist):
- """convert_to_new_format
- convert the old list based format to the new dict based one
- """
- srcdict = {}
- if isinstance(srclist, list):
- for srcent in srclist:
- if 'filename' not in srcent:
- # entries without a filename collide on one default file for
- # compatibility, yet all must be processed, so do not reuse the dict key
- srcent['filename'] = "cloud_config_sources.list"
- key = util.rand_dict_key(srcdict, "cloud_config_sources.list")
- else:
- # all with filename use that as key (matching new format)
- key = srcent['filename']
- srcdict[key] = srcent
- elif isinstance(srclist, dict):
- srcdict = srclist
- else:
- raise ValueError("unknown apt_sources format")
-
- return srcdict
-
-
-def add_apt_sources(srclist, template_params=None, aa_repo_match=None):
- """
- add entries in /etc/apt/sources.list.d for each abbreviated
- sources.list entry in 'srclist'. When rendering template, also
- include the values in dictionary searchList
- """
- if template_params is None:
- template_params = {}
-
- if aa_repo_match is None:
- def _aa_repo_match(x):
- return False
- aa_repo_match = _aa_repo_match
-
- errorlist = []
- srcdict = convert_to_new_format(srclist)
-
- for filename in srcdict:
- ent = srcdict[filename]
- if 'filename' not in ent:
- ent['filename'] = filename
-
- # keys can be added without specifying a source
- try:
- add_apt_key(ent)
- except ValueError as detail:
- errorlist.append([ent, detail])
-
- if 'source' not in ent:
- errorlist.append(["", "missing source"])
- continue
- source = ent['source']
- source = templater.render_string(source, template_params)
-
- if not ent['filename'].startswith(os.path.sep):
- ent['filename'] = os.path.join("/etc/apt/sources.list.d/",
- ent['filename'])
-
- if aa_repo_match(source):
- try:
- util.subp(["add-apt-repository", source])
- except util.ProcessExecutionError as e:
- errorlist.append([source,
- ("add-apt-repository failed. " + str(e))])
- continue
-
- try:
- contents = "%s\n" % (source)
- util.write_file(ent['filename'], contents, omode="ab")
- except Exception:
- errorlist.append([source,
- "failed write to file %s" % ent['filename']])
-
- return errorlist
-
-
-def find_apt_mirror_info(cloud, cfg):
- """find an apt_mirror given the cloud and cfg provided."""
-
- mirror = None
-
- # this is the less preferred way of specifying a mirror; preferred would
- # be to use the distro's search or package_mirror.
- mirror = cfg.get("apt_mirror", None)
-
- search = cfg.get("apt_mirror_search", None)
- if not mirror and search:
- mirror = util.search_for_mirror(search)
-
- if (not mirror and
- util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)):
- mydom = ""
- doms = []
-
- # if we have a fqdn, then search its domain portion first
- (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- mydom = ".".join(fqdn.split(".")[1:])
- if mydom:
- doms.append(".%s" % mydom)
-
- doms.extend((".localdomain", "",))
-
- mirror_list = []
- distro = cloud.distro.name
- mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
- for post in doms:
- mirror_list.append(mirrorfmt % (post))
-
- mirror = util.search_for_mirror(mirror_list)
-
- mirror_info = cloud.datasource.get_package_mirror_info()
-
- # this is a bit strange.
- # if mirror is set, then one of the legacy options above set it
- # but they do not cover security. so we need to get that from
- # get_package_mirror_info
- if mirror:
- mirror_info.update({'primary': mirror})
-
- return mirror_info
-
-
-def apply_apt_config(cfg, proxy_fname, config_fname):
- # Set up any apt proxy
- cfgs = (('apt_proxy', 'Acquire::HTTP::Proxy "%s";'),
- ('apt_http_proxy', 'Acquire::HTTP::Proxy "%s";'),
- ('apt_ftp_proxy', 'Acquire::FTP::Proxy "%s";'),
- ('apt_https_proxy', 'Acquire::HTTPS::Proxy "%s";'))
-
- proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
- if len(proxies):
- util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
- elif os.path.isfile(proxy_fname):
- util.del_file(proxy_fname)
-
- if cfg.get('apt_config', None):
- util.write_file(config_fname, cfg.get('apt_config'))
- elif os.path.isfile(config_fname):
- util.del_file(config_fname)
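
As a reference for the helpers above: convert_to_new_format() maps the legacy list-based 'apt_sources' to the dict-based layout, and mirrorurl_to_apt_fileprefix() derives the /var/lib/apt/lists prefix used by rename_apt_lists(). A sketch of their observable behaviour with illustrative values, assuming the pre-removal module is importable:

    from cloudinit.config.cc_apt_configure import (
        convert_to_new_format, mirrorurl_to_apt_fileprefix)

    mirrorurl_to_apt_fileprefix("http://archive.ubuntu.com/ubuntu/")
    # -> "archive.ubuntu.com_ubuntu"

    convert_to_new_format([
        {'source': 'deb $MIRROR $RELEASE multiverse',
         'filename': 'multiverse.list'},
        {'source': 'ppa:example/ppa'},          # no filename given
    ])
    # -> {'multiverse.list': {'source': 'deb $MIRROR $RELEASE multiverse',
    #                         'filename': 'multiverse.list'},
    #     '<key from util.rand_dict_key>': {'source': 'ppa:example/ppa',
    #                         'filename': 'cloud_config_sources.list'}}
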
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
deleted file mode 100644
index 40c32c84..00000000
--- a/cloudinit/config/cc_apt_pipelining.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-#
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-frequency = PER_INSTANCE
-
-distros = ['ubuntu', 'debian']
-
-DEFAULT_FILE = "/etc/apt/apt.conf.d/90cloud-init-pipelining"
-
-APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n"
- 'Acquire::http::Pipeline-Depth "%s";\n')
-
-# Acquire::http::Pipeline-Depth can be a value
-# from 0 to 5 indicating how many outstanding requests APT should send.
-# A value of zero MUST be specified if the remote host does not properly linger
-# on TCP connections - otherwise data corruption will occur.
-
-
-def handle(_name, cfg, _cloud, log, _args):
-
- apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)
- apt_pipe_value_s = str(apt_pipe_value).lower().strip()
-
- if apt_pipe_value_s == "false":
- write_apt_snippet("0", log, DEFAULT_FILE)
- elif apt_pipe_value_s in ("none", "unchanged", "os"):
- return
- elif apt_pipe_value_s in [str(b) for b in range(0, 6)]:
- write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE)
- else:
- log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value)
-
-
-def write_apt_snippet(setting, log, f_name):
- """Writes f_name with apt pipeline depth 'setting'."""
-
- file_contents = APT_PIPE_TPL % (setting)
- util.write_file(f_name, file_contents)
- log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting)
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
deleted file mode 100644
index b763a3c3..00000000
--- a/cloudinit/config/cc_bootcmd.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2011 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
-
-frequency = PER_ALWAYS
-
-
-def handle(name, cfg, cloud, log, _args):
-
- if "bootcmd" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'bootcmd' key in configuration"), name)
- return
-
- with util.ExtendedTemporaryFile(suffix=".sh") as tmpf:
- try:
- content = util.shellify(cfg["bootcmd"])
- tmpf.write(util.encode_text(content))
- tmpf.flush()
- except Exception:
- util.logexc(log, "Failed to shellify bootcmd")
- raise
-
- try:
- env = os.environ.copy()
- iid = cloud.get_instance_id()
- if iid:
- env['INSTANCE_ID'] = str(iid)
- cmd = ['/bin/sh', tmpf.name]
- util.subp(cmd, env=env, capture=False)
- except Exception:
- util.logexc(log, "Failed to run bootcmd module %s", name)
- raise
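
The handler above turns the 'bootcmd' list into a small /bin/sh script, exports INSTANCE_ID into its environment and executes it. util.shellify (not shown in this diff) does the list-to-script conversion; the sketch below uses a simplified stand-in for it, plus made-up commands, and only approximates the real helper:

    import os
    import shlex
    import subprocess
    import tempfile

    def shellify_sketch(cmds):
        # simplified stand-in for cloudinit.util.shellify: string entries
        # pass through verbatim, list entries are shell-quoted into one line
        lines = ["#!/bin/sh"]
        for cmd in cmds:
            if isinstance(cmd, (list, tuple)):
                lines.append(" ".join(shlex.quote(str(p)) for p in cmd))
            else:
                lines.append(str(cmd))
        return "\n".join(lines) + "\n"

    with tempfile.NamedTemporaryFile("w", suffix=".sh") as tmpf:
        tmpf.write(shellify_sketch([
            ["echo", "hello", "from bootcmd"],
            "echo instance is $INSTANCE_ID",
        ]))
        tmpf.flush()
        env = dict(os.environ, INSTANCE_ID="i-1234")
        subprocess.check_call(["/bin/sh", tmpf.name], env=env)
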
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
deleted file mode 100644
index ef0ce7ab..00000000
--- a/cloudinit/config/cc_byobu.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
-from cloudinit import util
-
-distros = ['ubuntu', 'debian']
-
-
-def handle(name, cfg, cloud, log, args):
- if len(args) != 0:
- value = args[0]
- else:
- value = util.get_cfg_option_str(cfg, "byobu_by_default", "")
-
- if not value:
- log.debug("Skipping module named %s, no 'byobu' values found", name)
- return
-
- if value == "user" or value == "system":
- value = "enable-%s" % value
-
- valid = ("enable-user", "enable-system", "enable",
- "disable-user", "disable-system", "disable")
- if value not in valid:
- log.warn("Unknown value %s for byobu_by_default", value)
-
- mod_user = value.endswith("-user")
- mod_sys = value.endswith("-system")
- if value.startswith("enable"):
- bl_inst = "install"
- dc_val = "byobu byobu/launch-by-default boolean true"
- mod_sys = True
- else:
- if value == "disable":
- mod_user = True
- mod_sys = True
- bl_inst = "uninstall"
- dc_val = "byobu byobu/launch-by-default boolean false"
-
- shcmd = ""
- if mod_user:
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
- (user, _user_config) = ds.extract_default(users)
- if not user:
- log.warn(("No default byobu user provided, "
- "can not launch %s for the default user"), bl_inst)
- else:
- shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst)
- shcmd += " || X=$(($X+1)); "
- if mod_sys:
- shcmd += "echo \"%s\" | debconf-set-selections" % dc_val
- shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive"
- shcmd += " || X=$(($X+1)); "
-
- if len(shcmd):
- cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
- log.debug("Setting byobu to %s", value)
- util.subp(cmd, capture=False)
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
deleted file mode 100644
index 8248b020..00000000
--- a/cloudinit/config/cc_ca_certs.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Author: Mike Milner <mike.milner@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-CA_CERT_PATH = "/usr/share/ca-certificates/"
-CA_CERT_FILENAME = "cloud-init-ca-certs.crt"
-CA_CERT_CONFIG = "/etc/ca-certificates.conf"
-CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/"
-CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
-
-distros = ['ubuntu', 'debian']
-
-
-def update_ca_certs():
- """
- Updates the CA certificate cache on the current machine.
- """
- util.subp(["update-ca-certificates"], capture=False)
-
-
-def add_ca_certs(certs):
- """
- Adds certificates to the system. To actually apply the new certificates
- you must also call L{update_ca_certs}.
-
- @param certs: A list of certificate strings.
- """
- if certs:
- # First ensure they are strings...
- cert_file_contents = "\n".join([str(c) for c in certs])
- util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0o644)
-
- # Append cert filename to CA_CERT_CONFIG file.
- # We have to strip the content because blank lines in the file
- # cause subsequent entries to be ignored. (LP: #1077020)
- orig = util.load_file(CA_CERT_CONFIG)
- cur_cont = '\n'.join([l for l in orig.splitlines()
- if l != CA_CERT_FILENAME])
- out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME)
- util.write_file(CA_CERT_CONFIG, out, omode="wb")
-
-
-def remove_default_ca_certs():
- """
- Removes all default trusted CA certificates from the system. To actually
- apply the change you must also call L{update_ca_certs}.
- """
- util.delete_dir_contents(CA_CERT_PATH)
- util.delete_dir_contents(CA_CERT_SYSTEM_PATH)
- util.write_file(CA_CERT_CONFIG, "", mode=0o644)
- debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no"
- util.subp(('debconf-set-selections', '-'), debconf_sel)
-
-
-def handle(name, cfg, _cloud, log, _args):
- """
- Call to handle ca-cert sections in cloud-config file.
-
- @param name: The module name "ca-cert" from cloud.cfg
- @param cfg: A nested dict containing the entire cloud config contents.
- @param cloud: The L{CloudInit} object in use.
- @param log: Pre-initialized Python logger object to use for logging.
- @param args: Any module arguments from cloud.cfg
- """
- # If there isn't a ca-certs section in the configuration don't do anything
- if "ca-certs" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'ca-certs' key in configuration"), name)
- return
-
- ca_cert_cfg = cfg['ca-certs']
-
- # If there is a remove-defaults option set to true, remove the system
- # default trusted CA certs first.
- if ca_cert_cfg.get("remove-defaults", False):
- log.debug("Removing default certificates")
- remove_default_ca_certs()
-
- # If we are given any new trusted CA certs to add, add them.
- if "trusted" in ca_cert_cfg:
- trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted")
- if trusted_certs:
- log.debug("Adding %d certificates" % len(trusted_certs))
- add_ca_certs(trusted_certs)
-
- # Update the system with the new cert configuration.
- log.debug("Updating certificates")
- update_ca_certs()
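
The handler above consumes a 'ca-certs' dict with two keys: 'remove-defaults' (a boolean) and 'trusted' (a list of PEM strings). Expressed as the Python dict it arrives in (certificate bodies elided):

    cfg = {
        'ca-certs': {
            'remove-defaults': False,
            'trusted': [
                "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
            ],
        },
    }
    # remove_default_ca_certs() runs first when remove-defaults is true,
    # add_ca_certs() appends the 'trusted' entries to
    # /usr/share/ca-certificates/cloud-init-ca-certs.crt, and
    # update_ca_certs() then refreshes the system certificate store.
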
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
deleted file mode 100644
index 4c28be6a..00000000
--- a/cloudinit/config/cc_chef.py
+++ /dev/null
@@ -1,342 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Avishai Ish-Shalom <avishai@fewbytes.com>
-# Author: Mike Moulton <mike@meltmedia.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-**Summary:** module that configures, starts and installs chef.
-
-**Description:** This module enables chef to be installed (from packages or
-from gems, or from omnibus). Before this occurs chef configurations are
-written to disk (validation.pem, client.pem, firstboot.json, client.rb),
-and needed chef folders/directories are created (/etc/chef and /var/log/chef
-and so on). Once installation has completed, chef is started (in daemon
-mode or in non-daemon mode) if configured to do so. Once that has finished
-(if run in non-daemon mode this is when chef finishes converging; if run in
-daemon mode no further actions are possible since chef will have forked
-into its own process), a post-run function can perform finishing activities
-(such as removing the validation pem file).
-
-It can be configured with the following option structure::
-
- chef:
- directories: (defaulting to /etc/chef, /var/log/chef, /var/lib/chef,
- /var/cache/chef, /var/backups/chef, /var/run/chef)
- validation_cert: (optional string to be written to the file named by
- validation_key; the special value 'system' means use the existing file)
- validation_key: (optional path for validation_cert, defaults to
- /etc/chef/validation.pem)
- firstboot_path: (path to write the run_list and initial_attributes keys
- that should also be present in this configuration, defaults to
- /etc/chef/firstboot.json)
- exec: boolean to run or not run chef (defaults to false, unless a gem
- install is requested, in which case this defaults to true)
-
- chef.rb template keys (if falsey, these will be skipped and not
- written to /etc/chef/client.rb)
-
- chef:
- client_key:
- environment:
- file_backup_path:
- file_cache_path:
- json_attribs:
- log_level:
- log_location:
- node_name:
- pid_file:
- server_url:
- show_time:
- ssl_verify_mode:
- validation_cert:
- validation_key:
- validation_name:
-"""
-
-import itertools
-import json
-import os
-
-from cloudinit import templater
-from cloudinit import url_helper
-from cloudinit import util
-
-import six
-
-RUBY_VERSION_DEFAULT = "1.8"
-
-CHEF_DIRS = tuple([
- '/etc/chef',
- '/var/log/chef',
- '/var/lib/chef',
- '/var/cache/chef',
- '/var/backups/chef',
- '/var/run/chef',
-])
-REQUIRED_CHEF_DIRS = tuple([
- '/etc/chef',
-])
-
-# Used if fetching chef from an omnibus-style package
-OMNIBUS_URL = "https://www.getchef.com/chef/install.sh"
-OMNIBUS_URL_RETRIES = 5
-
-CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem'
-CHEF_FB_PATH = '/etc/chef/firstboot.json'
-CHEF_RB_TPL_DEFAULTS = {
- # These are ruby symbols...
- 'ssl_verify_mode': ':verify_none',
- 'log_level': ':info',
- # These are not symbols...
- 'log_location': '/var/log/chef/client.log',
- 'validation_key': CHEF_VALIDATION_PEM_PATH,
- 'validation_cert': None,
- 'client_key': "/etc/chef/client.pem",
- 'json_attribs': CHEF_FB_PATH,
- 'file_cache_path': "/var/cache/chef",
- 'file_backup_path': "/var/backups/chef",
- 'pid_file': "/var/run/chef/client.pid",
- 'show_time': True,
-}
-CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time'])
-CHEF_RB_TPL_PATH_KEYS = frozenset([
- 'log_location',
- 'validation_key',
- 'client_key',
- 'file_cache_path',
- 'json_attribs',
- 'file_cache_path',
- 'pid_file',
-])
-CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys())
-CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS)
-CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_PATH_KEYS)
-CHEF_RB_TPL_KEYS.extend([
- 'server_url',
- 'node_name',
- 'environment',
- 'validation_name',
-])
-CHEF_RB_TPL_KEYS = frozenset(CHEF_RB_TPL_KEYS)
-CHEF_RB_PATH = '/etc/chef/client.rb'
-CHEF_EXEC_PATH = '/usr/bin/chef-client'
-CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20'])
-
-
-def is_installed():
- if not os.path.isfile(CHEF_EXEC_PATH):
- return False
- if not os.access(CHEF_EXEC_PATH, os.X_OK):
- return False
- return True
-
-
-def post_run_chef(chef_cfg, log):
- delete_pem = util.get_cfg_option_bool(chef_cfg,
- 'delete_validation_post_exec',
- default=False)
- if delete_pem and os.path.isfile(CHEF_VALIDATION_PEM_PATH):
- os.unlink(CHEF_VALIDATION_PEM_PATH)
-
-
-def get_template_params(iid, chef_cfg, log):
- params = CHEF_RB_TPL_DEFAULTS.copy()
- # Allow users to overwrite any of the keys they want (if they so choose);
- # when a value is None it stays None and no boolean or string version
- # will be populated...
- for (k, v) in chef_cfg.items():
- if k not in CHEF_RB_TPL_KEYS:
- log.debug("Skipping unknown chef template key '%s'", k)
- continue
- if v is None:
- params[k] = None
- else:
- # This will make the value a boolean or string...
- if k in CHEF_RB_TPL_BOOL_KEYS:
- params[k] = util.get_cfg_option_bool(chef_cfg, k)
- else:
- params[k] = util.get_cfg_option_str(chef_cfg, k)
- # These ones are overwritten to be exact values...
- params.update({
- 'generated_by': util.make_header(),
- 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name',
- default=iid),
- 'environment': util.get_cfg_option_str(chef_cfg, 'environment',
- default='_default'),
- # These two are mandatory...
- 'server_url': chef_cfg['server_url'],
- 'validation_name': chef_cfg['validation_name'],
- })
- return params
-
-
-def handle(name, cfg, cloud, log, _args):
- """Handler method activated by cloud-init."""
-
- # If there isn't a chef key in the configuration don't do anything
- if 'chef' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'chef' key in configuration"), name)
- return
- chef_cfg = cfg['chef']
-
- # Ensure the chef directories we use exist
- chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories')
- if not chef_dirs:
- chef_dirs = list(CHEF_DIRS)
- for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS):
- util.ensure_dir(d)
-
- vkey_path = chef_cfg.get('validation_key', CHEF_VALIDATION_PEM_PATH)
- vcert = chef_cfg.get('validation_cert')
- # special value 'system' means do not overwrite the file
- # but still render the template to contain 'validation_key'
- if vcert:
- if vcert != "system":
- util.write_file(vkey_path, vcert)
- elif not os.path.isfile(vkey_path):
- log.warn("chef validation_cert provided as 'system', but "
- "validation_key path '%s' does not exist.",
- vkey_path)
-
- # Create the chef config from template
- template_fn = cloud.get_template_filename('chef_client.rb')
- if template_fn:
- iid = str(cloud.datasource.get_instance_id())
- params = get_template_params(iid, chef_cfg, log)
- # Do a best effort attempt to ensure that the template values that
- # are associated with paths have their parent directory created
- # before they are used by the chef-client itself.
- param_paths = set()
- for (k, v) in params.items():
- if k in CHEF_RB_TPL_PATH_KEYS and v:
- param_paths.add(os.path.dirname(v))
- util.ensure_dirs(param_paths)
- templater.render_to_file(template_fn, CHEF_RB_PATH, params)
- else:
- log.warn("No template found, not rendering to %s",
- CHEF_RB_PATH)
-
- # Set the firstboot json
- fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path',
- default=CHEF_FB_PATH)
- if not fb_filename:
- log.info("First boot path empty, not writing first boot json file")
- else:
- initial_json = {}
- if 'run_list' in chef_cfg:
- initial_json['run_list'] = chef_cfg['run_list']
- if 'initial_attributes' in chef_cfg:
- initial_attributes = chef_cfg['initial_attributes']
- for k in list(initial_attributes.keys()):
- initial_json[k] = initial_attributes[k]
- util.write_file(fb_filename, json.dumps(initial_json))
-
- # Try to install chef, if it's not already installed...
- force_install = util.get_cfg_option_bool(chef_cfg,
- 'force_install', default=False)
- if not is_installed() or force_install:
- run = install_chef(cloud, chef_cfg, log)
- elif is_installed():
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
- else:
- run = False
- if run:
- run_chef(chef_cfg, log)
- post_run_chef(chef_cfg, log)
-
-
-def run_chef(chef_cfg, log):
- log.debug('Running chef-client')
- cmd = [CHEF_EXEC_PATH]
- if 'exec_arguments' in chef_cfg:
- cmd_args = chef_cfg['exec_arguments']
- if isinstance(cmd_args, (list, tuple)):
- cmd.extend(cmd_args)
- elif isinstance(cmd_args, six.string_types):
- cmd.append(cmd_args)
- else:
- log.warn("Unknown type %s provided for chef"
- " 'exec_arguments' expected list, tuple,"
- " or string", type(cmd_args))
- cmd.extend(CHEF_EXEC_DEF_ARGS)
- else:
- cmd.extend(CHEF_EXEC_DEF_ARGS)
- util.subp(cmd, capture=False)
-
-
-def install_chef(cloud, chef_cfg, log):
- # If chef is not installed, we install chef based on 'install_type'
- install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
- 'packages')
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
- if install_type == "gems":
- # This will install and run the chef-client from gems
- chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
- ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
- RUBY_VERSION_DEFAULT)
- install_chef_from_gems(ruby_version, chef_version, cloud.distro)
- # Retain backwards compat by preferring True instead of False
- # when not provided/overridden...
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True)
- elif install_type == 'packages':
- # This will install and run the chef-client from packages
- cloud.distro.install_packages(('chef',))
- elif install_type == 'omnibus':
- # This will install as an omnibus unified package
- url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL)
- retries = max(0, util.get_cfg_option_int(chef_cfg,
- "omnibus_url_retries",
- default=OMNIBUS_URL_RETRIES))
- content = url_helper.readurl(url=url, retries=retries)
- with util.tempdir() as tmpd:
- # Use tmpdir over tmpfile to avoid 'text file busy' on execute
- tmpf = "%s/chef-omnibus-install" % tmpd
- util.write_file(tmpf, content, mode=0o700)
- util.subp([tmpf], capture=False)
- else:
- log.warn("Unknown chef install type '%s'", install_type)
- run = False
- return run
-
-
-def get_ruby_packages(version):
- # return a list of packages needed to install ruby at version
- pkgs = ['ruby%s' % version, 'ruby%s-dev' % version]
- if version == "1.8":
- pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8'))
- return pkgs
-
-
-def install_chef_from_gems(ruby_version, chef_version, distro):
- distro.install_packages(get_ruby_packages(ruby_version))
- if not os.path.exists('/usr/bin/gem'):
- util.sym_link('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem')
- if not os.path.exists('/usr/bin/ruby'):
- util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')
- if chef_version:
- util.subp(['/usr/bin/gem', 'install', 'chef',
- '-v %s' % chef_version, '--no-ri',
- '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False)
- else:
- util.subp(['/usr/bin/gem', 'install', 'chef',
- '--no-ri', '--no-rdoc', '--bindir',
- '/usr/bin', '-q'], capture=False)
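
To illustrate the client.rb parameter handling above: get_template_params() starts from CHEF_RB_TPL_DEFAULTS, overlays any recognised keys from the user's chef config, and then forces generated_by, node_name (defaulting to the instance id), environment ('_default'), server_url and validation_name. A sketch with made-up values, assuming the pre-removal module is importable:

    import logging
    from cloudinit.config.cc_chef import get_template_params

    chef_cfg = {
        'server_url': 'https://chef.example.com/organizations/demo',
        'validation_name': 'demo-validator',
        'ssl_verify_mode': ':verify_peer',
    }
    params = get_template_params("i-0abc123", chef_cfg, logging.getLogger())
    # params['ssl_verify_mode'] == ':verify_peer'  (user override)
    # params['node_name'] == 'i-0abc123'           (defaults to the instance id)
    # params['environment'] == '_default'
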
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
deleted file mode 100644
index bdc32fe6..00000000
--- a/cloudinit/config/cc_debug.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2013 Yahoo! Inc.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-**Summary:** helper to debug cloud-init *internal* datastructures.
-
-**Description:** This module outputs various internal information that
-cloud-init sources provide, either to a file or to the console/log
-location that this cloud-init has been configured with when running.
-
-It can be configured with the following option structure::
-
- debug:
- verbose: (defaulting to true)
- output: (location to write output, defaulting to console + log)
-
-.. note::
-
- Log configurations are not output.
-"""
-
-import copy
-
-from six import StringIO
-
-from cloudinit import type_utils
-from cloudinit import util
-
-SKIP_KEYS = frozenset(['log_cfgs'])
-
-
-def _make_header(text):
- header = StringIO()
- header.write("-" * 80)
- header.write("\n")
- header.write(text.center(80, ' '))
- header.write("\n")
- header.write("-" * 80)
- header.write("\n")
- return header.getvalue()
-
-
-def _dumps(obj):
- text = util.yaml_dumps(obj, explicit_start=False, explicit_end=False)
- return text.rstrip()
-
-
-def handle(name, cfg, cloud, log, args):
- """Handler method activated by cloud-init."""
-
- verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True)
- if args:
- # if args are provided (from cmdline) then explicitly set verbose
- out_file = args[0]
- verbose = True
- else:
- out_file = util.get_cfg_by_path(cfg, ('debug', 'output'))
-
- if not verbose:
- log.debug(("Skipping module named %s,"
- " verbose printing disabled"), name)
- return
- # Clean out some keys that we just don't care about showing...
- dump_cfg = copy.deepcopy(cfg)
- for k in SKIP_KEYS:
- dump_cfg.pop(k, None)
- all_keys = list(dump_cfg)
- for k in all_keys:
- if k.startswith("_"):
- dump_cfg.pop(k, None)
- # Now dump it...
- to_print = StringIO()
- to_print.write(_make_header("Config"))
- to_print.write(_dumps(dump_cfg))
- to_print.write("\n")
- to_print.write(_make_header("MetaData"))
- to_print.write(_dumps(cloud.datasource.metadata))
- to_print.write("\n")
- to_print.write(_make_header("Misc"))
- to_print.write("Datasource: %s\n" %
- (type_utils.obj_name(cloud.datasource)))
- to_print.write("Distro: %s\n" % (type_utils.obj_name(cloud.distro)))
- to_print.write("Hostname: %s\n" % (cloud.get_hostname(True)))
- to_print.write("Instance ID: %s\n" % (cloud.get_instance_id()))
- to_print.write("Locale: %s\n" % (cloud.get_locale()))
- to_print.write("Launch IDX: %s\n" % (cloud.launch_index))
- contents = to_print.getvalue()
- content_to_file = []
- for line in contents.splitlines():
- line = "ci-info: %s\n" % (line)
- content_to_file.append(line)
- if out_file:
- util.write_file(out_file, "".join(content_to_file), 0o644, "w")
- else:
- util.multi_log("".join(content_to_file), console=True, stderr=False)
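
For reference, this module renders each section under an 80-column banner and prefixes every output line with 'ci-info: '. A sketch of the banner helper, assuming the pre-removal module is importable:

    from cloudinit.config.cc_debug import _make_header

    print(_make_header("Config"))
    # prints an 80-dash rule, 'Config' centred in an 80-character line,
    # and another 80-dash rule
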
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
deleted file mode 100644
index 3fd2c20f..00000000
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import util
-
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
-
-REJECT_CMD = ['route', 'add', '-host', '169.254.169.254', 'reject']
-
-
-def handle(name, cfg, _cloud, log, _args):
- disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
- if disabled:
- util.subp(REJECT_CMD, capture=False)
- else:
- log.debug(("Skipping module named %s,"
- " disabling the ec2 route not enabled"), name)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
deleted file mode 100644
index b642f1f8..00000000
--- a/cloudinit/config/cc_disk_setup.py
+++ /dev/null
@@ -1,863 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-import logging
-import os
-import shlex
-
-frequency = PER_INSTANCE
-
-# Define the commands to use
-UDEVADM_CMD = util.which('udevadm')
-SFDISK_CMD = util.which("sfdisk")
-SGDISK_CMD = util.which("sgdisk")
-LSBLK_CMD = util.which("lsblk")
-BLKID_CMD = util.which("blkid")
-BLKDEV_CMD = util.which("blockdev")
-WIPEFS_CMD = util.which("wipefs")
-
-LOG = logging.getLogger(__name__)
-
-
-def handle(_name, cfg, cloud, log, _args):
- """
- See doc/examples/cloud-config_disk-setup.txt for documentation on the
- format.
- """
- disk_setup = cfg.get("disk_setup")
- if isinstance(disk_setup, dict):
- update_disk_setup_devices(disk_setup, cloud.device_name_to_device)
- log.debug("Partitioning disks: %s", str(disk_setup))
- for disk, definition in disk_setup.items():
- if not isinstance(definition, dict):
- log.warn("Invalid disk definition for %s" % disk)
- continue
-
- try:
- log.debug("Creating new partition table/disk")
- util.log_time(logfunc=LOG.debug,
- msg="Creating partition on %s" % disk,
- func=mkpart, args=(disk, definition))
- except Exception as e:
- util.logexc(LOG, "Failed partitioning operation\n%s" % e)
-
- fs_setup = cfg.get("fs_setup")
- if isinstance(fs_setup, list):
- log.debug("setting up filesystems: %s", str(fs_setup))
- update_fs_setup_devices(fs_setup, cloud.device_name_to_device)
- for definition in fs_setup:
- if not isinstance(definition, dict):
- log.warn("Invalid file system definition: %s" % definition)
- continue
-
- try:
- log.debug("Creating new filesystem.")
- device = definition.get('device')
- util.log_time(logfunc=LOG.debug,
- msg="Creating fs for %s" % device,
- func=mkfs, args=(definition,))
- except Exception as e:
- util.logexc(LOG, "Failed during filesystem operation\n%s" % e)
-
-
-def update_disk_setup_devices(disk_setup, tformer):
- # update the 'disk_setup' dictionary wherever a device may occur,
- # replacing it with the response from 'tformer'
- for origname in disk_setup.keys():
- transformed = tformer(origname)
- if transformed is None or transformed == origname:
- continue
- if transformed in disk_setup:
- LOG.info("Replacing %s in disk_setup for translation of %s",
- origname, transformed)
- del disk_setup[transformed]
-
- disk_setup[transformed] = disk_setup[origname]
- disk_setup[transformed]['_origname'] = origname
- del disk_setup[origname]
- LOG.debug("updated disk_setup device entry '%s' to '%s'",
- origname, transformed)
-
-
-def update_fs_setup_devices(disk_setup, tformer):
- # update the 'fs_setup' dictionary wherever a device may occur,
- # replacing it with the response from 'tformer'
- for definition in disk_setup:
- if not isinstance(definition, dict):
- LOG.warn("entry in disk_setup not a dict: %s", definition)
- continue
-
- origname = definition.get('device')
-
- if origname is None:
- continue
-
- (dev, part) = util.expand_dotted_devname(origname)
-
- tformed = tformer(dev)
- if tformed is not None:
- dev = tformed
- LOG.debug("%s is mapped to disk=%s part=%s",
- origname, tformed, part)
- definition['_origname'] = origname
- definition['device'] = tformed
-
- if part and 'partition' in definition:
- definition['_partition'] = definition['partition']
- definition['partition'] = part
-
-
-def value_splitter(values, start=None):
- """
- Yields the key/value pairs parsed from a string of assignments
- like: FOO='BAR' HOME='127.0.0.1'
- """
- _values = shlex.split(values)
- if start:
- _values = _values[start:]
-
- for key, value in [x.split('=') for x in _values]:
- yield key, value
-
-
-def enumerate_disk(device, nodeps=False):
- """
- Enumerate a device and its child devices.
-
- Parameters:
- device: the kernel device name
- nodeps <BOOL>: don't enumerate children devices
-
- Return a dict describing the disk:
- type: the entry type, i.e. disk or part
- fstype: the filesystem type, if it exists
- label: file system label, if it exists
- name: the device name, i.e. sda
- """
-
- lsblk_cmd = [LSBLK_CMD, '--pairs', '--output', 'NAME,TYPE,FSTYPE,LABEL',
- device]
-
- if nodeps:
- lsblk_cmd.append('--nodeps')
-
- info = None
- try:
- info, _err = util.subp(lsblk_cmd)
- except Exception as e:
- raise Exception("Failed during disk check for %s\n%s" % (device, e))
-
- parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0]
-
- for part in parts:
- d = {
- 'name': None,
- 'type': None,
- 'fstype': None,
- 'label': None,
- }
-
- for key, value in value_splitter(part):
- d[key.lower()] = value
-
- yield d
-
-
-def device_type(device):
- """
- Return the device type of the device by calling lsblk.
- """
-
- for d in enumerate_disk(device, nodeps=True):
- if "type" in d:
- return d["type"].lower()
- return None
-
-
-def is_device_valid(name, partition=False):
- """
- Check if the device is a valid device.
- """
- d_type = ""
- try:
- d_type = device_type(name)
- except Exception:
- LOG.warn("Query against device %s failed" % name)
- return False
-
- if partition and d_type == 'part':
- return True
- elif not partition and d_type == 'disk':
- return True
- return False
-
-
-def check_fs(device):
- """
- Check if the device has a filesystem on it
-
- Output of blkid is generally something like:
- /dev/sda: LABEL="Backup500G" UUID="..." TYPE="ext4"
-
- Return values are device, label, type, uuid
- """
- out, label, fs_type, uuid = None, None, None, None
-
- blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device]
- try:
- out, _err = util.subp(blkid_cmd, rcs=[0, 2])
- except Exception as e:
- raise Exception("Failed during disk check for %s\n%s" % (device, e))
-
- if out:
- if len(out.splitlines()) == 1:
- for key, value in value_splitter(out, start=1):
- if key.lower() == 'label':
- label = value
- elif key.lower() == 'type':
- fs_type = value
- elif key.lower() == 'uuid':
- uuid = value
-
- return label, fs_type, uuid
-
-
-def is_filesystem(device):
- """
- Returns true if the device has a file system.
- """
- _, fs_type, _ = check_fs(device)
- return fs_type
-
-
-def find_device_node(device, fs_type=None, label=None, valid_targets=None,
- label_match=True, replace_fs=None):
- """
- Find a device that either matches the spec, or the first suitable device
-
- The return value is (<device>, <bool>) where the device is the
- device to use and the bool is whether the device matches the
- fs_type and label.
-
- Note: This works with GPT partition tables!
- """
- # label of None is same as no label
- if label is None:
- label = ""
-
- if not valid_targets:
- valid_targets = ['disk', 'part']
-
- raw_device_used = False
- for d in enumerate_disk(device):
-
- if d['fstype'] == replace_fs and label_match is False:
- # We found a device where we want to replace the FS
- return ('/dev/%s' % d['name'], False)
-
- if (d['fstype'] == fs_type and
- ((label_match and d['label'] == label) or not label_match)):
- # If we find a matching device, we return that
- return ('/dev/%s' % d['name'], True)
-
- if d['type'] in valid_targets:
-
- if d['type'] != 'disk' or d['fstype']:
- raw_device_used = True
-
- if d['type'] == 'disk':
- # Skip the raw disk, it's the default
- pass
-
- elif not d['fstype']:
- return ('/dev/%s' % d['name'], False)
-
- if not raw_device_used:
- return (device, False)
-
- LOG.warn("Failed to find device during available device search.")
- return (None, False)
-
-
-def is_disk_used(device):
- """
- Check if the device is currently used. Returns true if the device
- has either a file system or a partition entry.
- """
-
- # If the child count is higher than 1, then there are child nodes
- # such as partition or device mapper nodes
- if len(list(enumerate_disk(device))) > 1:
- return True
-
- # If we see a file system, then it's used
- _, check_fstype, _ = check_fs(device)
- if check_fstype:
- return True
-
- return False
-
-
-def get_dyn_func(*args):
- """
- Call the appropriate function.
-
- The first value is the template for the function name
- The second value is the template replacement
- The remaining values are passed to the function
-
- For example: get_dyn_func("foo_%s", 'bar', 1, 2, 3,)
- would call "foo_bar" with args of 1, 2, 3
- """
- if len(args) < 2:
- raise Exception("Unable to determine dynamic funcation name")
-
- func_name = (args[0] % args[1])
- func_args = args[2:]
-
- try:
- if func_args:
- return globals()[func_name](*func_args)
- else:
- return globals()[func_name]
-
- except KeyError:
- raise Exception("No such function %s to call!" % func_name)
-
-
-def get_mbr_hdd_size(device):
- size_cmd = [SFDISK_CMD, '--show-size', device]
- size = None
- try:
- size, _err = util.subp(size_cmd)
- except Exception as e:
- raise Exception("Failed to get %s size\n%s" % (device, e))
-
- return int(size.strip())
-
-
-def get_gpt_hdd_size(device):
- out, _ = util.subp([SGDISK_CMD, '-p', device])
- return out.splitlines()[0].split()[2]
-
-
-def get_hdd_size(table_type, device):
- """
- Returns the hard disk size.
- This works with any disk type, including GPT.
- """
- return get_dyn_func("get_%s_hdd_size", table_type, device)
-
-
-def check_partition_mbr_layout(device, layout):
- """
- Returns true if the partition layout matches the one on the disk
-
- Layout should be a list of values. At this time, this only
- verifies that the number of partitions and their type labels are correct.
- """
-
- read_parttbl(device)
- prt_cmd = [SFDISK_CMD, "-l", device]
- try:
- out, _err = util.subp(prt_cmd, data="%s\n" % layout)
- except Exception as e:
- raise Exception("Error running partition command on %s\n%s" % (
- device, e))
-
- found_layout = []
- for line in out.splitlines():
- _line = line.split()
- if len(_line) == 0:
- continue
-
- if device in _line[0]:
- # We don't understand extended partitions yet
- if _line[-1].lower() in ['extended', 'empty']:
- continue
-
- # Find the partition types
- type_label = None
- for x in sorted(range(1, len(_line)), reverse=True):
- if _line[x].isdigit() and _line[x] != '/':
- type_label = _line[x]
- break
-
- found_layout.append(type_label)
- return found_layout
-
-
-def check_partition_gpt_layout(device, layout):
- prt_cmd = [SGDISK_CMD, '-p', device]
- try:
- out, _err = util.subp(prt_cmd)
- except Exception as e:
- raise Exception("Error running partition command on %s\n%s" % (
- device, e))
-
- out_lines = iter(out.splitlines())
- # Skip header
- for line in out_lines:
- if line.strip().startswith('Number'):
- break
-
- return [line.strip().split()[-1] for line in out_lines]
-
-
-def check_partition_layout(table_type, device, layout):
- """
- See if the partition lay out matches.
-
- This is a future-proofing function. In order
- to add support for other disk layout schemes, add a
- function called check_partition_%s_layout
- """
- found_layout = get_dyn_func(
- "check_partition_%s_layout", table_type, device, layout)
-
- if isinstance(layout, bool):
- # if we are using auto partitioning, or "True" be happy
- # if a single partition exists.
- if layout and len(found_layout) >= 1:
- return True
- return False
-
- else:
- if len(found_layout) != len(layout):
- return False
- else:
- # This just makes sure that the number of requested
- # partitions and the type labels are right
- for x in range(1, len(layout) + 1):
- if isinstance(layout[x - 1], tuple):
- _, part_type = layout[x - 1]
- if int(found_layout[x - 1]) != int(part_type):
- return False
- return True
-
- return False
-
-
-def get_partition_mbr_layout(size, layout):
- """
- Calculate the layout of the partition table. Partition sizes
- are defined as percentage values or a tuple of percentage and
- partition type.
-
- For example:
- [ 33, [66, 82] ]
-
- Defines the first partition to be a size of 1/3 of the disk,
- while the remaining 2/3 will be of type Linux Swap (type 82).
- """
-
- if not isinstance(layout, list) and isinstance(layout, bool):
- # Create a single partition
- return "0,"
-
- if ((len(layout) == 0 and isinstance(layout, list)) or
- not isinstance(layout, list)):
- raise Exception("Partition layout is invalid")
-
- last_part_num = len(layout)
- if last_part_num > 4:
- raise Exception("Only simply partitioning is allowed.")
-
- part_definition = []
- part_num = 0
- for part in layout:
- part_type = 83 # Default to Linux
- percent = part
- part_num += 1
-
- if isinstance(part, list):
- if len(part) != 2:
- raise Exception("Partition was incorrectly defined: %s" % part)
- percent, part_type = part
-
- part_size = int((float(size) * (float(percent) / 100)) / 1024)
-
- if part_num == last_part_num:
- part_definition.append(",,%s" % part_type)
- else:
- part_definition.append(",%s,%s" % (part_size, part_type))
-
- sfdisk_definition = "\n".join(part_definition)
- if len(part_definition) > 4:
- raise Exception("Calculated partition definition is too big\n%s" %
- sfdisk_definition)
-
- return sfdisk_definition
-
-
-def get_partition_gpt_layout(size, layout):
- if isinstance(layout, bool):
- return [(None, [0, 0])]
-
- partition_specs = []
- for partition in layout:
- if isinstance(partition, list):
- if len(partition) != 2:
- raise Exception(
- "Partition was incorrectly defined: %s" % partition)
- percent, partition_type = partition
- else:
- percent = partition
- partition_type = None
-
- part_size = int(float(size) * (float(percent) / 100))
- partition_specs.append((partition_type, [0, '+{}'.format(part_size)]))
-
- # The last partition should use up all remaining space
- partition_specs[-1][-1][-1] = 0
- return partition_specs
-
-
-def purge_disk_ptable(device):
- # wipe the first and last megabyte of a disk (or file)
- # gpt stores partition table both at front and at end.
- null = '\0'
- start_len = 1024 * 1024
- end_len = 1024 * 1024
- with open(device, "rb+") as fp:
- fp.write(null * (start_len))
- fp.seek(-end_len, os.SEEK_END)
- fp.write(null * end_len)
- fp.flush()
-
- read_parttbl(device)
-
-
-def purge_disk(device):
- """
- Remove partition table entries
- """
-
- # wipe any file systems first
- for d in enumerate_disk(device):
- if d['type'] not in ["disk", "crypt"]:
- wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']]
- try:
- LOG.info("Purging filesystem on /dev/%s" % d['name'])
- util.subp(wipefs_cmd)
- except Exception:
- raise Exception("Failed FS purge of /dev/%s" % d['name'])
-
- purge_disk_ptable(device)
-
-
-def get_partition_layout(table_type, size, layout):
- """
- Call the appropriate function for creating the table
- definition. Returns the table definition
-
- This is a future proofing function. To add support for
- other layouts, simply add a "get_partition_%s_layout"
- function.
- """
- return get_dyn_func("get_partition_%s_layout", table_type, size, layout)
-
-
-def read_parttbl(device):
- """
-    Re-read the partition table with 'blockdev --rereadpt', bracketed
-    by 'udevadm settle' calls so the kernel and udev see a consistent view.
- """
- blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
- udev_cmd = [UDEVADM_CMD, 'settle']
- try:
- util.subp(udev_cmd)
- util.subp(blkdev_cmd)
- util.subp(udev_cmd)
- except Exception as e:
- util.logexc(LOG, "Failed reading the partition table %s" % e)
-
-
-def exec_mkpart_mbr(device, layout):
- """
-    Break out MBR partitioning into its own function to allow for
-    future partition types, e.g. gpt
- """
- # Create the partitions
- prt_cmd = [SFDISK_CMD, "--Linux", "-uM", device]
- try:
- util.subp(prt_cmd, data="%s\n" % layout)
- except Exception as e:
- raise Exception("Failed to partition device %s\n%s" % (device, e))
-
- read_parttbl(device)
-
-
-def exec_mkpart_gpt(device, layout):
- try:
- util.subp([SGDISK_CMD, '-Z', device])
- for index, (partition_type, (start, end)) in enumerate(layout):
- index += 1
- util.subp([SGDISK_CMD,
- '-n', '{}:{}:{}'.format(index, start, end), device])
- if partition_type is not None:
- util.subp(
- [SGDISK_CMD,
- '-t', '{}:{}'.format(index, partition_type), device])
- except Exception:
- LOG.warn("Failed to partition device %s" % device)
- raise
-
-
-def exec_mkpart(table_type, device, layout):
- """
-    Fetches the function for creating the table type.
-    This allows the correct function to be found dynamically.
-
-    Parameters:
- table_type: type of partition table to use
- device: the device to work on
- layout: layout definition specific to partition table
- """
- return get_dyn_func("exec_mkpart_%s", table_type, device, layout)
-
-
-def mkpart(device, definition):
- """
- Creates the partition table.
-
- Parameters:
- definition: dictionary describing how to create the partition.
-
- The following are supported values in the dict:
- overwrite: Should the partition table be created regardless
-                    of any pre-existing data?
- layout: the layout of the partition table
- table_type: Which partition table to use, defaults to MBR
- device: the device to work on.
- """
- # ensure that we get a real device rather than a symbolic link
- device = os.path.realpath(device)
-
- LOG.debug("Checking values for %s definition" % device)
- overwrite = definition.get('overwrite', False)
- layout = definition.get('layout', False)
- table_type = definition.get('table_type', 'mbr')
-
- # Check if the default device is a partition or not
- LOG.debug("Checking against default devices")
-
- if (isinstance(layout, bool) and not layout) or not layout:
- LOG.debug("Device is not to be partitioned, skipping")
- return # Device is not to be partitioned
-
- # This prevents you from overwriting the device
- LOG.debug("Checking if device %s is a valid device", device)
- if not is_device_valid(device):
-        raise Exception("Device %s is not a disk device!" % device)
-
- # Remove the partition table entries
- if isinstance(layout, str) and layout.lower() == "remove":
- LOG.debug("Instructed to remove partition table entries")
- purge_disk(device)
- return
-
- LOG.debug("Checking if device layout matches")
- if check_partition_layout(table_type, device, layout):
- LOG.debug("Device partitioning layout matches")
- return True
-
- LOG.debug("Checking if device is safe to partition")
- if not overwrite and (is_disk_used(device) or is_filesystem(device)):
- LOG.debug("Skipping partitioning on configured device %s" % device)
- return
-
- LOG.debug("Checking for device size")
- device_size = get_hdd_size(table_type, device)
-
- LOG.debug("Calculating partition layout")
- part_definition = get_partition_layout(table_type, device_size, layout)
- LOG.debug(" Layout is: %s" % part_definition)
-
- LOG.debug("Creating partition table on %s", device)
- exec_mkpart(table_type, device, part_definition)
-
- LOG.debug("Partition table created for %s", device)
-
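
For reference, a hypothetical `definition` dict of the shape mkpart() consumes, built from the fields listed in its docstring; the device path and layout values are invented:

# Hypothetical example only; device and layout values are invented.
example_definition = {
    'device': '/dev/xvdb',          # disk to operate on
    'table_type': 'mbr',            # or 'gpt'
    'overwrite': False,             # do not clobber pre-existing data
    'layout': [33, [66, 82]],       # 1/3 Linux, remainder swap
}
# mkpart(example_definition['device'], example_definition)
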
-
-def lookup_force_flag(fs):
- """
-    A force flag might be -F or -f; look it up for the given filesystem.
- """
- flags = {
- 'ext': '-F',
- 'btrfs': '-f',
- 'xfs': '-f',
- 'reiserfs': '-f',
- }
-
- if 'ext' in fs.lower():
- fs = 'ext'
-
- if fs.lower() in flags:
-        return flags[fs.lower()]
-
- LOG.warn("Force flag for %s is unknown." % fs)
- return ''
-
-
-def mkfs(fs_cfg):
- """
- Create a file system on the device.
-
- label: defines the label to use on the device
- fs_cfg: defines how the filesystem is to look
- The following values are required generally:
- device: which device or cloud defined default_device
- filesystem: which file system type
- overwrite: indiscriminately create the file system
- partition: when device does not define a partition,
- setting this to a number will mean
- device + partition. When set to 'auto', the
- first free device or the first device which
- matches both label and type will be used.
-
- 'any' means the first filesystem that matches
- on the device.
-
- When 'cmd' is provided then no other parameter is required.
- """
- label = fs_cfg.get('label')
- device = fs_cfg.get('device')
- partition = str(fs_cfg.get('partition', 'any'))
- fs_type = fs_cfg.get('filesystem')
- fs_cmd = fs_cfg.get('cmd', [])
- fs_opts = fs_cfg.get('extra_opts', [])
- fs_replace = fs_cfg.get('replace_fs', False)
- overwrite = fs_cfg.get('overwrite', False)
-
- # ensure that we get a real device rather than a symbolic link
- device = os.path.realpath(device)
-
- # This allows you to define the default ephemeral or swap
- LOG.debug("Checking %s against default devices", device)
-
- if not partition or partition.isdigit():
- # Handle manual definition of partition
- if partition.isdigit():
- device = "%s%s" % (device, partition)
- LOG.debug("Manual request of partition %s for %s",
- partition, device)
-
- # Check to see if the fs already exists
- LOG.debug("Checking device %s", device)
- check_label, check_fstype, _ = check_fs(device)
- LOG.debug("Device %s has %s %s", device, check_label, check_fstype)
-
- if check_label == label and check_fstype == fs_type:
- LOG.debug("Existing file system found at %s", device)
-
- if not overwrite:
- LOG.debug("Device %s has required file system", device)
- return
- else:
- LOG.warn("Destroying filesystem on %s", device)
-
- else:
-            LOG.debug("Device %s is cleared for formatting", device)
-
- elif partition and str(partition).lower() in ('auto', 'any'):
- # For auto devices, we match if the filesystem does exist
- odevice = device
-        LOG.debug("Identifying device to create %s filesystem on", label)
-
-        # 'any' means pick the first match on the device with matching fs_type
- label_match = True
- if partition.lower() == 'any':
- label_match = False
-
- device, reuse = find_device_node(device, fs_type=fs_type, label=label,
- label_match=label_match,
- replace_fs=fs_replace)
- LOG.debug("Automatic device for %s identified as %s", odevice, device)
-
- if reuse:
-            LOG.debug("Found filesystem match, skipping formatting.")
- return
-
- if not reuse and fs_replace and device:
- LOG.debug("Replacing file system on %s as instructed." % device)
-
- if not device:
-            LOG.debug("No device available that matches request. "
- "Skipping fs creation for %s", fs_cfg)
- return
- elif not partition or str(partition).lower() == 'none':
- LOG.debug("Using the raw device to place filesystem %s on" % label)
-
- else:
- LOG.debug("Error in device identification handling.")
- return
-
- LOG.debug("File system %s will be created on %s", label, device)
-
- # Make sure the device is defined
- if not device:
- LOG.warn("Device is not known: %s", device)
- return
-
- # Check that we can create the FS
- if not (fs_type or fs_cmd):
-        raise Exception("No way to create filesystem '%s'. fs_type or fs_cmd "
-                        "must be set." % label)
-
- # Create the commands
- if fs_cmd:
- fs_cmd = fs_cfg['cmd'] % {
- 'label': label,
- 'filesystem': fs_type,
- 'device': device,
- }
- else:
- # Find the mkfs command
- mkfs_cmd = util.which("mkfs.%s" % fs_type)
- if not mkfs_cmd:
- mkfs_cmd = util.which("mk%s" % fs_type)
-
- if not mkfs_cmd:
- LOG.warn("Cannot create fstype '%s'. No mkfs.%s command", fs_type,
- fs_type)
- return
-
- fs_cmd = [mkfs_cmd, device]
-
- if label:
- fs_cmd.extend(["-L", label])
-
- # File systems that support the -F flag
-        if overwrite or device_type(device) == "disk":
-            force_flag = lookup_force_flag(fs_type)
-            if force_flag:
-                fs_cmd.append(force_flag)
-
-        # Add the extra FS options
- if fs_opts:
- fs_cmd.extend(fs_opts)
-
- LOG.debug("Creating file system %s on %s", label, device)
- LOG.debug(" Using cmd: %s", " ".join(fs_cmd))
- try:
- util.subp(fs_cmd)
- except Exception as e:
-        raise Exception("Failed to exec '%s':\n%s" % (fs_cmd, e))
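
Similarly, a hypothetical fs_cfg of the shape mkfs() expects, using the fields described in its docstring; all values are invented:

# Hypothetical example only; device, label and option values are invented.
example_fs_cfg = {
    'label': 'data',
    'device': '/dev/xvdb',
    'partition': 1,                 # or 'auto' / 'any' / 'none'
    'filesystem': 'ext4',
    'overwrite': False,
    'extra_opts': ['-m', '0'],      # appended verbatim to the mkfs command
}
# mkfs(example_fs_cfg)
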
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
deleted file mode 100644
index 98828b9e..00000000
--- a/cloudinit/config/cc_emit_upstart.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
-
-frequency = PER_ALWAYS
-
-distros = ['ubuntu', 'debian']
-LOG = logging.getLogger(__name__)
-
-
-def is_upstart_system():
- if not os.path.isfile("/sbin/initctl"):
- LOG.debug("no /sbin/initctl located")
- return False
-
- myenv = os.environ.copy()
- if 'UPSTART_SESSION' in myenv:
- del myenv['UPSTART_SESSION']
- check_cmd = ['initctl', 'version']
- try:
- (out, err) = util.subp(check_cmd, env=myenv)
- return 'upstart' in out
- except util.ProcessExecutionError as e:
- LOG.debug("'%s' returned '%s', not using upstart",
- ' '.join(check_cmd), e.exit_code)
- return False
-
-
-def handle(name, _cfg, cloud, log, args):
- event_names = args
- if not event_names:
- # Default to the 'cloud-config'
- # event for backwards compat.
- event_names = ['cloud-config']
-
- if not is_upstart_system():
- log.debug("not upstart system, '%s' disabled", name)
- return
-
- cfgpath = cloud.paths.get_ipath_cur("cloud_config")
- for n in event_names:
- cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath]
- try:
- util.subp(cmd)
- except Exception as e:
- # TODO(harlowja), use log exception from utils??
- log.warn("Emission of upstart event %s failed due to: %s", n, e)
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
deleted file mode 100644
index 545fee22..00000000
--- a/cloudinit/config/cc_fan.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-fan module allows configuration of Ubuntu Fan
- https://wiki.ubuntu.com/FanNetworking
-
-Example config:
- #cloud-config
- fan:
- config: |
- # fan 240
- 10.0.0.0/8 eth0/16 dhcp
- 10.0.0.0/8 eth1/16 dhcp off
- # fan 241
- 241.0.0.0/8 eth0/16 dhcp
- config_path: /etc/network/fan
-
-If cloud-init sees a 'fan' entry in cloud-config it will
- a.) write 'config_path' with the contents
- b.) install the package 'ubuntu-fan' if it is not installed
-  c.) ensure the service is started (or restarted if it was previously running)
-"""
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-frequency = PER_INSTANCE
-
-BUILTIN_CFG = {
- 'config': None,
- 'config_path': '/etc/network/fan',
-}
-
-
-def stop_update_start(service, config_file, content, systemd=False):
- if systemd:
- cmds = {'stop': ['systemctl', 'stop', service],
- 'start': ['systemctl', 'start', service],
- 'enable': ['systemctl', 'enable', service]}
- else:
-        cmds = {'stop': ['service', service, 'stop'],
-                'start': ['service', service, 'start']}
-
- def run(cmd, msg):
- try:
- return util.subp(cmd, capture=True)
- except util.ProcessExecutionError as e:
- LOG.warn("failed: %s (%s): %s", service, cmd, e)
- return False
-
- stop_failed = not run(cmds['stop'], msg='stop %s' % service)
- if not content.endswith('\n'):
- content += '\n'
- util.write_file(config_file, content, omode="w")
-
- ret = run(cmds['start'], msg='start %s' % service)
- if ret and stop_failed:
- LOG.warn("success: %s started", service)
-
- if 'enable' in cmds:
- ret = run(cmds['enable'], msg='enable %s' % service)
-
- return ret
-
-
-def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('fan')
- if not cfgin:
- cfgin = {}
- mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
-
- if not mycfg.get('config'):
- LOG.debug("%s: no 'fan' config entry. disabling", name)
- return
-
- util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w")
- distro = cloud.distro
- if not util.which('fanctl'):
- distro.install_packages(['ubuntu-fan'])
-
- stop_update_start(
- service='ubuntu-fan', config_file=mycfg.get('config_path'),
- content=mycfg.get('config'), systemd=distro.uses_systemd())
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
deleted file mode 100644
index c9021eb1..00000000
--- a/cloudinit/config/cc_final_message.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import templater
-from cloudinit import util
-from cloudinit import version
-
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
-
-# Jinja-formatted default message
-FINAL_MESSAGE_DEF = (
- "## template: jinja\n"
- "Cloud-init v. {{version}} finished at {{timestamp}}."
- " Datasource {{datasource}}. Up {{uptime}} seconds"
-)
-
-
-def handle(_name, cfg, cloud, log, args):
-
- msg_in = ''
- if len(args) != 0:
- msg_in = str(args[0])
- else:
- msg_in = util.get_cfg_option_str(cfg, "final_message", "")
-
- msg_in = msg_in.strip()
- if not msg_in:
- msg_in = FINAL_MESSAGE_DEF
-
- uptime = util.uptime()
- ts = util.time_rfc2822()
- cver = version.version_string()
- try:
- subs = {
- 'uptime': uptime,
- 'timestamp': ts,
- 'version': cver,
- 'datasource': str(cloud.datasource),
- }
- subs.update(dict([(k.upper(), v) for k, v in subs.items()]))
- util.multi_log("%s\n" % (templater.render_string(msg_in, subs)),
- console=False, stderr=True, log=log)
- except Exception:
- util.logexc(log, "Failed to render final message template")
-
- boot_fin_fn = cloud.paths.boot_finished
- try:
- contents = "%s - %s - v. %s\n" % (uptime, ts, cver)
- util.write_file(boot_fin_fn, contents)
- except Exception:
- util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn)
-
- if cloud.datasource.is_disconnected:
- log.warn("Used fallback datasource")
diff --git a/cloudinit/config/cc_foo.py b/cloudinit/config/cc_foo.py
deleted file mode 100644
index 95aab4dd..00000000
--- a/cloudinit/config/cc_foo.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.settings import PER_INSTANCE
-
-# Modules are expected to have the following attributes.
-# 1. A required 'handle' method which takes the following params.
-#    a) The name will not be this file's name, but instead
-# the name specified in configuration (which is the name
-# which will be used to find this module).
-# b) A configuration object that is the result of the merging
-# of cloud configs configuration with legacy configuration
-# as well as any datasource provided configuration
-# c) A cloud object that can be used to access various
-# datasource and paths for the given distro and data provided
-# by the various datasource instance types.
-#    d) An argument list, which may or may not be empty, for this module.
-#       Typically these come from module configuration where the module
-#       is defined with some extra configuration that will eventually
-#       be translated from yaml into arguments to this module.
-# 2. An optional 'frequency' that defines how often this module should be run.
-#    Typically one of PER_INSTANCE, PER_ALWAYS, PER_ONCE. If not
-#    provided, PER_INSTANCE will be assumed.
-#    See settings.py for these constants.
-# 3. An optional 'distros' array/set/tuple that defines the known distros
-#    this module will work with (if not all of them). This is used to write
-#    a warning out if a module is being run on an untested distribution for
-#    informational purposes. If not present, all distros are assumed and
-#    no warning occurs.
-
-frequency = PER_INSTANCE
-
-
-def handle(name, _cfg, _cloud, log, _args):
- log.debug("Hi from module %s", name)
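
A minimal sketch of a config module that declares all three attributes described in the comment block above; the distro list and log message are invented examples, not a prescribed set:

# Hypothetical example module; not part of the original tree.
from cloudinit.settings import PER_INSTANCE

frequency = PER_INSTANCE            # optional; PER_INSTANCE is the default
distros = ['ubuntu', 'debian']      # optional; omit to allow all distros


def handle(name, cfg, cloud, log, args):
    # name: configured module name; cfg: merged cloud-config dict;
    # cloud: access to datasource and paths; args: extra yaml arguments
    log.debug("Example module %s ran with args=%s", name, args)
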
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
deleted file mode 100644
index 40560f11..00000000
--- a/cloudinit/config/cc_growpart.py
+++ /dev/null
@@ -1,300 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import os.path
-import re
-import stat
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
-
-frequency = PER_ALWAYS
-
-DEFAULT_CONFIG = {
- 'mode': 'auto',
- 'devices': ['/'],
- 'ignore_growroot_disabled': False,
-}
-
-
-class RESIZE(object):
- SKIPPED = "SKIPPED"
- CHANGED = "CHANGED"
- NOCHANGE = "NOCHANGE"
- FAILED = "FAILED"
-
-
-LOG = logging.getLogger(__name__)
-
-
-def resizer_factory(mode):
- resize_class = None
- if mode == "auto":
- for (_name, resizer) in RESIZERS:
- cur = resizer()
- if cur.available():
- resize_class = cur
- break
-
- if not resize_class:
- raise ValueError("No resizers available")
-
- else:
- mmap = {}
- for (k, v) in RESIZERS:
- mmap[k] = v
-
- if mode not in mmap:
- raise TypeError("unknown resize mode %s" % mode)
-
- mclass = mmap[mode]()
- if mclass.available():
- resize_class = mclass
-
- if not resize_class:
- raise ValueError("mode %s not available" % mode)
-
- return resize_class
-
-
-class ResizeFailedException(Exception):
- pass
-
-
-class ResizeGrowPart(object):
- def available(self):
- myenv = os.environ.copy()
- myenv['LANG'] = 'C'
-
- try:
- (out, _err) = util.subp(["growpart", "--help"], env=myenv)
- if re.search(r"--update\s+", out, re.DOTALL):
- return True
-
- except util.ProcessExecutionError:
- pass
- return False
-
- def resize(self, diskdev, partnum, partdev):
- before = get_size(partdev)
- try:
- util.subp(["growpart", '--dry-run', diskdev, partnum])
- except util.ProcessExecutionError as e:
- if e.exit_code != 1:
- util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)",
- diskdev, partnum)
- raise ResizeFailedException(e)
- return (before, before)
-
- try:
- util.subp(["growpart", diskdev, partnum])
- except util.ProcessExecutionError as e:
- util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum)
- raise ResizeFailedException(e)
-
- return (before, get_size(partdev))
-
-
-class ResizeGpart(object):
- def available(self):
- if not util.which('gpart'):
- return False
- return True
-
- def resize(self, diskdev, partnum, partdev):
- """
- GPT disks store metadata at the beginning (primary) and at the
- end (secondary) of the disk. When launching an image with a
- larger disk compared to the original image, the secondary copy
-        is lost. Thus, the metadata will be marked CORRUPT and needs to
-        be recovered.
- """
- try:
- util.subp(["gpart", "recover", diskdev])
- except util.ProcessExecutionError as e:
- if e.exit_code != 0:
- util.logexc(LOG, "Failed: gpart recover %s", diskdev)
- raise ResizeFailedException(e)
-
- before = get_size(partdev)
- try:
- util.subp(["gpart", "resize", "-i", partnum, diskdev])
- except util.ProcessExecutionError as e:
- util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev)
- raise ResizeFailedException(e)
-
- # Since growing the FS requires a reboot, make sure we reboot
- # first when this module has finished.
- open('/var/run/reboot-required', 'a').close()
-
- return (before, get_size(partdev))
-
-
-def get_size(filename):
- fd = os.open(filename, os.O_RDONLY)
- try:
- return os.lseek(fd, 0, os.SEEK_END)
- finally:
- os.close(fd)
-
-
-def device_part_info(devpath):
- # convert an entry in /dev/ to parent disk and partition number
-
- # input of /dev/vdb or /dev/disk/by-label/foo
- # rpath is hopefully a real-ish path in /dev (vda, sdb..)
- rpath = os.path.realpath(devpath)
-
- bname = os.path.basename(rpath)
- syspath = "/sys/class/block/%s" % bname
-
- # FreeBSD doesn't know of sysfs so just get everything we need from
- # the device, like /dev/vtbd0p2.
- if util.system_info()["platform"].startswith('FreeBSD'):
- m = re.search('^(/dev/.+)p([0-9])$', devpath)
- return (m.group(1), m.group(2))
-
- if not os.path.exists(syspath):
- raise ValueError("%s had no syspath (%s)" % (devpath, syspath))
-
- ptpath = os.path.join(syspath, "partition")
- if not os.path.exists(ptpath):
- raise TypeError("%s not a partition" % devpath)
-
- ptnum = util.load_file(ptpath).rstrip()
-
- # for a partition, real syspath is something like:
- # /sys/devices/pci0000:00/0000:00:04.0/virtio1/block/vda/vda1
- rsyspath = os.path.realpath(syspath)
- disksyspath = os.path.dirname(rsyspath)
-
- diskmajmin = util.load_file(os.path.join(disksyspath, "dev")).rstrip()
- diskdevpath = os.path.realpath("/dev/block/%s" % diskmajmin)
-
- # diskdevpath has something like 253:0
- # and udev has put links in /dev/block/253:0 to the device name in /dev/
- return (diskdevpath, ptnum)
-
-
-def devent2dev(devent):
- if devent.startswith("/dev/"):
- return devent
- else:
- result = util.get_mount_info(devent)
- if not result:
-            raise ValueError("Could not determine device of '%s'" % devent)
- return result[0]
-
-
-def resize_devices(resizer, devices):
- # returns a tuple of tuples containing (entry-in-devices, action, message)
- info = []
- for devent in devices:
- try:
- blockdev = devent2dev(devent)
- except ValueError as e:
- info.append((devent, RESIZE.SKIPPED,
- "unable to convert to device: %s" % e,))
- continue
-
- try:
- statret = os.stat(blockdev)
- except OSError as e:
- info.append((devent, RESIZE.SKIPPED,
- "stat of '%s' failed: %s" % (blockdev, e),))
- continue
-
- if (not stat.S_ISBLK(statret.st_mode) and
- not stat.S_ISCHR(statret.st_mode)):
- info.append((devent, RESIZE.SKIPPED,
- "device '%s' not a block device" % blockdev,))
- continue
-
- try:
- (disk, ptnum) = device_part_info(blockdev)
- except (TypeError, ValueError) as e:
- info.append((devent, RESIZE.SKIPPED,
- "device_part_info(%s) failed: %s" % (blockdev, e),))
- continue
-
- try:
- (old, new) = resizer.resize(disk, ptnum, blockdev)
- if old == new:
- info.append((devent, RESIZE.NOCHANGE,
- "no change necessary (%s, %s)" % (disk, ptnum),))
- else:
- info.append((devent, RESIZE.CHANGED,
- "changed (%s, %s) from %s to %s" %
- (disk, ptnum, old, new),))
-
- except ResizeFailedException as e:
- info.append((devent, RESIZE.FAILED,
- "failed to resize: disk=%s, ptnum=%s: %s" %
- (disk, ptnum, e),))
-
- return info
-
-
-def handle(_name, cfg, _cloud, log, _args):
- if 'growpart' not in cfg:
- log.debug("No 'growpart' entry in cfg. Using default: %s" %
- DEFAULT_CONFIG)
- cfg['growpart'] = DEFAULT_CONFIG
-
- mycfg = cfg.get('growpart')
- if not isinstance(mycfg, dict):
- log.warn("'growpart' in config was not a dict")
- return
-
- mode = mycfg.get('mode', "auto")
- if util.is_false(mode):
- log.debug("growpart disabled: mode=%s" % mode)
- return
-
- if util.is_false(mycfg.get('ignore_growroot_disabled', False)):
- if os.path.isfile("/etc/growroot-disabled"):
- log.debug("growpart disabled: /etc/growroot-disabled exists")
- log.debug("use ignore_growroot_disabled to ignore")
- return
-
- devices = util.get_cfg_option_list(mycfg, "devices", ["/"])
- if not len(devices):
- log.debug("growpart: empty device list")
- return
-
- try:
- resizer = resizer_factory(mode)
- except (ValueError, TypeError) as e:
- log.debug("growpart unable to find resizer for '%s': %s" % (mode, e))
- if mode != "auto":
- raise e
- return
-
- resized = util.log_time(logfunc=log.debug, msg="resize_devices",
- func=resize_devices, args=(resizer, devices))
- for (entry, action, msg) in resized:
- if action == RESIZE.CHANGED:
- log.info("'%s' resized: %s" % (entry, msg))
- else:
- log.debug("'%s' %s: %s" % (entry, action, msg))
-
-RESIZERS = (('growpart', ResizeGrowPart), ('gpart', ResizeGpart))
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
deleted file mode 100644
index 156722d9..00000000
--- a/cloudinit/config/cc_grub_dpkg.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-distros = ['ubuntu', 'debian']
-
-
-def handle(name, cfg, _cloud, log, _args):
-
- mycfg = cfg.get("grub_dpkg", cfg.get("grub-dpkg", {}))
- if not mycfg:
- mycfg = {}
-
- enabled = mycfg.get('enabled', True)
- if util.is_false(enabled):
- log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled)
- return
-
- idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
- idevs_empty = util.get_cfg_option_str(
- mycfg, "grub-pc/install_devices_empty", None)
-
- if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
- (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))):
- if idevs is None:
- idevs = ""
- if idevs_empty is None:
- idevs_empty = "true"
- else:
- if idevs_empty is None:
- idevs_empty = "false"
- if idevs is None:
- idevs = "/dev/sda"
- for dev in ("/dev/sda", "/dev/vda", "/dev/xvda",
- "/dev/sda1", "/dev/vda1", "/dev/xvda1"):
- if os.path.exists(dev):
- idevs = dev
- break
-
- # now idevs and idevs_empty are set to determined values
- # or, those set by user
-
- dconf_sel = (("grub-pc grub-pc/install_devices string %s\n"
- "grub-pc grub-pc/install_devices_empty boolean %s\n") %
- (idevs, idevs_empty))
-
- log.debug("Setting grub debconf-set-selections with '%s','%s'" %
- (idevs, idevs_empty))
-
- try:
- util.subp(['debconf-set-selections'], dconf_sel)
- except Exception:
- util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
deleted file mode 100644
index 9a02f056..00000000
--- a/cloudinit/config/cc_keys_to_console.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-frequency = PER_INSTANCE
-
-# This is a tool that cloud init provides
-HELPER_TOOL_TPL = '%s/cloud-init/write-ssh-key-fingerprints'
-
-
-def _get_helper_tool_path(distro):
- try:
- base_lib = distro.usr_lib_exec
- except AttributeError:
- base_lib = '/usr/lib'
- return HELPER_TOOL_TPL % base_lib
-
-
-def handle(name, cfg, cloud, log, _args):
- helper_path = _get_helper_tool_path(cloud.distro)
- if not os.path.exists(helper_path):
- log.warn(("Unable to activate module %s,"
- " helper tool not found at %s"), name, helper_path)
- return
-
- fp_blacklist = util.get_cfg_option_list(cfg,
- "ssh_fp_console_blacklist", [])
- key_blacklist = util.get_cfg_option_list(cfg,
- "ssh_key_console_blacklist",
- ["ssh-dss"])
-
- try:
- cmd = [helper_path]
- cmd.append(','.join(fp_blacklist))
- cmd.append(','.join(key_blacklist))
- (stdout, _stderr) = util.subp(cmd)
- util.multi_log("%s\n" % (stdout.strip()),
- stderr=False, console=True)
- except Exception:
- log.warn("Writing keys to the system console failed!")
- raise
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
deleted file mode 100644
index 68fcb27f..00000000
--- a/cloudinit/config/cc_landscape.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from six import StringIO
-
-from configobj import ConfigObj
-
-from cloudinit import type_utils
-from cloudinit import util
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf"
-LS_DEFAULT_FILE = "/etc/default/landscape-client"
-
-distros = ['ubuntu']
-
-# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2
-LSC_BUILTIN_CFG = {
- 'client': {
- 'log_level': "info",
- 'url': "https://landscape.canonical.com/message-system",
- 'ping_url': "http://landscape.canonical.com/ping",
- 'data_path': "/var/lib/landscape/client",
- }
-}
-
-
-def handle(_name, cfg, cloud, log, _args):
- """
-    Take a top-level 'landscape' entry with a 'client' dict and render
-    it in ConfigObj format under the '[client]' section of
-    /etc/landscape/client.conf
- """
-
- ls_cloudcfg = cfg.get("landscape", {})
-
- if not isinstance(ls_cloudcfg, (dict)):
-        raise RuntimeError("'landscape' key existed in config, but is not a"
-                           " dictionary type; it is a %s instead" %
-                           type_utils.obj_name(ls_cloudcfg))
- if not ls_cloudcfg:
- return
-
- cloud.distro.install_packages(('landscape-client',))
-
- merge_data = [
- LSC_BUILTIN_CFG,
- LSC_CLIENT_CFG_FILE,
- ls_cloudcfg,
- ]
- merged = merge_together(merge_data)
- contents = StringIO()
- merged.write(contents)
-
- util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE))
- util.write_file(LSC_CLIENT_CFG_FILE, contents.getvalue())
- log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE)
-
- util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
- util.subp(["service", "landscape-client", "restart"])
-
-
-def merge_together(objs):
- """
-    merge together ConfigObj objects, or things that ConfigObj() will accept;
-    later entries override earlier ones
- """
- cfg = ConfigObj({})
- for obj in objs:
- if not obj:
- continue
- if isinstance(obj, ConfigObj):
- cfg.merge(obj)
- else:
- cfg.merge(ConfigObj(obj))
- return cfg
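
A small sketch of the ConfigObj merge behaviour relied on here, assuming the configobj package is available; the dict contents are invented:

# Hypothetical illustration of ConfigObj merging; later entries win.
from configobj import ConfigObj

merged = ConfigObj({'client': {'log_level': 'info'}})
merged.merge(ConfigObj({'client': {'log_level': 'debug'}}))
assert merged['client']['log_level'] == 'debug'
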
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
deleted file mode 100644
index bbe5fcae..00000000
--- a/cloudinit/config/cc_locale.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import util
-
-
-def handle(name, cfg, cloud, log, args):
- if len(args) != 0:
- locale = args[0]
- else:
- locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale())
-
- if util.is_false(locale):
- log.debug("Skipping module named %s, disabled by config: %s",
- name, locale)
- return
-
- log.debug("Setting locale to %s", locale)
- locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile")
- cloud.distro.apply_locale(locale, locale_cfgfile)
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
deleted file mode 100644
index 70d4e7c3..00000000
--- a/cloudinit/config/cc_lxd.py
+++ /dev/null
@@ -1,177 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2016 Canonical Ltd.
-#
-# Author: Wesley Wiedenmeier <wesley.wiedenmeier@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-This module initializes lxd using 'lxd init'
-
-Example config:
- #cloud-config
- lxd:
- init:
- network_address: <ip addr>
- network_port: <port>
- storage_backend: <zfs/dir>
- storage_create_device: <dev>
- storage_create_loop: <size>
- storage_pool: <name>
- trust_password: <password>
- bridge:
- mode: <new, existing or none>
- name: <name>
- ipv4_address: <ip addr>
- ipv4_netmask: <cidr>
- ipv4_dhcp_first: <ip addr>
- ipv4_dhcp_last: <ip addr>
- ipv4_dhcp_leases: <size>
- ipv4_nat: <bool>
- ipv6_address: <ip addr>
- ipv6_netmask: <cidr>
- ipv6_nat: <bool>
- domain: <domain>
-"""
-
-from cloudinit import util
-
-
-def handle(name, cfg, cloud, log, args):
- # Get config
- lxd_cfg = cfg.get('lxd')
- if not lxd_cfg:
- log.debug("Skipping module named %s, not present or disabled by cfg",
- name)
- return
- if not isinstance(lxd_cfg, dict):
- log.warn("lxd config must be a dictionary. found a '%s'",
- type(lxd_cfg))
- return
-
- # Grab the configuration
- init_cfg = lxd_cfg.get('init')
- if not isinstance(init_cfg, dict):
- log.warn("lxd/init config must be a dictionary. found a '%s'",
- type(init_cfg))
- init_cfg = {}
-
- bridge_cfg = lxd_cfg.get('bridge')
- if not isinstance(bridge_cfg, dict):
- log.warn("lxd/bridge config must be a dictionary. found a '%s'",
- type(bridge_cfg))
- bridge_cfg = {}
-
- # Install the needed packages
- packages = []
- if not util.which("lxd"):
- packages.append('lxd')
-
- if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'):
- packages.append('zfs')
-
- if len(packages):
- try:
- cloud.distro.install_packages(packages)
- except util.ProcessExecutionError as exc:
- log.warn("failed to install packages %s: %s", packages, exc)
- return
-
- # Set up lxd if init config is given
- if init_cfg:
- init_keys = (
- 'network_address', 'network_port', 'storage_backend',
- 'storage_create_device', 'storage_create_loop',
- 'storage_pool', 'trust_password')
- cmd = ['lxd', 'init', '--auto']
- for k in init_keys:
- if init_cfg.get(k):
- cmd.extend(["--%s=%s" %
- (k.replace('_', '-'), str(init_cfg[k]))])
- util.subp(cmd)
-
- # Set up lxd-bridge if bridge config is given
- dconf_comm = "debconf-communicate"
- if bridge_cfg and util.which(dconf_comm):
- debconf = bridge_to_debconf(bridge_cfg)
-
- # Update debconf database
- try:
- log.debug("Setting lxd debconf via " + dconf_comm)
- data = "\n".join(["set %s %s" % (k, v)
- for k, v in debconf.items()]) + "\n"
- util.subp(['debconf-communicate'], data)
- except Exception:
-            util.logexc(log, "Failed to run '%s' for lxd" % dconf_comm)
-
- # Remove the existing configuration file (forces re-generation)
- util.del_file("/etc/default/lxd-bridge")
-
- # Run reconfigure
- log.debug("Running dpkg-reconfigure for lxd")
- util.subp(['dpkg-reconfigure', 'lxd',
- '--frontend=noninteractive'])
- elif bridge_cfg:
-        raise RuntimeError(
-            "Unable to configure lxd bridge without %s." % dconf_comm)
-
-
-def bridge_to_debconf(bridge_cfg):
- debconf = {}
-
- if bridge_cfg.get("mode") == "none":
- debconf["lxd/setup-bridge"] = "false"
- debconf["lxd/bridge-name"] = ""
-
- elif bridge_cfg.get("mode") == "existing":
- debconf["lxd/setup-bridge"] = "false"
- debconf["lxd/use-existing-bridge"] = "true"
- debconf["lxd/bridge-name"] = bridge_cfg.get("name")
-
- elif bridge_cfg.get("mode") == "new":
- debconf["lxd/setup-bridge"] = "true"
- if bridge_cfg.get("name"):
- debconf["lxd/bridge-name"] = bridge_cfg.get("name")
-
- if bridge_cfg.get("ipv4_address"):
- debconf["lxd/bridge-ipv4"] = "true"
- debconf["lxd/bridge-ipv4-address"] = \
- bridge_cfg.get("ipv4_address")
- debconf["lxd/bridge-ipv4-netmask"] = \
- bridge_cfg.get("ipv4_netmask")
- debconf["lxd/bridge-ipv4-dhcp-first"] = \
- bridge_cfg.get("ipv4_dhcp_first")
- debconf["lxd/bridge-ipv4-dhcp-last"] = \
- bridge_cfg.get("ipv4_dhcp_last")
- debconf["lxd/bridge-ipv4-dhcp-leases"] = \
- bridge_cfg.get("ipv4_dhcp_leases")
- debconf["lxd/bridge-ipv4-nat"] = \
- bridge_cfg.get("ipv4_nat", "true")
-
- if bridge_cfg.get("ipv6_address"):
- debconf["lxd/bridge-ipv6"] = "true"
- debconf["lxd/bridge-ipv6-address"] = \
- bridge_cfg.get("ipv6_address")
- debconf["lxd/bridge-ipv6-netmask"] = \
- bridge_cfg.get("ipv6_netmask")
- debconf["lxd/bridge-ipv6-nat"] = \
- bridge_cfg.get("ipv6_nat", "false")
-
- if bridge_cfg.get("domain"):
- debconf["lxd/bridge-domain"] = bridge_cfg.get("domain")
-
- else:
- raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode"))
-
- return debconf
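
An invented bridge_cfg and the kind of debconf keys the mapping above would produce for it (names and addresses are placeholders):

# Hypothetical bridge configuration; addresses and names are invented.
example_bridge_cfg = {
    'mode': 'new',
    'name': 'lxdbr0',
    'ipv4_address': '10.0.8.1',
    'ipv4_netmask': '24',
    'domain': 'lxd',
}
# bridge_to_debconf(example_bridge_cfg) would include, among other keys:
#   'lxd/setup-bridge': 'true'
#   'lxd/bridge-name': 'lxdbr0'
#   'lxd/bridge-ipv4': 'true'
#   'lxd/bridge-ipv4-address': '10.0.8.1'
#   'lxd/bridge-domain': 'lxd'
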
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
deleted file mode 100644
index ada535f8..00000000
--- a/cloudinit/config/cc_mcollective.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Marc Cluet <marc.cluet@canonical.com>
-# Based on code by Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-from six import BytesIO
-
-# Used since this can maintain comments
-# and doesn't need a top level section
-from configobj import ConfigObj
-
-from cloudinit import log as logging
-from cloudinit import util
-
-PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
-PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
-SERVER_CFG = '/etc/mcollective/server.cfg'
-
-LOG = logging.getLogger(__name__)
-
-
-def configure(config, server_cfg=SERVER_CFG,
- pubcert_file=PUBCERT_FILE, pricert_file=PRICERT_FILE):
- # Read server.cfg values from the
- # original file in order to be able to mix the rest up
- try:
- mcollective_config = ConfigObj(server_cfg, file_error=True)
- existed = True
- except IOError:
- LOG.debug("Did not find file %s", server_cfg)
- mcollective_config = ConfigObj()
- existed = False
-
- for (cfg_name, cfg) in config.items():
- if cfg_name == 'public-cert':
- util.write_file(pubcert_file, cfg, mode=0o644)
- mcollective_config[
- 'plugin.ssl_server_public'] = pubcert_file
- mcollective_config['securityprovider'] = 'ssl'
- elif cfg_name == 'private-cert':
- util.write_file(pricert_file, cfg, mode=0o600)
- mcollective_config[
- 'plugin.ssl_server_private'] = pricert_file
- mcollective_config['securityprovider'] = 'ssl'
- else:
- if isinstance(cfg, six.string_types):
- # Just set it in the 'main' section
- mcollective_config[cfg_name] = cfg
- elif isinstance(cfg, (dict)):
- # Iterate through the config items, create a section if
- # it is needed and then add/or create items as needed
- if cfg_name not in mcollective_config.sections:
- mcollective_config[cfg_name] = {}
- for (o, v) in cfg.items():
- mcollective_config[cfg_name][o] = v
- else:
- # Otherwise just try to convert it to a string
- mcollective_config[cfg_name] = str(cfg)
-
- if existed:
-        # We got all the config we wanted, so rename
-        # the previous server.cfg and create the new one
- util.rename(server_cfg, "%s.old" % (server_cfg))
-
- # Now we got the whole file, write to disk...
- contents = BytesIO()
- mcollective_config.write(contents)
- util.write_file(server_cfg, contents.getvalue(), mode=0o644)
-
-
-def handle(name, cfg, cloud, log, _args):
-
- # If there isn't a mcollective key in the configuration don't do anything
- if 'mcollective' not in cfg:
- log.debug(("Skipping module named %s, "
- "no 'mcollective' key in configuration"), name)
- return
-
- mcollective_cfg = cfg['mcollective']
-
- # Start by installing the mcollective package ...
- cloud.distro.install_packages(("mcollective",))
-
- # ... and then update the mcollective configuration
- if 'conf' in mcollective_cfg:
- configure(config=mcollective_cfg['conf'])
-
- # restart mcollective to handle updated config
- util.subp(['service', 'mcollective', 'restart'], capture=False)
diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py
deleted file mode 100644
index facaa538..00000000
--- a/cloudinit/config/cc_migrator.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import shutil
-
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
-
-
-def _migrate_canon_sems(cloud):
- paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
- am_adjusted = 0
- for sem_path in paths:
- if not sem_path or not os.path.exists(sem_path):
- continue
- for p in os.listdir(sem_path):
- full_path = os.path.join(sem_path, p)
- if os.path.isfile(full_path):
- (name, ext) = os.path.splitext(p)
- canon_name = helpers.canon_sem_name(name)
- if canon_name != name:
- new_path = os.path.join(sem_path, canon_name + ext)
- shutil.move(full_path, new_path)
- am_adjusted += 1
- return am_adjusted
-
-
-def _migrate_legacy_sems(cloud, log):
- legacy_adjust = {
- 'apt-update-upgrade': [
- 'apt-configure',
- 'package-update-upgrade-install',
- ],
- }
- paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
- for sem_path in paths:
- if not sem_path or not os.path.exists(sem_path):
- continue
- sem_helper = helpers.FileSemaphores(sem_path)
- for (mod_name, migrate_to) in legacy_adjust.items():
- possibles = [mod_name, helpers.canon_sem_name(mod_name)]
- old_exists = []
- for p in os.listdir(sem_path):
- (name, _ext) = os.path.splitext(p)
-                if (name in possibles and
-                        os.path.isfile(os.path.join(sem_path, p))):
- old_exists.append(p)
- for p in old_exists:
- util.del_file(os.path.join(sem_path, p))
- (_name, freq) = os.path.splitext(p)
- for m in migrate_to:
- log.debug("Migrating %s => %s with the same frequency",
- p, m)
- with sem_helper.lock(m, freq):
- pass
-
-
-def handle(name, cfg, cloud, log, _args):
- do_migrate = util.get_cfg_option_str(cfg, "migrate", True)
- if not util.translate_bool(do_migrate):
- log.debug("Skipping module named %s, migration disabled", name)
- return
- sems_moved = _migrate_canon_sems(cloud)
-    log.debug("Migrated %s semaphore files to their canonicalized names",
- sems_moved)
- _migrate_legacy_sems(cloud, log)
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
deleted file mode 100644
index 2b981935..00000000
--- a/cloudinit/config/cc_mounts.py
+++ /dev/null
@@ -1,405 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from string import whitespace
-
-import logging
-import os.path
-import re
-
-from cloudinit import type_utils
-from cloudinit import util
-
-# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
-DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
-DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER)
-WS = re.compile("[%s]+" % (whitespace))
-FSTAB_PATH = "/etc/fstab"
-
-LOG = logging.getLogger(__name__)
-
-
-def is_meta_device_name(name):
- # return true if this is a metadata service name
- if name in ["ami", "root", "swap"]:
- return True
- # names 'ephemeral0' or 'ephemeral1'
- # 'ebs[0-9]' appears when '--block-device-mapping sdf=snap-d4d90bbc'
- for enumname in ("ephemeral", "ebs"):
- if name.startswith(enumname) and name.find(":") == -1:
- return True
- return False
-
-
-def _get_nth_partition_for_device(device_path, partition_number):
- potential_suffixes = [str(partition_number), 'p%s' % (partition_number,),
- '-part%s' % (partition_number,)]
- for suffix in potential_suffixes:
- potential_partition_device = '%s%s' % (device_path, suffix)
- if os.path.exists(potential_partition_device):
- return potential_partition_device
- return None
-
-
-def _is_block_device(device_path, partition_path=None):
- device_name = os.path.realpath(device_path).split('/')[-1]
- sys_path = os.path.join('/sys/block/', device_name)
- if partition_path is not None:
- sys_path = os.path.join(
- sys_path, os.path.realpath(partition_path).split('/')[-1])
- return os.path.exists(sys_path)
-
-
-def sanitize_devname(startname, transformer, log):
- log.debug("Attempting to determine the real name of %s", startname)
-
- # workaround, allow user to specify 'ephemeral'
- # rather than more ec2 correct 'ephemeral0'
- devname = startname
- if devname == "ephemeral":
- devname = "ephemeral0"
- log.debug("Adjusted mount option from ephemeral to ephemeral0")
-
- device_path, partition_number = util.expand_dotted_devname(devname)
-
- if is_meta_device_name(device_path):
- orig = device_path
- device_path = transformer(device_path)
- if not device_path:
- return None
- if not device_path.startswith("/"):
- device_path = "/dev/%s" % (device_path,)
- log.debug("Mapped metadata name %s to %s", orig, device_path)
- else:
- if DEVICE_NAME_RE.match(startname):
- device_path = "/dev/%s" % (device_path,)
-
- partition_path = None
- if partition_number is None:
- partition_path = _get_nth_partition_for_device(device_path, 1)
- else:
- partition_path = _get_nth_partition_for_device(device_path,
- partition_number)
- if partition_path is None:
- return None
-
- if _is_block_device(device_path, partition_path):
- if partition_path is not None:
- return partition_path
- return device_path
- return None
-
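
A hedged sketch of the name handling above; the transformer mapping is invented, and the partition that is finally returned depends on what actually exists under /dev:

# Hypothetical device-name transformer, as a datasource might provide one.
def fake_transformer(name):
    return {'ephemeral0': 'xvdb'}.get(name)

# sanitize_devname('ephemeral', fake_transformer, log) would first rewrite
# 'ephemeral' to 'ephemeral0', map it to '/dev/xvdb', and then return the
# first existing partition (e.g. '/dev/xvdb1') if it is a real block device.
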
-
-def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
- # make a suggestion on the size of swap for this system.
- if memsize is None:
- memsize = util.read_meminfo()['total']
-
- GB = 2 ** 30
- sugg_max = 8 * GB
-
- info = {'avail': 'na', 'max_in': maxsize, 'mem': memsize}
-
- if fsys is None and maxsize is None:
- # set max to 8GB default if no filesystem given
- maxsize = sugg_max
- elif fsys:
- statvfs = os.statvfs(fsys)
- avail = statvfs.f_frsize * statvfs.f_bfree
- info['avail'] = avail
-
- if maxsize is None:
- # set to 25% of filesystem space
- maxsize = min(int(avail / 4), sugg_max)
- elif maxsize > ((avail * .9)):
- # set to 90% of available disk space
- maxsize = int(avail * .9)
- elif maxsize is None:
- maxsize = sugg_max
-
- info['max'] = maxsize
-
- formulas = [
- # < 1G: swap = double memory
- (1 * GB, lambda x: x * 2),
- # < 2G: swap = 2G
- (2 * GB, lambda x: 2 * GB),
- # < 4G: swap = memory
- (4 * GB, lambda x: x),
- # < 16G: 4G
- (16 * GB, lambda x: 4 * GB),
-        # < 64G: 1/2 of memory, up to max
- (64 * GB, lambda x: x / 2),
- ]
-
- size = None
- for top, func in formulas:
- if memsize <= top:
- size = min(func(memsize), maxsize)
- # if less than 1/2 memory and not much, return 0
- if size < (memsize / 2) and size < 4 * GB:
- size = 0
- break
- break
-
-    if size is None:
-        size = maxsize
-
- info['size'] = size
-
- MB = 2 ** 20
- pinfo = {}
- for k, v in info.items():
- if isinstance(v, int):
- pinfo[k] = "%s MB" % (v / MB)
- else:
- pinfo[k] = v
-
- LOG.debug("suggest %(size)s swap for %(mem)s memory with '%(avail)s'"
-              " disk given max=%(max_in)s [max=%(max)s]" % pinfo)
- return size
-
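
As a quick sanity check on the formula table above, the sketch below re-states only the table itself (it ignores the surrounding maxsize clamping); the sample memory sizes are arbitrary:

# Hypothetical, self-contained re-statement of the swap formula table.
GB = 2 ** 30
formulas = [
    (1 * GB, lambda x: x * 2),      # < 1G: double memory
    (2 * GB, lambda x: 2 * GB),     # < 2G: 2G
    (4 * GB, lambda x: x),          # < 4G: same as memory
    (16 * GB, lambda x: 4 * GB),    # < 16G: 4G
    (64 * GB, lambda x: x / 2),     # < 64G: half of memory
]


def table_suggestion(memsize):
    for top, func in formulas:
        if memsize <= top:
            return func(memsize)
    return None


for mem in (GB // 2, 3 * GB, 32 * GB):
    print("%.1f GiB RAM -> %.1f GiB swap" %
          (mem / float(GB), table_suggestion(mem) / float(GB)))
# 0.5 GiB RAM -> 1.0 GiB swap
# 3.0 GiB RAM -> 3.0 GiB swap
# 32.0 GiB RAM -> 16.0 GiB swap
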
-
-def setup_swapfile(fname, size=None, maxsize=None):
- """
- fname: full path string of filename to setup
- size: the size to create. set to "auto" for recommended
- maxsize: the maximum size
- """
- tdir = os.path.dirname(fname)
- if str(size).lower() == "auto":
- try:
- memsize = util.read_meminfo()['total']
- except IOError as e:
-            LOG.debug("Not creating swap: failed to read meminfo: %s", e)
- return
-
- util.ensure_dir(tdir)
- size = suggested_swapsize(fsys=tdir, maxsize=maxsize,
- memsize=memsize)
-
- if not size:
- LOG.debug("Not creating swap: suggested size was 0")
- return
-
- mbsize = str(int(size / (2 ** 20)))
- msg = "creating swap file '%s' of %sMB" % (fname, mbsize)
- try:
- util.ensure_dir(tdir)
- util.log_time(LOG.debug, msg, func=util.subp,
- args=[['sh', '-c',
- ('rm -f "$1" && umask 0066 && '
- '{ fallocate -l "${2}M" "$1" || '
- ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && '
- 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'),
- 'setup_swap', fname, mbsize]])
-
- except Exception as e:
- raise IOError("Failed %s: %s" % (msg, e))
-
- return fname
-
-
-def handle_swapcfg(swapcfg):
- """handle the swap config, calling setup_swap if necessary.
- return None or (filename, size)
- """
- if not isinstance(swapcfg, dict):
- LOG.warn("input for swap config was not a dict.")
- return None
-
- fname = swapcfg.get('filename', '/swap.img')
- size = swapcfg.get('size', 0)
- maxsize = swapcfg.get('maxsize', None)
-
- if not (size and fname):
- LOG.debug("no need to setup swap")
- return
-
- if os.path.exists(fname):
- if not os.path.exists("/proc/swaps"):
- LOG.debug("swap file %s existed. no /proc/swaps. Being safe.",
- fname)
- return fname
- try:
- for line in util.load_file("/proc/swaps").splitlines():
- if line.startswith(fname + " "):
- LOG.debug("swap file %s already in use.", fname)
- return fname
- LOG.debug("swap file %s existed, but not in /proc/swaps", fname)
- except Exception:
- LOG.warn("swap file %s existed. Error reading /proc/swaps", fname)
- return fname
-
- try:
- if isinstance(size, str) and size != "auto":
- size = util.human2bytes(size)
- if isinstance(maxsize, str):
- maxsize = util.human2bytes(maxsize)
- return setup_swapfile(fname=fname, size=size, maxsize=maxsize)
-
- except Exception as e:
- LOG.warn("failed to setup swap: %s", e)
-
- return None
-
-
-def handle(_name, cfg, cloud, log, _args):
- # fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno
- def_mnt_opts = "defaults,nobootwait"
- if cloud.distro.uses_systemd():
- def_mnt_opts = "defaults,nofail"
-
- defvals = [None, None, "auto", def_mnt_opts, "0", "2"]
- defvals = cfg.get("mount_default_fields", defvals)
-
- # these are our default set of mounts
- defmnts = [["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"],
- ["swap", "none", "swap", "sw", "0", "0"]]
-
- cfgmnt = []
- if "mounts" in cfg:
- cfgmnt = cfg["mounts"]
-
- for i in range(len(cfgmnt)):
- # skip something that wasn't a list
- if not isinstance(cfgmnt[i], list):
- log.warn("Mount option %s not a list, got a %s instead",
- (i + 1), type_utils.obj_name(cfgmnt[i]))
- continue
-
- start = str(cfgmnt[i][0])
- sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
- if sanitized is None:
-            log.debug("Ignoring nonexistent named mount %s", start)
- continue
-
- if sanitized != start:
- log.debug("changed %s => %s" % (start, sanitized))
- cfgmnt[i][0] = sanitized
-
- # in case the user did not quote a field (likely fs-freq, fs_passno)
- # but do not convert None to 'None' (LP: #898365)
- for j in range(len(cfgmnt[i])):
- if cfgmnt[i][j] is None:
- continue
- else:
- cfgmnt[i][j] = str(cfgmnt[i][j])
-
- for i in range(len(cfgmnt)):
- # fill in values with defaults from defvals above
- for j in range(len(defvals)):
- if len(cfgmnt[i]) <= j:
- cfgmnt[i].append(defvals[j])
- elif cfgmnt[i][j] is None:
- cfgmnt[i][j] = defvals[j]
-
- # if the second entry in the list is 'None' this
- # clears all previous entries of that same 'fs_spec'
- # (fs_spec is the first field in /etc/fstab, ie, that device)
- if cfgmnt[i][1] is None:
- for j in range(i):
- if cfgmnt[j][0] == cfgmnt[i][0]:
- cfgmnt[j][1] = None
-
- # for each of the "default" mounts, add them only if no other
- # entry has the same device name
- for defmnt in defmnts:
- start = defmnt[0]
- sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
- if sanitized is None:
-            log.debug("Ignoring nonexistent default named mount %s", start)
- continue
- if sanitized != start:
- log.debug("changed default device %s => %s" % (start, sanitized))
- defmnt[0] = sanitized
-
- cfgmnt_has = False
- for cfgm in cfgmnt:
- if cfgm[0] == defmnt[0]:
- cfgmnt_has = True
- break
-
- if cfgmnt_has:
- log.debug(("Not including %s, already"
- " previously included"), start)
- continue
- cfgmnt.append(defmnt)
-
- # now, each entry in the cfgmnt list has all fstab values
- # if the second field is None (not the string, the value) we skip it
- actlist = []
- for x in cfgmnt:
- if x[1] is None:
- log.debug("Skipping non-existent device named %s", x[0])
- else:
- actlist.append(x)
-
- swapret = handle_swapcfg(cfg.get('swap', {}))
- if swapret:
- actlist.append([swapret, "none", "swap", "sw", "0", "0"])
-
- if len(actlist) == 0:
- log.debug("No modifications to fstab needed.")
- return
-
- comment = "comment=cloudconfig"
- cc_lines = []
- needswap = False
- dirs = []
- for line in actlist:
-        # append 'comment' to the fs_mntops entry, marking it as cloud-config managed
- line[3] = "%s,%s" % (line[3], comment)
- if line[2] == "swap":
- needswap = True
- if line[1].startswith("/"):
- dirs.append(line[1])
- cc_lines.append('\t'.join(line))
-
- fstab_lines = []
- for line in util.load_file(FSTAB_PATH).splitlines():
- try:
- toks = WS.split(line)
- if toks[3].find(comment) != -1:
- continue
- except Exception:
- pass
- fstab_lines.append(line)
-
- fstab_lines.extend(cc_lines)
- contents = "%s\n" % ('\n'.join(fstab_lines))
- util.write_file(FSTAB_PATH, contents)
-
- if needswap:
- try:
- util.subp(("swapon", "-a"))
- except Exception:
- util.logexc(log, "Activating swap via 'swapon -a' failed")
-
- for d in dirs:
- try:
- util.ensure_dir(d)
- except Exception:
- util.logexc(log, "Failed to make '%s' config-mount", d)
-
- try:
- util.subp(("mount", "-a"))
- except Exception:
- util.logexc(log, "Activating mounts via 'mount -a' failed")
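
To make the sizing heuristic above easier to follow, here is a minimal standalone sketch of the tier table used by suggested_swapsize(); the name pick_swap_size and the 8 GB default cap (mirroring sugg_max) are illustrative only:

GB = 2 ** 30

def pick_swap_size(memsize, maxsize=8 * GB):
    # same tiers as the formulas table in suggested_swapsize()
    formulas = [
        (1 * GB, lambda x: x * 2),    # < 1G: swap = double memory
        (2 * GB, lambda x: 2 * GB),   # < 2G: swap = 2G
        (4 * GB, lambda x: x),        # < 4G: swap = memory
        (16 * GB, lambda x: 4 * GB),  # < 16G: swap = 4G
        (64 * GB, lambda x: x / 2),   # < 64G: swap = half of memory
    ]
    size = None
    for top, func in formulas:
        if memsize <= top:
            size = min(func(memsize), maxsize)
            # a swap smaller than half of memory and under 4G is not worth it
            if size < (memsize / 2) and size < 4 * GB:
                size = 0
            break
    if size is None:          # memory >= 64G: fall back to the cap
        size = maxsize
    return int(size)

# pick_swap_size(2 * GB) == 2 * GB; pick_swap_size(8 * GB) == 4 * GB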
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
deleted file mode 100644
index 73b0e30d..00000000
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import time
-
-from cloudinit import log as logging
-from cloudinit import util
-
-REBOOT_FILE = "/var/run/reboot-required"
-REBOOT_CMD = ["/sbin/reboot"]
-
-
-def _multi_cfg_bool_get(cfg, *keys):
- for k in keys:
- if util.get_cfg_option_bool(cfg, k, False):
- return True
- return False
-
-
-def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
- util.subp(REBOOT_CMD)
- start = time.time()
- wait_time = initial_sleep
- for _i in range(0, wait_attempts):
- time.sleep(wait_time)
- wait_time *= backoff
- elapsed = time.time() - start
- log.debug("Rebooted, but still running after %s seconds", int(elapsed))
- # If we got here, not good
- elapsed = time.time() - start
- raise RuntimeError(("Reboot did not happen"
- " after %s seconds!") % (int(elapsed)))
-
-
-def handle(_name, cfg, cloud, log, _args):
- # Handle the old style + new config names
- update = _multi_cfg_bool_get(cfg, 'apt_update', 'package_update')
- upgrade = _multi_cfg_bool_get(cfg, 'package_upgrade', 'apt_upgrade')
- reboot_if_required = _multi_cfg_bool_get(cfg, 'apt_reboot_if_required',
- 'package_reboot_if_required')
- pkglist = util.get_cfg_option_list(cfg, 'packages', [])
-
- errors = []
- if update or len(pkglist) or upgrade:
- try:
- cloud.distro.update_package_sources()
- except Exception as e:
- util.logexc(log, "Package update failed")
- errors.append(e)
-
- if upgrade:
- try:
- cloud.distro.package_command("upgrade")
- except Exception as e:
- util.logexc(log, "Package upgrade failed")
- errors.append(e)
-
- if len(pkglist):
- try:
- cloud.distro.install_packages(pkglist)
- except Exception as e:
- util.logexc(log, "Failed to install packages: %s", pkglist)
- errors.append(e)
-
- # TODO(smoser): handle this less violently
- # kernel and openssl (possibly some other packages)
- # write a file /var/run/reboot-required after upgrading.
- # if that file exists and configured, then just stop right now and reboot
- reboot_fn_exists = os.path.isfile(REBOOT_FILE)
- if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists:
- try:
- log.warn("Rebooting after upgrade or install per %s", REBOOT_FILE)
- # Flush the above warning + anything else out...
- logging.flushLoggers(log)
- _fire_reboot(log)
- except Exception as e:
- util.logexc(log, "Requested reboot did not happen!")
- errors.append(e)
-
- if len(errors):
- log.warn("%s failed with exceptions, re-raising the last one",
- len(errors))
- raise errors[-1]
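
The reboot wait in _fire_reboot() above is a plain exponential backoff; a self-contained sketch of that pattern, with a hypothetical check() callback standing in for "did the reboot start?":

import time

def wait_with_backoff(check, wait_attempts=6, initial_sleep=1, backoff=2):
    # sleeps 1, 2, 4, 8, 16 and 32 seconds (63s total) before giving up
    wait_time = initial_sleep
    for _ in range(wait_attempts):
        time.sleep(wait_time)
        wait_time *= backoff
        if check():
            return True
    return False

# wait_with_backoff(lambda: False) gives up after roughly 63 seconds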
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
deleted file mode 100644
index 72176d42..00000000
--- a/cloudinit/config/cc_phone_home.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import templater
-from cloudinit import util
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-POST_LIST_ALL = [
- 'pub_key_dsa',
- 'pub_key_rsa',
- 'pub_key_ecdsa',
- 'instance_id',
- 'hostname',
-    'fqdn'
-]
-
-
-# phone_home:
-# url: http://my.foo.bar/$INSTANCE/
-# post: all
-# tries: 10
-#
-# phone_home:
-# url: http://my.foo.bar/$INSTANCE_ID/
-# post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id, hostname,
-# fqdn ]
-#
-def handle(name, cfg, cloud, log, args):
- if len(args) != 0:
- ph_cfg = util.read_conf(args[0])
- else:
- if 'phone_home' not in cfg:
- log.debug(("Skipping module named %s, "
- "no 'phone_home' configuration found"), name)
- return
- ph_cfg = cfg['phone_home']
-
- if 'url' not in ph_cfg:
- log.warn(("Skipping module named %s, "
- "no 'url' found in 'phone_home' configuration"), name)
- return
-
- url = ph_cfg['url']
- post_list = ph_cfg.get('post', 'all')
- tries = ph_cfg.get('tries')
- try:
- tries = int(tries)
- except Exception:
- tries = 10
- util.logexc(log, "Configuration entry 'tries' is not an integer, "
- "using %s instead", tries)
-
- if post_list == "all":
- post_list = POST_LIST_ALL
-
- all_keys = {}
- all_keys['instance_id'] = cloud.get_instance_id()
- all_keys['hostname'] = cloud.get_hostname()
- all_keys['fqdn'] = cloud.get_hostname(fqdn=True)
-
- pubkeys = {
- 'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub',
- 'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub',
- 'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
- }
-
- for (n, path) in pubkeys.items():
- try:
- all_keys[n] = util.load_file(path)
- except Exception:
-            util.logexc(log, "%s: failed to open, cannot phone home that "
-                        "data!", path)
-
- submit_keys = {}
- for k in post_list:
- if k in all_keys:
- submit_keys[k] = all_keys[k]
- else:
- submit_keys[k] = None
- log.warn(("Requested key %s from 'post'"
- " configuration list not available"), k)
-
-    # Get them ready to be posted
- real_submit_keys = {}
- for (k, v) in submit_keys.items():
- if v is None:
- real_submit_keys[k] = 'N/A'
- else:
- real_submit_keys[k] = str(v)
-
-    # In case the url is parameterized
- url_params = {
- 'INSTANCE_ID': all_keys['instance_id'],
- }
- url = templater.render_string(url, url_params)
- try:
- util.read_file_or_url(url, data=real_submit_keys,
- retries=tries, sec_between=3,
- ssl_details=util.fetch_ssl_details(cloud.paths))
- except Exception:
- util.logexc(log, "Failed to post phone home data to %s in %s tries",
- url, tries)
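
A rough, standard-library-only sketch of what this module does with a parameterized url and the collected keys before posting them (the url and instance id below are invented; the module itself uses templater.render_string and util.read_file_or_url):

from string import Template

instance_id = "i-0123456789abcdef0"   # hypothetical value
url = Template("http://phone.home.example/$INSTANCE_ID/").safe_substitute(
    INSTANCE_ID=instance_id)
payload = {
    'instance_id': instance_id,
    'hostname': 'node01',
    'pub_key_rsa': 'N/A',   # keys that could not be read are posted as 'N/A'
}
# the module then POSTs payload to url, retrying 'tries' times with 3s waits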
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
deleted file mode 100644
index cc3f7f70..00000000
--- a/cloudinit/config/cc_power_state_change.py
+++ /dev/null
@@ -1,223 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-import errno
-import os
-import re
-import six
-import subprocess
-import time
-
-frequency = PER_INSTANCE
-
-EXIT_FAIL = 254
-
-
-def givecmdline(pid):
- # Returns the cmdline for the given process id. In Linux we can use procfs
- # for this but on BSD there is /usr/bin/procstat.
- try:
- # Example output from procstat -c 1
- # PID COMM ARGS
- # 1 init /bin/init --
- if util.system_info()["platform"].startswith('FreeBSD'):
- (output, _err) = util.subp(['procstat', '-c', str(pid)])
- line = output.splitlines()[1]
-            m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
- return m.group(2)
- else:
- return util.load_file("/proc/%s/cmdline" % pid)
- except IOError:
- return None
-
-
-def check_condition(cond, log=None):
- if isinstance(cond, bool):
- if log:
- log.debug("Static Condition: %s" % cond)
- return cond
-
- pre = "check_condition command (%s): " % cond
- try:
- proc = subprocess.Popen(cond, shell=not isinstance(cond, list))
- proc.communicate()
- ret = proc.returncode
- if ret == 0:
- if log:
- log.debug(pre + "exited 0. condition met.")
- return True
- elif ret == 1:
- if log:
- log.debug(pre + "exited 1. condition not met.")
- return False
- else:
- if log:
- log.warn(pre + "unexpected exit %s. " % ret +
- "do not apply change.")
- return False
- except Exception as e:
- if log:
- log.warn(pre + "Unexpected error: %s" % e)
- return False
-
-
-def handle(_name, cfg, _cloud, log, _args):
-
- try:
- (args, timeout, condition) = load_power_state(cfg)
- if args is None:
- log.debug("no power_state provided. doing nothing")
- return
- except Exception as e:
- log.warn("%s Not performing power state change!" % str(e))
- return
-
- if condition is False:
- log.debug("Condition was false. Will not perform state change.")
- return
-
- mypid = os.getpid()
-
- cmdline = givecmdline(mypid)
- if not cmdline:
- log.warn("power_state: failed to get cmdline of current process")
- return
-
- devnull_fp = open(os.devnull, "w")
-
- log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args)))
-
- util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log,
- condition, execmd, [args, devnull_fp])
-
-
-def load_power_state(cfg):
-    # returns a tuple of (shutdown_command, timeout, condition)
-    # shutdown_command is None if no config found
- pstate = cfg.get('power_state')
-
- if pstate is None:
- return (None, None, None)
-
- if not isinstance(pstate, dict):
- raise TypeError("power_state is not a dict.")
-
- opt_map = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}
-
- mode = pstate.get("mode")
- if mode not in opt_map:
- raise TypeError(
- "power_state[mode] required, must be one of: %s. found: '%s'." %
- (','.join(opt_map.keys()), mode))
-
- delay = pstate.get("delay", "now")
- # convert integer 30 or string '30' to '+30'
- try:
- delay = "+%s" % int(delay)
- except ValueError:
- pass
-
- if delay != "now" and not re.match(r"\+[0-9]+", delay):
- raise TypeError(
- "power_state[delay] must be 'now' or '+m' (minutes)."
- " found '%s'." % delay)
-
- args = ["shutdown", opt_map[mode], delay]
- if pstate.get("message"):
- args.append(pstate.get("message"))
-
- try:
- timeout = float(pstate.get('timeout', 30.0))
- except ValueError:
- raise ValueError("failed to convert timeout '%s' to float." %
- pstate['timeout'])
-
- condition = pstate.get("condition", True)
- if not isinstance(condition, six.string_types + (list, bool)):
-        raise TypeError("condition type %s invalid. must be list, bool, str"
-                        % type(condition).__name__)
- return (args, timeout, condition)
-
-
-def doexit(sysexit):
- os._exit(sysexit)
-
-
-def execmd(exe_args, output=None, data_in=None):
- try:
- proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE,
- stdout=output, stderr=subprocess.STDOUT)
- proc.communicate(data_in)
- ret = proc.returncode
- except Exception:
- doexit(EXIT_FAIL)
- doexit(ret)
-
-
-def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args):
- # wait until pid, with /proc/pid/cmdline contents of pidcmdline
- # is no longer alive. After it is gone, or timeout has passed
- # execute func(args)
- msg = None
- end_time = time.time() + timeout
-
- def fatal(msg):
- if log:
- log.warn(msg)
- doexit(EXIT_FAIL)
-
- known_errnos = (errno.ENOENT, errno.ESRCH)
-
- while True:
- if time.time() > end_time:
- msg = "timeout reached before %s ended" % pid
- break
-
- try:
- cmdline = givecmdline(pid)
- if cmdline != pidcmdline:
- msg = "cmdline changed for %s [now: %s]" % (pid, cmdline)
- break
-
- except IOError as ioerr:
- if ioerr.errno in known_errnos:
-                msg = "cmdline for %s gone [%d]" % (pid, ioerr.errno)
- else:
- fatal("IOError during wait: %s" % ioerr)
- break
-
- except Exception as e:
- fatal("Unexpected Exception: %s" % e)
-
- time.sleep(.25)
-
- if not msg:
- fatal("Unexpected error in run_after_pid_gone")
-
- if log:
- log.debug(msg)
-
- try:
- if not check_condition(condition, log):
- return
- except Exception as e:
- fatal("Unexpected Exception when checking condition: %s" % e)
-
- func(*args)
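
The mode/delay handling in load_power_state() boils down to the following standalone sketch; the helper name shutdown_args and the example values are illustrative:

opt_map = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}

def shutdown_args(mode, delay="now", message=None):
    try:
        delay = "+%s" % int(delay)   # 30 or '30' becomes '+30'
    except ValueError:
        pass                         # 'now' is left as-is
    args = ["shutdown", opt_map[mode], delay]
    if message:
        args.append(message)
    return args

# shutdown_args('reboot', 30, 'rebooting for updates')
#   -> ['shutdown', '-r', '+30', 'rebooting for updates']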
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
deleted file mode 100644
index 774d3322..00000000
--- a/cloudinit/config/cc_puppet.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from six import StringIO
-
-import os
-import socket
-
-from cloudinit import helpers
-from cloudinit import util
-
-PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
-PUPPET_SSL_CERT_DIR = '/var/lib/puppet/ssl/certs/'
-PUPPET_SSL_DIR = '/var/lib/puppet/ssl'
-PUPPET_SSL_CERT_PATH = '/var/lib/puppet/ssl/certs/ca.pem'
-
-
-def _autostart_puppet(log):
- # Set puppet to automatically start
- if os.path.exists('/etc/default/puppet'):
- util.subp(['sed', '-i',
- '-e', 's/^START=.*/START=yes/',
- '/etc/default/puppet'], capture=False)
- elif os.path.exists('/bin/systemctl'):
- util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
- capture=False)
- elif os.path.exists('/sbin/chkconfig'):
- util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
- else:
- log.warn(("Sorry we do not know how to enable"
- " puppet services on this system"))
-
-
-def handle(name, cfg, cloud, log, _args):
- # If there isn't a puppet key in the configuration don't do anything
- if 'puppet' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'puppet' configuration found"), name)
- return
-
- puppet_cfg = cfg['puppet']
-
- # Start by installing the puppet package if necessary...
- install = util.get_cfg_option_bool(puppet_cfg, 'install', True)
- version = util.get_cfg_option_str(puppet_cfg, 'version', None)
- if not install and version:
- log.warn(("Puppet install set false but version supplied,"
- " doing nothing."))
- elif install:
- log.debug(("Attempting to install puppet %s,"),
- version if version else 'latest')
- cloud.distro.install_packages(('puppet', version))
-
- # ... and then update the puppet configuration
- if 'conf' in puppet_cfg:
- # Add all sections from the conf object to puppet.conf
- contents = util.load_file(PUPPET_CONF_PATH)
- # Create object for reading puppet.conf values
- puppet_config = helpers.DefaultingConfigParser()
-        # Read puppet.conf values from the original file so the new settings
-        # can be merged in. First strip leading whitespace
-        # (TODO(harlowja) is this really needed??)
- cleaned_lines = [i.lstrip() for i in contents.splitlines()]
- cleaned_contents = '\n'.join(cleaned_lines)
- puppet_config.readfp(StringIO(cleaned_contents),
- filename=PUPPET_CONF_PATH)
- for (cfg_name, cfg) in puppet_cfg['conf'].items():
- # Cert configuration is a special case
- # Dump the puppet master ca certificate in the correct place
- if cfg_name == 'ca_cert':
- # Puppet ssl sub-directory isn't created yet
- # Create it with the proper permissions and ownership
- util.ensure_dir(PUPPET_SSL_DIR, 0o771)
- util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root')
- util.ensure_dir(PUPPET_SSL_CERT_DIR)
- util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root')
- util.write_file(PUPPET_SSL_CERT_PATH, cfg)
- util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root')
- else:
-                # Iterate through the config items, we'll use ConfigParser.set
- # to overwrite or create new items as needed
- for (o, v) in cfg.items():
- if o == 'certname':
- # Expand %f as the fqdn
- # TODO(harlowja) should this use the cloud fqdn??
- v = v.replace("%f", socket.getfqdn())
- # Expand %i as the instance id
- v = v.replace("%i", cloud.get_instance_id())
- # certname needs to be downcased
- v = v.lower()
- puppet_config.set(cfg_name, o, v)
- # We got all our config as wanted we'll rename
- # the previous puppet.conf and create our new one
- util.rename(PUPPET_CONF_PATH, "%s.old" % (PUPPET_CONF_PATH))
- util.write_file(PUPPET_CONF_PATH, puppet_config.stringify())
-
- # Set it up so it autostarts
- _autostart_puppet(log)
-
- # Start puppetd
- util.subp(['service', 'puppet', 'start'], capture=False)
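
The certname expansion above ('%f' to fqdn, '%i' to instance id) can be shown in isolation; in this sketch socket.getfqdn() stands in for whatever the cloud reports, and expand_certname is an invented helper name:

import socket

def expand_certname(value, instance_id):
    value = value.replace("%f", socket.getfqdn())
    value = value.replace("%i", instance_id)
    return value.lower()    # puppet certnames are expected lower-cased

# expand_certname("%i.%f", "i-0abc123") might yield 'i-0abc123.host.example.com'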
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
deleted file mode 100644
index 2a2a9f59..00000000
--- a/cloudinit/config/cc_resizefs.py
+++ /dev/null
@@ -1,185 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import errno
-import os
-import stat
-
-from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
-
-frequency = PER_ALWAYS
-
-
-def _resize_btrfs(mount_point, devpth):
- return ('btrfs', 'filesystem', 'resize', 'max', mount_point)
-
-
-def _resize_ext(mount_point, devpth):
- return ('resize2fs', devpth)
-
-
-def _resize_xfs(mount_point, devpth):
- return ('xfs_growfs', devpth)
-
-
-def _resize_ufs(mount_point, devpth):
- return ('growfs', devpth)
-
-# Do not use a dictionary as these commands should be able to be used
-# for multiple filesystem types if possible, e.g. one command for
-# ext2, ext3 and ext4.
-RESIZE_FS_PREFIXES_CMDS = [
- ('btrfs', _resize_btrfs),
- ('ext', _resize_ext),
- ('xfs', _resize_xfs),
- ('ufs', _resize_ufs),
-]
-
-NOBLOCK = "noblock"
-
-
-def rootdev_from_cmdline(cmdline):
- found = None
- for tok in cmdline.split():
- if tok.startswith("root="):
- found = tok[5:]
- break
- if found is None:
- return None
-
- if found.startswith("/dev/"):
- return found
- if found.startswith("LABEL="):
- return "/dev/disk/by-label/" + found[len("LABEL="):]
- if found.startswith("UUID="):
- return "/dev/disk/by-uuid/" + found[len("UUID="):]
-
- return "/dev/" + found
-
-
-def handle(name, cfg, _cloud, log, args):
- if len(args) != 0:
- resize_root = args[0]
- else:
- resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
-
- if not util.translate_bool(resize_root, addons=[NOBLOCK]):
- log.debug("Skipping module named %s, resizing disabled", name)
- return
-
- # TODO(harlowja) is the directory ok to be used??
- resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
- util.ensure_dir(resize_root_d)
-
- # TODO(harlowja): allow what is to be resized to be configurable??
- resize_what = "/"
- result = util.get_mount_info(resize_what, log)
- if not result:
- log.warn("Could not determine filesystem type of %s", resize_what)
- return
-
- (devpth, fs_type, mount_point) = result
-
- info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
- log.debug("resize_info: %s" % info)
-
- container = util.is_container()
-
- # Ensure the path is a block device.
- if (devpth == "/dev/root" and not os.path.exists(devpth) and
- not container):
- devpth = rootdev_from_cmdline(util.get_cmdline())
- if devpth is None:
- log.warn("Unable to find device '/dev/root'")
- return
- log.debug("Converted /dev/root to '%s' per kernel cmdline", devpth)
-
- try:
- statret = os.stat(devpth)
- except OSError as exc:
- if container and exc.errno == errno.ENOENT:
- log.debug("Device '%s' did not exist in container. "
- "cannot resize: %s", devpth, info)
- elif exc.errno == errno.ENOENT:
- log.warn("Device '%s' did not exist. cannot resize: %s",
- devpth, info)
- else:
- raise exc
- return
-
- if not os.access(devpth, os.W_OK):
- if container:
- log.debug("'%s' not writable in container. cannot resize: %s",
- devpth, info)
- else:
- log.warn("'%s' not writable. cannot resize: %s", devpth, info)
- return
-
- if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
- if container:
- log.debug("device '%s' not a block device in container."
- " cannot resize: %s" % (devpth, info))
- else:
- log.warn("device '%s' not a block device. cannot resize: %s" %
- (devpth, info))
- return
-
- resizer = None
- fstype_lc = fs_type.lower()
- for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
- if fstype_lc.startswith(pfix):
- resizer = root_cmd
- break
-
- if not resizer:
- log.warn("Not resizing unknown filesystem type %s for %s",
- fs_type, resize_what)
- return
-
- resize_cmd = resizer(resize_what, devpth)
- log.debug("Resizing %s (%s) using %s", resize_what, fs_type,
- ' '.join(resize_cmd))
-
- if resize_root == NOBLOCK:
- # Fork to a child that will run
- # the resize command
- util.fork_cb(
- util.log_time, logfunc=log.debug, msg="backgrounded Resizing",
- func=do_resize, args=(resize_cmd, log))
- else:
- util.log_time(logfunc=log.debug, msg="Resizing",
- func=do_resize, args=(resize_cmd, log))
-
- action = 'Resized'
- if resize_root == NOBLOCK:
- action = 'Resizing (via forking)'
- log.debug("%s root filesystem (type=%s, val=%s)", action, fs_type,
- resize_root)
-
-
-def do_resize(resize_cmd, log):
- try:
- util.subp(resize_cmd)
- except util.ProcessExecutionError:
- util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd)
- raise
- # TODO(harlowja): Should we add a fsck check after this to make
- # sure we didn't corrupt anything?
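
For reference, the prefix-match selection over RESIZE_FS_PREFIXES_CMDS can be sketched as a standalone helper (pick_resizer is an invented name):

def pick_resizer(fs_type, mount_point, devpth):
    # mirrors RESIZE_FS_PREFIXES_CMDS: first prefix match wins
    table = [
        ('btrfs', lambda m, d: ('btrfs', 'filesystem', 'resize', 'max', m)),
        ('ext',   lambda m, d: ('resize2fs', d)),
        ('xfs',   lambda m, d: ('xfs_growfs', d)),
        ('ufs',   lambda m, d: ('growfs', d)),
    ]
    for prefix, cmd in table:
        if fs_type.lower().startswith(prefix):
            return cmd(mount_point, devpth)
    return None

# pick_resizer('ext4', '/', '/dev/vda1') -> ('resize2fs', '/dev/vda1')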
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
deleted file mode 100644
index 71d9e3a7..00000000
--- a/cloudinit/config/cc_resolv_conf.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2013 Craig Tracey
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Craig Tracey <craigtracey@gmail.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# Note:
-# This module is intended to manage resolv.conf in environments where
-# early configuration of resolv.conf is necessary for further
-# bootstrapping and/or where configuration management such as puppet or
-# chef own dns configuration. As Debian/Ubuntu will, by default, utilize
-# resolvconf, and similarly RedHat will use sysconfig, this module is
-# likely to be of little use unless those are configured correctly.
-#
-# For RedHat with sysconfig, be sure to set PEERDNS=no for all DHCP
-# enabled NICs. And, in Ubuntu/Debian it is recommended that DNS
-# be configured via the standard /etc/network/interfaces configuration
-# file.
-#
-#
-# Usage Example:
-#
-# #cloud-config
-# manage_resolv_conf: true
-#
-# resolv_conf:
-# nameservers: ['8.8.4.4', '8.8.8.8']
-# searchdomains:
-# - foo.example.com
-# - bar.example.com
-# domain: example.com
-# options:
-# rotate: true
-# timeout: 1
-#
-
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import templater
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-frequency = PER_INSTANCE
-
-distros = ['fedora', 'rhel', 'sles']
-
-
-def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
- flags = []
- false_flags = []
-
- if 'options' in params:
- for key, val in params['options'].items():
- if isinstance(val, bool):
- if val:
- flags.append(key)
- else:
- false_flags.append(key)
-
- for flag in flags + false_flags:
- del params['options'][flag]
-
- if not params.get('options'):
- params['options'] = {}
-
- params['flags'] = flags
-    LOG.debug("Writing resolv.conf from template %s", template_fn)
- templater.render_to_file(template_fn, target_fname, params)
-
-
-def handle(name, cfg, cloud, log, _args):
- """
- Handler for resolv.conf
-
- @param name: The module name "resolv-conf" from cloud.cfg
- @param cfg: A nested dict containing the entire cloud config contents.
- @param cloud: The L{CloudInit} object in use.
- @param log: Pre-initialized Python logger object to use for logging.
- @param args: Any module arguments from cloud.cfg
- """
- if "manage_resolv_conf" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'manage_resolv_conf' key in configuration"), name)
- return
-
- if not util.get_cfg_option_bool(cfg, "manage_resolv_conf", False):
- log.debug(("Skipping module named %s,"
- " 'manage_resolv_conf' present but set to False"), name)
- return
-
-    if "resolv_conf" not in cfg:
-        log.warn("manage_resolv_conf True but no parameters provided!")
-        return
-
- template_fn = cloud.get_template_filename('resolv.conf')
- if not template_fn:
- log.warn("No template found, not rendering /etc/resolv.conf")
- return
-
- generate_resolv_conf(template_fn=template_fn, params=cfg["resolv_conf"])
- return
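
The boolean-option handling in generate_resolv_conf() is easier to see in isolation; a minimal sketch with invented params:

params = {'options': {'rotate': True, 'timeout': 1, 'debug': False}}
flags = [k for k, v in params['options'].items() if isinstance(v, bool) and v]
# boolean options are removed entirely; True ones become bare flags
params['options'] = {k: v for k, v in params['options'].items()
                     if not isinstance(v, bool)}
params['flags'] = flags
# params is now {'options': {'timeout': 1}, 'flags': ['rotate']}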
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
deleted file mode 100644
index 3a113aea..00000000
--- a/cloudinit/config/cc_rh_subscription.py
+++ /dev/null
@@ -1,408 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Red Hat, Inc.
-#
-# Author: Brent Baude <bbaude@redhat.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import util
-
-
-def handle(name, cfg, _cloud, log, _args):
- sm = SubscriptionManager(cfg)
- sm.log = log
- if not sm.is_configured():
- log.debug("%s: module not configured.", name)
- return None
-
- if not sm.is_registered():
- try:
- verify, verify_msg = sm._verify_keys()
- if verify is not True:
- raise SubscriptionError(verify_msg)
- cont = sm.rhn_register()
- if not cont:
- raise SubscriptionError("Registration failed or did not "
- "run completely")
-
- # Splitting up the registration, auto-attach, and servicelevel
- # commands because the error codes, messages from subman are not
- # specific enough.
-
- # Attempt to change the service level
- if sm.auto_attach and sm.servicelevel is not None:
- if not sm._set_service_level():
- raise SubscriptionError("Setting of service-level "
- "failed")
- else:
- sm.log.debug("Completed auto-attach with service level")
- elif sm.auto_attach:
- if not sm._set_auto_attach():
- raise SubscriptionError("Setting auto-attach failed")
- else:
- sm.log.debug("Completed auto-attach")
-
- if sm.pools is not None:
- if not isinstance(sm.pools, list):
-                    pool_fail = "Pools must be in the format of a list"
- raise SubscriptionError(pool_fail)
-
- return_stat = sm.addPool(sm.pools)
- if not return_stat:
- raise SubscriptionError("Unable to attach pools {0}"
- .format(sm.pools))
- if (sm.enable_repo is not None) or (sm.disable_repo is not None):
- return_stat = sm.update_repos(sm.enable_repo, sm.disable_repo)
- if not return_stat:
- raise SubscriptionError("Unable to add or remove repos")
- sm.log_success("rh_subscription plugin completed successfully")
- except SubscriptionError as e:
- sm.log_warn(str(e))
- sm.log_warn("rh_subscription plugin did not complete successfully")
- else:
- sm.log_success("System is already registered")
-
-
-class SubscriptionError(Exception):
- pass
-
-
-class SubscriptionManager(object):
- valid_rh_keys = ['org', 'activation-key', 'username', 'password',
- 'disable-repo', 'enable-repo', 'add-pool',
- 'rhsm-baseurl', 'server-hostname',
- 'auto-attach', 'service-level']
-
- def __init__(self, cfg):
- self.cfg = cfg
- self.rhel_cfg = self.cfg.get('rh_subscription', {})
- self.rhsm_baseurl = self.rhel_cfg.get('rhsm-baseurl')
- self.server_hostname = self.rhel_cfg.get('server-hostname')
- self.pools = self.rhel_cfg.get('add-pool')
- self.activation_key = self.rhel_cfg.get('activation-key')
- self.org = self.rhel_cfg.get('org')
- self.userid = self.rhel_cfg.get('username')
- self.password = self.rhel_cfg.get('password')
- self.auto_attach = self.rhel_cfg.get('auto-attach')
- self.enable_repo = self.rhel_cfg.get('enable-repo')
- self.disable_repo = self.rhel_cfg.get('disable-repo')
- self.servicelevel = self.rhel_cfg.get('service-level')
- self.subman = ['subscription-manager']
-
- def log_success(self, msg):
- '''Simple wrapper for logging info messages. Useful for unittests'''
- self.log.info(msg)
-
- def log_warn(self, msg):
- '''Simple wrapper for logging warning messages. Useful for unittests'''
- self.log.warn(msg)
-
- def _verify_keys(self):
- '''
- Checks that the keys in the rh_subscription dict from the user-data
- are what we expect.
- '''
-
- for k in self.rhel_cfg:
- if k not in self.valid_rh_keys:
- bad_key = "{0} is not a valid key for rh_subscription. "\
- "Valid keys are: "\
- "{1}".format(k, ', '.join(self.valid_rh_keys))
- return False, bad_key
-
- # Check for bad auto-attach value
- if (self.auto_attach is not None) and \
- not (util.is_true(self.auto_attach) or
- util.is_false(self.auto_attach)):
-            not_bool = "The key auto-attach must be a boolean value "\
-                "(True/False)"
- return False, not_bool
-
- if (self.servicelevel is not None) and ((not self.auto_attach) or
- (util.is_false(str(self.auto_attach)))):
- no_auto = ("The service-level key must be used in conjunction "
- "with the auto-attach key. Please re-run with "
- "auto-attach: True")
- return False, no_auto
- return True, None
-
- def is_registered(self):
- '''
- Checks if the system is already registered and returns
- True if so, else False
- '''
- cmd = ['identity']
-
- try:
- self._sub_man_cli(cmd)
- except util.ProcessExecutionError:
- return False
-
- return True
-
- def _sub_man_cli(self, cmd, logstring_val=False):
- '''
-        Uses the preferred cloud-init subprocess helper util.subp
- and runs subscription-manager. Breaking this to a
- separate function for later use in mocking and unittests
- '''
- cmd = self.subman + cmd
- return util.subp(cmd, logstring=logstring_val)
-
- def rhn_register(self):
- '''
- Registers the system by userid and password or activation key
- and org. Returns True when successful False when not.
- '''
-
- if (self.activation_key is not None) and (self.org is not None):
- # register by activation key
- cmd = ['register', '--activationkey={0}'.
- format(self.activation_key), '--org={0}'.format(self.org)]
-
- # If the baseurl and/or server url are passed in, we register
- # with them.
-
- if self.rhsm_baseurl is not None:
- cmd.append("--baseurl={0}".format(self.rhsm_baseurl))
-
- if self.server_hostname is not None:
- cmd.append("--serverurl={0}".format(self.server_hostname))
-
- try:
- return_out, return_err = self._sub_man_cli(cmd,
- logstring_val=True)
- except util.ProcessExecutionError as e:
- if e.stdout == "":
- self.log_warn("Registration failed due "
- "to: {0}".format(e.stderr))
- return False
-
- elif (self.userid is not None) and (self.password is not None):
- # register by username and password
- cmd = ['register', '--username={0}'.format(self.userid),
- '--password={0}'.format(self.password)]
-
- # If the baseurl and/or server url are passed in, we register
- # with them.
-
- if self.rhsm_baseurl is not None:
- cmd.append("--baseurl={0}".format(self.rhsm_baseurl))
-
- if self.server_hostname is not None:
- cmd.append("--serverurl={0}".format(self.server_hostname))
-
- # Attempting to register the system only
- try:
- return_out, return_err = self._sub_man_cli(cmd,
- logstring_val=True)
- except util.ProcessExecutionError as e:
- if e.stdout == "":
- self.log_warn("Registration failed due "
- "to: {0}".format(e.stderr))
- return False
-
- else:
- self.log_warn("Unable to register system due to incomplete "
- "information.")
- self.log_warn("Use either activationkey and org *or* userid "
- "and password")
- return False
-
- reg_id = return_out.split("ID: ")[1].rstrip()
- self.log.debug("Registered successfully with ID {0}".format(reg_id))
- return True
-
- def _set_service_level(self):
- cmd = ['attach', '--auto', '--servicelevel={0}'
- .format(self.servicelevel)]
-
- try:
- return_out, return_err = self._sub_man_cli(cmd)
- except util.ProcessExecutionError as e:
- if e.stdout.rstrip() != '':
- for line in e.stdout.split("\n"):
-                    if line != '':
- self.log_warn(line)
- else:
- self.log_warn("Setting the service level failed with: "
- "{0}".format(e.stderr.strip()))
- return False
- for line in return_out.split("\n"):
-            if line != "":
- self.log.debug(line)
- return True
-
- def _set_auto_attach(self):
- cmd = ['attach', '--auto']
- try:
- return_out, return_err = self._sub_man_cli(cmd)
-        except util.ProcessExecutionError as e:
-            self.log_warn("Auto-attach failed with: "
-                          "{0}".format(str(e).strip()))
- return False
- for line in return_out.split("\n"):
-            if line != "":
- self.log.debug(line)
- return True
-
- def _getPools(self):
- '''
- Gets the list pools for the active subscription and returns them
- in list form.
- '''
- available = []
- consumed = []
-
- # Get all available pools
- cmd = ['list', '--available', '--pool-only']
- results, errors = self._sub_man_cli(cmd)
- available = (results.rstrip()).split("\n")
-
- # Get all consumed pools
- cmd = ['list', '--consumed', '--pool-only']
- results, errors = self._sub_man_cli(cmd)
- consumed = (results.rstrip()).split("\n")
-
- return available, consumed
-
- def _getRepos(self):
- '''
- Obtains the current list of active yum repositories and returns
- them in list form.
- '''
-
- cmd = ['repos', '--list-enabled']
- return_out, return_err = self._sub_man_cli(cmd)
- active_repos = []
- for repo in return_out.split("\n"):
- if "Repo ID:" in repo:
- active_repos.append((repo.split(':')[1]).strip())
-
- cmd = ['repos', '--list-disabled']
- return_out, return_err = self._sub_man_cli(cmd)
-
- inactive_repos = []
- for repo in return_out.split("\n"):
- if "Repo ID:" in repo:
- inactive_repos.append((repo.split(':')[1]).strip())
- return active_repos, inactive_repos
-
- def addPool(self, pools):
- '''
- Takes a list of subscription pools and "attaches" them to the
- current subscription
- '''
-
- # An empty list was passed
- if len(pools) == 0:
- self.log.debug("No pools to attach")
- return True
-
- pool_available, pool_consumed = self._getPools()
- pool_list = []
- cmd = ['attach']
- for pool in pools:
- if (pool not in pool_consumed) and (pool in pool_available):
- pool_list.append('--pool={0}'.format(pool))
- else:
- self.log_warn("Pool {0} is not available".format(pool))
- if len(pool_list) > 0:
- cmd.extend(pool_list)
- try:
- self._sub_man_cli(cmd)
- self.log.debug("Attached the following pools to your "
- "system: %s" % (", ".join(pool_list))
- .replace('--pool=', ''))
- return True
- except util.ProcessExecutionError as e:
- self.log_warn("Unable to attach pool {0} "
- "due to {1}".format(pool, e))
- return False
-
- def update_repos(self, erepos, drepos):
- '''
- Takes a list of yum repo ids that need to be disabled or enabled; then
- it verifies if they are already enabled or disabled and finally
- executes the action to disable or enable
- '''
-
- if (erepos is not None) and (not isinstance(erepos, list)):
-            self.log_warn("Repo IDs must be in the format of a list.")
- return False
-
- if (drepos is not None) and (not isinstance(drepos, list)):
-            self.log_warn("Repo IDs must be in the format of a list.")
- return False
-
-        # Normalize None to empty lists so the length checks and loops below
-        # are safe when only one of enable/disable is provided
-        erepos = erepos or []
-        drepos = drepos or []
-
-        # Bail if both lists are empty
-        if (len(erepos) == 0) and (len(drepos) == 0):
- self.log.debug("No repo IDs to enable or disable")
- return True
-
- active_repos, inactive_repos = self._getRepos()
- # Creating a list of repoids to be enabled
- enable_list = []
- enable_list_fail = []
- for repoid in erepos:
- if (repoid in inactive_repos):
- enable_list.append("--enable={0}".format(repoid))
- else:
- enable_list_fail.append(repoid)
-
- # Creating a list of repoids to be disabled
- disable_list = []
- disable_list_fail = []
- for repoid in drepos:
- if repoid in active_repos:
- disable_list.append("--disable={0}".format(repoid))
- else:
- disable_list_fail.append(repoid)
-
- # Logging any repos that are already enabled or disabled
- if len(enable_list_fail) > 0:
- for fail in enable_list_fail:
- # Check if the repo exists or not
- if fail in active_repos:
- self.log.debug("Repo {0} is already enabled".format(fail))
- else:
- self.log_warn("Repo {0} does not appear to "
- "exist".format(fail))
- if len(disable_list_fail) > 0:
- for fail in disable_list_fail:
- self.log.debug("Repo {0} not disabled "
- "because it is not enabled".format(fail))
-
- cmd = ['repos']
- if len(enable_list) > 0:
- cmd.extend(enable_list)
- if len(disable_list) > 0:
- cmd.extend(disable_list)
-
- try:
- self._sub_man_cli(cmd)
- except util.ProcessExecutionError as e:
- self.log_warn("Unable to alter repos due to {0}".format(e))
- return False
-
- if len(enable_list) > 0:
- self.log.debug("Enabled the following repos: %s" %
- (", ".join(enable_list)).replace('--enable=', ''))
- if len(disable_list) > 0:
- self.log.debug("Disabled the following repos: %s" %
- (", ".join(disable_list)).replace('--disable=', ''))
- return True
-
- def is_configured(self):
- return bool((self.userid and self.password) or self.activation_key)
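
The repo command assembly in update_repos() reduces to a short sketch (the repo ids below are invented):

repos_to_enable = ['rhel-7-server-rpms']   # hypothetical repo ids
repos_to_disable = ['epel']
cmd = ['repos']
cmd.extend('--enable={0}'.format(r) for r in repos_to_enable)
cmd.extend('--disable={0}'.format(r) for r in repos_to_disable)
# prefixed with ['subscription-manager'] this runs:
#   subscription-manager repos --enable=rhel-7-server-rpms --disable=epel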
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
deleted file mode 100644
index 8118fac4..00000000
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# The purpose of this script is to allow cloud-init to consume
-# rightscale style userdata. rightscale user data is key-value pairs
-# in a url-query-string like format.
-#
-# for cloud-init support, there will be a key named
-# 'CLOUD_INIT_REMOTE_HOOK'.
-#
-# This cloud-config module will
-# - read the blob of data from raw user data, and parse it as key/value
-# - for each key that is found, download the content to
-# the local instance/scripts directory and set them executable.
-# - the files in that directory will be run by the user-scripts module
-# Therefore, this must run before that.
-#
-#
-
-import os
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import url_helper as uhelp
-from cloudinit import util
-
-from six.moves.urllib_parse import parse_qs
-
-frequency = PER_INSTANCE
-
-MY_NAME = "cc_rightscale_userdata"
-MY_HOOKNAME = 'CLOUD_INIT_REMOTE_HOOK'
-
-
-def handle(name, _cfg, cloud, log, _args):
- try:
- ud = cloud.get_userdata_raw()
- except Exception:
- log.debug("Failed to get raw userdata in module %s", name)
- return
-
- try:
- mdict = parse_qs(ud)
- if not mdict or MY_HOOKNAME not in mdict:
- log.debug(("Skipping module %s, "
- "did not find %s in parsed"
- " raw userdata"), name, MY_HOOKNAME)
- return
- except Exception:
- util.logexc(log, "Failed to parse query string %s into a dictionary",
- ud)
- raise
-
- wrote_fns = []
- captured_excps = []
-
-    # These will eventually be run by the cc_scripts_user module
- # TODO(harlowja): maybe this should just be a new user data handler??
- # Instead of a late module that acts like a user data handler?
- scripts_d = cloud.get_ipath_cur('scripts')
- urls = mdict[MY_HOOKNAME]
- for (i, url) in enumerate(urls):
- fname = os.path.join(scripts_d, "rightscale-%02i" % (i))
- try:
- resp = uhelp.readurl(url)
-            # Ensure it's a valid http response (and that something came back)
- if resp.ok() and resp.contents:
- util.write_file(fname, resp, mode=0o700)
- wrote_fns.append(fname)
- except Exception as e:
- captured_excps.append(e)
- util.logexc(log, "%s failed to read %s and write %s", MY_NAME, url,
- fname)
-
- if wrote_fns:
- log.debug("Wrote out rightscale userdata to %s files", len(wrote_fns))
-
- if len(wrote_fns) != len(urls):
- skipped = len(urls) - len(wrote_fns)
- log.debug("%s urls were skipped or failed", skipped)
-
- if captured_excps:
- log.warn("%s failed with exceptions, re-raising the last one",
- len(captured_excps))
- raise captured_excps[-1]
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
deleted file mode 100644
index b8642d65..00000000
--- a/cloudinit/config/cc_rsyslog.py
+++ /dev/null
@@ -1,366 +0,0 @@
-# vi: ts=4 expandtab syntax=python
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-rsyslog module allows configuration of syslog logging via rsyslog
-Configuration is done under the cloud-config top level 'rsyslog'.
-
-Under 'rsyslog' you can define:
- - configs: [default=[]]
- this is a list. entries in it are a string or a dictionary.
- each entry has 2 parts:
- * content
- * filename
- if the entry is a string, then it is assigned to 'content'.
- for each entry, content is written to the provided filename.
- if filename is not provided, its default is read from 'config_filename'
-
-    Content here can be any valid rsyslog configuration. No specific
-    format is enforced.
-
- For simply logging to an existing remote syslog server, via udp:
- configs: ["*.* @192.168.1.1"]
-
- - remotes: [default={}]
- This is a dictionary of name / value pairs.
-    In comparison to 'configs', it is more focused in that it only supports
-    remote syslog configuration. It is not rsyslog specific, and could be
-    adapted to other syslog implementations.
-
- Each entry in remotes is a 'name' and a 'value'.
-     * name: a string identifying the entry. Good practice is to use
-       a consistent and identifiable string for the producer.
- For example, the MAAS service could use 'maas' as the key.
- * value consists of the following parts:
- * optional filter for log messages
- default if not present: *.*
-       * optional leading '@' or '@@' (indicates udp or tcp respectively).
-         default if not present: '@' (udp).
-         This is the rsyslog notation for the transport.
- * ipv4 or ipv6 or hostname
- ipv6 addresses must be in [::1] format. (@[fd00::1]:514)
- * optional port
- port defaults to 514
-
- - config_filename: [default=20-cloud-config.conf]
- this is the file name to use if none is provided in a config entry.
-
- - config_dir: [default=/etc/rsyslog.d]
- this directory is used for filenames that are not absolute paths.
-
- - service_reload_command: [default="auto"]
- this command is executed if files have been written and thus the syslog
- daemon needs to be told.
-
-Note, since cloud-init 0.5 a legacy version of rsyslog config has been
-present and is still supported. See below for the mappings between old
-value and new value:
- old value -> new value
- 'rsyslog' -> rsyslog/configs
- 'rsyslog_filename' -> rsyslog/config_filename
- 'rsyslog_dir' -> rsyslog/config_dir
-
-the legacy config does not support 'service_reload_command'.
-
-Example config:
- #cloud-config
- rsyslog:
- configs:
- - "*.* @@192.158.1.1"
- - content: "*.* @@192.0.2.1:10514"
- filename: 01-example.conf
- - content: |
- *.* @@syslogd.example.com
- remotes:
- maas: "192.168.1.1"
- juju: "10.0.4.1"
- config_dir: config_dir
- config_filename: config_filename
- service_reload_command: [your, syslog, restart, command]
-
-Example Legacy config:
- #cloud-config
- rsyslog:
- - "*.* @@192.158.1.1"
- rsyslog_dir: /etc/rsyslog-config.d/
- rsyslog_filename: 99-local.conf
-"""
-
-import os
-import re
-import six
-
-from cloudinit import log as logging
-from cloudinit import util
-
-DEF_FILENAME = "20-cloud-config.conf"
-DEF_DIR = "/etc/rsyslog.d"
-DEF_RELOAD = "auto"
-DEF_REMOTES = {}
-
-KEYNAME_CONFIGS = 'configs'
-KEYNAME_FILENAME = 'config_filename'
-KEYNAME_DIR = 'config_dir'
-KEYNAME_RELOAD = 'service_reload_command'
-KEYNAME_LEGACY_FILENAME = 'rsyslog_filename'
-KEYNAME_LEGACY_DIR = 'rsyslog_dir'
-KEYNAME_REMOTES = 'remotes'
-
-LOG = logging.getLogger(__name__)
-
-COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*')
-HOST_PORT_RE = re.compile(
- r'^(?P<proto>[@]{0,2})'
- '(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
- '([:](?P<port>[0-9]+))?$')
-
-
-def reload_syslog(command=DEF_RELOAD, systemd=False):
- service = 'rsyslog'
- if command == DEF_RELOAD:
- if systemd:
- cmd = ['systemctl', 'reload-or-try-restart', service]
- else:
- cmd = ['service', service, 'restart']
- else:
- cmd = command
- util.subp(cmd, capture=True)
-
-
-def load_config(cfg):
- # return an updated config with entries of the correct type
- # support converting the old top level format into new format
- mycfg = cfg.get('rsyslog', {})
-
- if isinstance(cfg.get('rsyslog'), list):
- mycfg = {KEYNAME_CONFIGS: cfg.get('rsyslog')}
- if KEYNAME_LEGACY_FILENAME in cfg:
- mycfg[KEYNAME_FILENAME] = cfg[KEYNAME_LEGACY_FILENAME]
- if KEYNAME_LEGACY_DIR in cfg:
- mycfg[KEYNAME_DIR] = cfg[KEYNAME_LEGACY_DIR]
-
- fillup = (
- (KEYNAME_CONFIGS, [], list),
- (KEYNAME_DIR, DEF_DIR, six.string_types),
- (KEYNAME_FILENAME, DEF_FILENAME, six.string_types),
- (KEYNAME_RELOAD, DEF_RELOAD, six.string_types + (list,)),
- (KEYNAME_REMOTES, DEF_REMOTES, dict))
-
- for key, default, vtypes in fillup:
- if key not in mycfg or not isinstance(mycfg[key], vtypes):
- mycfg[key] = default
-
- return mycfg
-
-
-def apply_rsyslog_changes(configs, def_fname, cfg_dir):
- # apply the changes in 'configs' to the paths in def_fname and cfg_dir
- # return a list of the files changed
- files = []
- for cur_pos, ent in enumerate(configs):
- if isinstance(ent, dict):
- if "content" not in ent:
- LOG.warn("No 'content' entry in config entry %s", cur_pos + 1)
- continue
- content = ent['content']
- filename = ent.get("filename", def_fname)
- else:
- content = ent
- filename = def_fname
-
- filename = filename.strip()
- if not filename:
- LOG.warn("Entry %s has an empty filename", cur_pos + 1)
- continue
-
- filename = os.path.join(cfg_dir, filename)
-
- # Truncate filename first time you see it
- omode = "ab"
- if filename not in files:
- omode = "wb"
- files.append(filename)
-
- try:
- endl = ""
- if not content.endswith("\n"):
- endl = "\n"
- util.write_file(filename, content + endl, omode=omode)
- except Exception:
- util.logexc(LOG, "Failed to write to %s", filename)
-
- return files
-
-
-def parse_remotes_line(line, name=None):
- try:
- data, comment = COMMENT_RE.split(line)
- comment = comment.strip()
- except ValueError:
- data, comment = (line, None)
-
- toks = data.strip().split()
- match = None
- if len(toks) == 1:
- host_port = data
- elif len(toks) == 2:
- match, host_port = toks
- else:
- raise ValueError("line had multiple spaces: %s" % data)
-
- toks = HOST_PORT_RE.match(host_port)
-
- if not toks:
- raise ValueError("Invalid host specification '%s'" % host_port)
-
- proto = toks.group('proto')
- addr = toks.group('addr') or toks.group('bracket_addr')
- port = toks.group('port')
-
- if addr.startswith("[") and not addr.endswith("]"):
- raise ValueError("host spec had invalid brackets: %s" % addr)
-
- if comment and not name:
- name = comment
-
- t = SyslogRemotesLine(name=name, match=match, proto=proto,
- addr=addr, port=port)
- t.validate()
- return t
-
-
-class SyslogRemotesLine(object):
- def __init__(self, name=None, match=None, proto=None, addr=None,
- port=None):
- if not match:
- match = "*.*"
- self.name = name
- self.match = match
- if not proto:
- proto = "udp"
- if proto == "@":
- proto = "udp"
- elif proto == "@@":
- proto = "tcp"
- self.proto = proto
-
- self.addr = addr
- if port:
- self.port = int(port)
- else:
- self.port = None
-
- def validate(self):
- if self.port:
- try:
- int(self.port)
- except ValueError:
- raise ValueError("port '%s' is not an integer" % self.port)
-
- if not self.addr:
- raise ValueError("address is required")
-
- def __repr__(self):
- return "[name=%s match=%s proto=%s address=%s port=%s]" % (
- self.name, self.match, self.proto, self.addr, self.port
- )
-
- def __str__(self):
- buf = self.match + " "
- if self.proto == "udp":
- buf += "@"
- elif self.proto == "tcp":
- buf += "@@"
-
- if ":" in self.addr:
- buf += "[" + self.addr + "]"
- else:
- buf += self.addr
-
- if self.port:
- buf += ":%s" % self.port
-
- if self.name:
- buf += " # %s" % self.name
- return buf
-
-
-def remotes_to_rsyslog_cfg(remotes, header=None, footer=None):
- if not remotes:
- return None
- lines = []
- if header is not None:
- lines.append(header)
- for name, line in remotes.items():
- if not line:
- continue
- try:
- lines.append(str(parse_remotes_line(line, name=name)))
- except ValueError as e:
- LOG.warn("failed loading remote %s: %s [%s]", name, line, e)
- if footer is not None:
- lines.append(footer)
- return '\n'.join(lines) + "\n"
-
-
-def handle(name, cfg, cloud, log, _args):
- if 'rsyslog' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'rsyslog' key in configuration"), name)
- return
-
- mycfg = load_config(cfg)
- configs = mycfg[KEYNAME_CONFIGS]
-
- if mycfg[KEYNAME_REMOTES]:
- configs.append(
- remotes_to_rsyslog_cfg(
- mycfg[KEYNAME_REMOTES],
- header="# begin remotes",
- footer="# end remotes",
- ))
-
- if not mycfg['configs']:
- log.debug("Empty config rsyslog['configs'], nothing to do")
- return
-
- changes = apply_rsyslog_changes(
- configs=mycfg[KEYNAME_CONFIGS],
- def_fname=mycfg[KEYNAME_FILENAME],
- cfg_dir=mycfg[KEYNAME_DIR])
-
- if not changes:
- log.debug("restart of syslog not necessary, no changes made")
- return
-
- try:
- restarted = reload_syslog(
- command=mycfg[KEYNAME_RELOAD],
- systemd=cloud.distro.uses_systemd()),
- except util.ProcessExecutionError as e:
- restarted = False
- log.warn("Failed to reload syslog", e)
-
- if restarted:
- # This only needs to run if we *actually* restarted
- # syslog above.
- cloud.cycle_logging()
- # This should now use rsyslog if
- # the logging was setup to use it...
- log.debug("%s configured %s files", name, changes)
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
deleted file mode 100644
index bc09d38c..00000000
--- a/cloudinit/config/cc_runcmd.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-
-def handle(name, cfg, cloud, log, _args):
- if "runcmd" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'runcmd' key in configuration"), name)
- return
-
- out_fn = os.path.join(cloud.get_ipath('scripts'), "runcmd")
- cmd = cfg["runcmd"]
- try:
- content = util.shellify(cmd)
- util.write_file(out_fn, content, 0o700)
- except Exception:
- util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn)
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
deleted file mode 100644
index f5786a31..00000000
--- a/cloudinit/config/cc_salt_minion.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Author: Jeff Bauer <jbauer@rubic.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-# Note: see http://saltstack.org/topics/installation/
-
-
-def handle(name, cfg, cloud, log, _args):
- # If there isn't a salt key in the configuration don't do anything
- if 'salt_minion' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'salt_minion' key in configuration"), name)
- return
-
- salt_cfg = cfg['salt_minion']
-
- # Start by installing the salt package ...
- cloud.distro.install_packages(('salt-minion',))
-
- # Ensure we can configure files at the right dir
- config_dir = salt_cfg.get("config_dir", '/etc/salt')
- util.ensure_dir(config_dir)
-
- # ... and then update the salt configuration
- if 'conf' in salt_cfg:
- # Add all sections from the conf object to /etc/salt/minion
- minion_config = os.path.join(config_dir, 'minion')
- minion_data = util.yaml_dumps(salt_cfg.get('conf'))
- util.write_file(minion_config, minion_data)
-
- # ... copy the key pair if specified
- if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
- pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki')
- with util.umask(0o77):
- util.ensure_dir(pki_dir)
- pub_name = os.path.join(pki_dir, 'minion.pub')
- pem_name = os.path.join(pki_dir, 'minion.pem')
- util.write_file(pub_name, salt_cfg['public_key'])
- util.write_file(pem_name, salt_cfg['private_key'])
-
- # restart salt-minion. 'service' will start even if not started. if it
- # was started, it needs to be restarted for config change.
- util.subp(['service', 'salt-minion', 'restart'], capture=False)
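The key-pair branch above creates the pki directory and key files under a restrictive umask. A standalone sketch of that pattern, pointed at a throwaway directory instead of /etc/salt (the real module uses util.ensure_dir, util.umask and util.yaml_dumps; the key strings here are placeholders):

import os
import tempfile

public_key = "ssh-rsa AAAA... placeholder"                        # placeholder key material
private_key = "-----BEGIN RSA PRIVATE KEY-----\n...placeholder..."

base = tempfile.mkdtemp()
pki_dir = os.path.join(base, "etc", "salt", "pki")

old_umask = os.umask(0o77)        # like util.umask(0o77): owner-only permissions
try:
    os.makedirs(pki_dir)
    for fname, data in (("minion.pub", public_key), ("minion.pem", private_key)):
        with open(os.path.join(pki_dir, fname), "w") as fp:
            fp.write(data)
finally:
    os.umask(old_umask)

print(oct(os.stat(os.path.join(pki_dir, "minion.pem")).st_mode & 0o777))  # 0o600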
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
deleted file mode 100644
index ee3b6c9f..00000000
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
-
-SCRIPT_SUBDIR = 'per-boot'
-
-
-def handle(name, _cfg, cloud, log, _args):
- # Comes from the following:
- # https://forums.aws.amazon.com/thread.jspa?threadID=96918
- runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
- try:
- util.runparts(runparts_path)
- except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
- raise
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
deleted file mode 100644
index c0d62b12..00000000
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-SCRIPT_SUBDIR = 'per-instance'
-
-
-def handle(name, _cfg, cloud, log, _args):
- # Comes from the following:
- # https://forums.aws.amazon.com/thread.jspa?threadID=96918
- runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
- try:
- util.runparts(runparts_path)
- except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
- raise
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
deleted file mode 100644
index ecb527f6..00000000
--- a/cloudinit/config/cc_scripts_per_once.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-from cloudinit.settings import PER_ONCE
-
-frequency = PER_ONCE
-
-SCRIPT_SUBDIR = 'per-once'
-
-
-def handle(name, _cfg, cloud, log, _args):
- # Comes from the following:
- # https://forums.aws.amazon.com/thread.jspa?threadID=96918
- runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
- try:
- util.runparts(runparts_path)
- except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
- raise
diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
deleted file mode 100644
index 699857d1..00000000
--- a/cloudinit/config/cc_scripts_user.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-SCRIPT_SUBDIR = 'scripts'
-
-
-def handle(name, _cfg, cloud, log, _args):
- # This is written to by the user data handlers
- # Ie, any custom shell scripts that come down
- # go here...
- runparts_path = os.path.join(cloud.get_ipath_cur(), SCRIPT_SUBDIR)
- try:
- util.runparts(runparts_path)
- except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
- raise
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
deleted file mode 100644
index 80bf10ff..00000000
--- a/cloudinit/config/cc_scripts_vendor.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 Canonical Ltd.
-#
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-SCRIPT_SUBDIR = 'vendor'
-
-
-def handle(name, cfg, cloud, log, _args):
- # This is written to by the vendor data handlers
- # any vendor data shell scripts get placed in runparts_path
- runparts_path = os.path.join(cloud.get_ipath_cur(), 'scripts',
- SCRIPT_SUBDIR)
-
- prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])
-
- try:
- util.runparts(runparts_path, exe_prefix=prefix)
- except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
- raise
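The cc_scripts_* modules above are thin wrappers around util.runparts: each runs every executable file in its scripts subdirectory, and the vendor variant can prepend an exe_prefix. A rough standalone sketch of that behavior against a throwaway directory (the real helper handles more edge cases):

import os
import stat
import subprocess
import tempfile

def runparts_sketch(dirpath, exe_prefix=None):
    # Run every executable file in dirpath in sorted order; raise if any fail.
    failed = 0
    for fname in sorted(os.listdir(dirpath)):
        path = os.path.join(dirpath, fname)
        if not (os.path.isfile(path) and os.access(path, os.X_OK)):
            continue
        cmd = (list(exe_prefix) if exe_prefix else []) + [path]
        if subprocess.call(cmd) != 0:
            failed += 1
    if failed:
        raise RuntimeError("%d script(s) failed in %s" % (failed, dirpath))

scripts_dir = tempfile.mkdtemp()
script = os.path.join(scripts_dir, "00-hello")
with open(script, "w") as fp:
    fp.write("#!/bin/sh\necho hello from a per-boot script\n")
os.chmod(script, os.stat(script).st_mode | stat.S_IEXEC)
runparts_sketch(scripts_dir)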
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
deleted file mode 100644
index 5085c23a..00000000
--- a/cloudinit/config/cc_seed_random.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2013 Yahoo! Inc.
-# Copyright (C) 2014 Canonical, Ltd
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-# Author: Dustin Kirkland <kirkland@ubuntu.com>
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import base64
-import os
-
-from six import BytesIO
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-frequency = PER_INSTANCE
-LOG = logging.getLogger(__name__)
-
-
-def _decode(data, encoding=None):
- if not data:
- return b''
- if not encoding or encoding.lower() in ['raw']:
- return util.encode_text(data)
- elif encoding.lower() in ['base64', 'b64']:
- return base64.b64decode(data)
- elif encoding.lower() in ['gzip', 'gz']:
- return util.decomp_gzip(data, quiet=False, decode=None)
- else:
- raise IOError("Unknown random_seed encoding: %s" % (encoding))
-
-
-def handle_random_seed_command(command, required, env=None):
- if not command and required:
- raise ValueError("no command found but required=true")
- elif not command:
- LOG.debug("no command provided")
- return
-
- cmd = command[0]
- if not util.which(cmd):
- if required:
- raise ValueError("command '%s' not found but required=true", cmd)
- else:
- LOG.debug("command '%s' not found for seed_command", cmd)
- return
- util.subp(command, env=env, capture=False)
-
-
-def handle(name, cfg, cloud, log, _args):
- mycfg = cfg.get('random_seed', {})
- seed_path = mycfg.get('file', '/dev/urandom')
- seed_data = mycfg.get('data', b'')
-
- seed_buf = BytesIO()
- if seed_data:
- seed_buf.write(_decode(seed_data, encoding=mycfg.get('encoding')))
-
- # 'random_seed' is set up by Azure datasource, and comes already in
- # openstack meta_data.json
- metadata = cloud.datasource.metadata
- if metadata and 'random_seed' in metadata:
- seed_buf.write(util.encode_text(metadata['random_seed']))
-
- seed_data = seed_buf.getvalue()
- if len(seed_data):
- log.debug("%s: adding %s bytes of random seed entropy to %s", name,
- len(seed_data), seed_path)
- util.append_file(seed_path, seed_data)
-
- command = mycfg.get('command', None)
- req = mycfg.get('command_required', False)
- try:
- env = os.environ.copy()
- env['RANDOM_SEED_FILE'] = seed_path
- handle_random_seed_command(command=command, required=req, env=env)
- except ValueError as e:
- log.warn("handling random command [%s] failed: %s", command, e)
- raise e
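The handler above decodes the configured seed, appends any datasource-provided random_seed, and writes the result to the seed file. A small standalone sketch of the decode-and-append step covering only the raw and base64 encodings (the gzip case and the seed command are omitted); it writes to a temp file rather than /dev/urandom:

import base64
import io
import os
import tempfile

def decode_seed(data, encoding=None):
    # Mirrors the raw/base64 branches of _decode() above.
    if not data:
        return b""
    enc = (encoding or "raw").lower()
    if enc == "raw":
        return data.encode() if isinstance(data, str) else data
    if enc in ("base64", "b64"):
        return base64.b64decode(data)
    raise IOError("Unknown random_seed encoding: %s" % encoding)

mycfg = {"data": base64.b64encode(os.urandom(16)).decode(), "encoding": "b64"}

buf = io.BytesIO()
buf.write(decode_seed(mycfg["data"], mycfg.get("encoding")))
buf.write(b"random-seed-from-metadata")       # stands in for metadata['random_seed']

with tempfile.NamedTemporaryFile(delete=False) as fp:
    seed_path = fp.name
with open(seed_path, "ab") as fp:             # append, as util.append_file does
    fp.write(buf.getvalue())
print("added %d bytes of seed data to %s" % (len(buf.getvalue()), seed_path))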
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
deleted file mode 100644
index f43d8d5a..00000000
--- a/cloudinit/config/cc_set_hostname.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import util
-
-
-def handle(name, cfg, cloud, log, _args):
- if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
- log.debug(("Configuration option 'preserve_hostname' is set,"
- " not setting the hostname in module %s"), name)
- return
-
- (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- try:
- log.debug("Setting the hostname to %s (%s)", fqdn, hostname)
- cloud.distro.set_hostname(hostname, fqdn)
- except Exception:
- util.logexc(log, "Failed to set the hostname to %s (%s)", fqdn,
- hostname)
- raise
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
deleted file mode 100644
index 5c8c23b8..00000000
--- a/cloudinit/config/cc_set_passwords.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
-from cloudinit import ssh_util
-from cloudinit import util
-
-from string import ascii_letters, digits
-
-# We are removing certain 'painful' letters/numbers
-PW_SET = (''.join([x for x in ascii_letters + digits
- if x not in 'loLOI01']))
-
-
-def handle(_name, cfg, cloud, log, args):
- if len(args) != 0:
- # if run from the command line with args given, wipe the chpasswd['list']
- password = args[0]
- if 'chpasswd' in cfg and 'list' in cfg['chpasswd']:
- del cfg['chpasswd']['list']
- else:
- password = util.get_cfg_option_str(cfg, "password", None)
-
- expire = True
- plist = None
-
- if 'chpasswd' in cfg:
- chfg = cfg['chpasswd']
- plist = util.get_cfg_option_str(chfg, 'list', plist)
- expire = util.get_cfg_option_bool(chfg, 'expire', expire)
-
- if not plist and password:
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
- (user, _user_config) = ds.extract_default(users)
- if user:
- plist = "%s:%s" % (user, password)
- else:
- log.warn("No default or defined user to change password for.")
-
- errors = []
- if plist:
- plist_in = []
- randlist = []
- users = []
- for line in plist.splitlines():
- u, p = line.split(':', 1)
- if p == "R" or p == "RANDOM":
- p = rand_user_password()
- randlist.append("%s:%s" % (u, p))
- plist_in.append("%s:%s" % (u, p))
- users.append(u)
-
- ch_in = '\n'.join(plist_in) + '\n'
- try:
- log.debug("Changing password for %s:", users)
- util.subp(['chpasswd'], ch_in)
- except Exception as e:
- errors.append(e)
- util.logexc(log, "Failed to set passwords with chpasswd for %s",
- users)
-
- if len(randlist):
- blurb = ("Set the following 'random' passwords\n",
- '\n'.join(randlist))
- sys.stderr.write("%s\n%s\n" % blurb)
-
- if expire:
- expired_users = []
- for u in users:
- try:
- util.subp(['passwd', '--expire', u])
- expired_users.append(u)
- except Exception as e:
- errors.append(e)
- util.logexc(log, "Failed to set 'expire' for %s", u)
- if expired_users:
- log.debug("Expired passwords for: %s users", expired_users)
-
- change_pwauth = False
- pw_auth = None
- if 'ssh_pwauth' in cfg:
- if util.is_true(cfg['ssh_pwauth']):
- change_pwauth = True
- pw_auth = 'yes'
- elif util.is_false(cfg['ssh_pwauth']):
- change_pwauth = True
- pw_auth = 'no'
- elif str(cfg['ssh_pwauth']).lower() == 'unchanged':
- log.debug('Leaving auth line unchanged')
- change_pwauth = False
- elif not str(cfg['ssh_pwauth']).strip():
- log.debug('Leaving auth line unchanged')
- change_pwauth = False
- elif not cfg['ssh_pwauth']:
- log.debug('Leaving auth line unchanged')
- change_pwauth = False
- else:
- msg = 'Unrecognized value %s for ssh_pwauth' % cfg['ssh_pwauth']
- util.logexc(log, msg)
-
- if change_pwauth:
- replaced_auth = False
-
- # See: man sshd_config
- old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG)
- new_lines = []
- i = 0
- for (i, line) in enumerate(old_lines):
- # Keywords are case-insensitive and arguments are case-sensitive
- if line.key == 'passwordauthentication':
- log.debug("Replacing auth line %s with %s", i + 1, pw_auth)
- replaced_auth = True
- line.value = pw_auth
- new_lines.append(line)
-
- if not replaced_auth:
- log.debug("Adding new auth line %s", i + 1)
- replaced_auth = True
- new_lines.append(ssh_util.SshdConfigLine('',
- 'PasswordAuthentication',
- pw_auth))
-
- lines = [str(l) for l in new_lines]
- util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines))
-
- try:
- cmd = cloud.distro.init_cmd # Default service
- cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh'))
- cmd.append('restart')
- if 'systemctl' in cmd: # Switch action ordering
- cmd[1], cmd[2] = cmd[2], cmd[1]
- cmd = list(filter(None, cmd)) # Remove empty arguments (keep a list on py3)
- util.subp(cmd)
- log.debug("Restarted the ssh daemon")
- except Exception:
- util.logexc(log, "Restarting of the ssh daemon failed")
-
- if len(errors):
- log.debug("%s errors occured, re-raising the last one", len(errors))
- raise errors[-1]
-
-
-def rand_user_password(pwlen=9):
- return util.rand_str(pwlen, select_from=PW_SET)
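The chpasswd handling above expands R/RANDOM entries to generated passwords, reports the generated values on stderr, and pipes user:password lines to chpasswd. A standalone sketch of building that input (the chpasswd/passwd calls themselves are left out; the user names are hypothetical):

import string
from random import SystemRandom

# Same idea as PW_SET above: drop easily-confused characters.
PW_SET = "".join(c for c in string.ascii_letters + string.digits if c not in "loLOI01")

def rand_user_password(pwlen=9):
    return "".join(SystemRandom().choice(PW_SET) for _ in range(pwlen))

chpasswd_list = "ubuntu:RANDOM\nadmin:s3cret"      # hypothetical chpasswd 'list' value

plist_in, randlist = [], []
for line in chpasswd_list.splitlines():
    user, password = line.split(":", 1)
    if password in ("R", "RANDOM"):
        password = rand_user_password()
        randlist.append("%s:%s" % (user, password))
    plist_in.append("%s:%s" % (user, password))

ch_in = "\n".join(plist_in) + "\n"    # this is what gets piped to `chpasswd`
print(ch_in, end="")
print("generated passwords:", randlist)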
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
deleted file mode 100644
index 1a485ee6..00000000
--- a/cloudinit/config/cc_snappy.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# vi: ts=4 expandtab
-#
-"""
-The snappy module allows configuration of snappy.
-Example config:
- #cloud-config
- snappy:
- system_snappy: auto
- ssh_enabled: auto
- packages: [etcd, pkg2.smoser]
- config:
- pkgname:
- key2: value2
- pkg2:
- key1: value1
- packages_dir: '/writable/user-data/cloud-init/snaps'
-
- - ssh_enabled:
- This controls the system's ssh service. The default value is 'auto'.
- True: enable ssh service
- False: disable ssh service
- auto: enable ssh service if either ssh keys have been provided
- or user has requested password authentication (ssh_pwauth).
-
- - snap installation and config
- The above would install 'etcd', and then install 'pkg2.smoser' with a
- '<config-file>' argument where 'config-file' contains the 'pkg2' entry
- from the 'config' map above.
- If 'pkgname' is installed already, then 'snappy config pkgname <file>'
- will be called where 'file' contains the 'pkgname' entry from 'config'.
-
- Entries in 'config' can be namespaced or non-namespaced for a package.
- In either case, the config provided to snappy command is non-namespaced.
- The package name is provided as it appears.
-
- If 'packages_dir' has files in it that end in '.snap', then they are
- installed. Given 3 files:
- <packages_dir>/foo.snap
- <packages_dir>/foo.config
- <packages_dir>/bar.snap
- cloud-init will invoke:
- snappy install <packages_dir>/foo.snap <packages_dir>/foo.config
- snappy install <packages_dir>/bar.snap
-
- Note, that if provided a 'config' entry for 'ubuntu-core', then
- cloud-init will invoke: snappy config ubuntu-core <config>
- Allowing you to configure ubuntu-core in this way.
-"""
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-import glob
-import os
-import tempfile
-
-LOG = logging.getLogger(__name__)
-
-frequency = PER_INSTANCE
-SNAPPY_CMD = "snappy"
-NAMESPACE_DELIM = '.'
-
-BUILTIN_CFG = {
- 'packages': [],
- 'packages_dir': '/writable/user-data/cloud-init/snaps',
- 'ssh_enabled': "auto",
- 'system_snappy': "auto",
- 'config': {},
-}
-
-
-def parse_filename(fname):
- fname = os.path.basename(fname)
- fname_noext = fname.rpartition(".")[0]
- name = fname_noext.partition("_")[0]
- shortname = name.partition(".")[0]
- return(name, shortname, fname_noext)
-
-
-def get_fs_package_ops(fspath):
- if not fspath:
- return []
- ops = []
- for snapfile in sorted(glob.glob(os.path.sep.join([fspath, '*.snap']))):
- (name, shortname, fname_noext) = parse_filename(snapfile)
- cfg = None
- for cand in (fname_noext, name, shortname):
- fpcand = os.path.sep.join([fspath, cand]) + ".config"
- if os.path.isfile(fpcand):
- cfg = fpcand
- break
- ops.append(makeop('install', name, config=None,
- path=snapfile, cfgfile=cfg))
- return ops
-
-
-def makeop(op, name, config=None, path=None, cfgfile=None):
- return({'op': op, 'name': name, 'config': config, 'path': path,
- 'cfgfile': cfgfile})
-
-
-def get_package_config(configs, name):
- # load the package's config from the configs dict.
- # prefer full-name entry (config-example.canonical)
- # over short name entry (config-example)
- if name in configs:
- return configs[name]
- return configs.get(name.partition(NAMESPACE_DELIM)[0])
-
-
-def get_package_ops(packages, configs, installed=None, fspath=None):
- # get the install and config operations that should be done
- if installed is None:
- installed = read_installed_packages()
- short_installed = [p.partition(NAMESPACE_DELIM)[0] for p in installed]
-
- if not packages:
- packages = []
- if not configs:
- configs = {}
-
- ops = []
- ops += get_fs_package_ops(fspath)
-
- for name in packages:
- ops.append(makeop('install', name, get_package_config(configs, name)))
-
- to_install = [f['name'] for f in ops]
- short_to_install = [f['name'].partition(NAMESPACE_DELIM)[0] for f in ops]
-
- for name in configs:
- if name in to_install:
- continue
- shortname = name.partition(NAMESPACE_DELIM)[0]
- if shortname in short_to_install:
- continue
- if name in installed or shortname in short_installed:
- ops.append(makeop('config', name,
- config=get_package_config(configs, name)))
-
- # prefer config entries to filepath entries
- for op in ops:
- if op['op'] != 'install' or not op['cfgfile']:
- continue
- name = op['name']
- fromcfg = get_package_config(configs, op['name'])
- if fromcfg:
- LOG.debug("preferring configs[%(name)s] over '%(cfgfile)s'", op)
- op['cfgfile'] = None
- op['config'] = fromcfg
-
- return ops
-
-
-def render_snap_op(op, name, path=None, cfgfile=None, config=None):
- if op not in ('install', 'config'):
- raise ValueError("cannot render op '%s'" % op)
-
- shortname = name.partition(NAMESPACE_DELIM)[0]
- try:
- cfg_tmpf = None
- if config is not None:
- # input to 'snappy config packagename' must have nested data. odd.
- # config:
- # packagename:
- # config
- # Note, however, we do not touch config files on disk.
- nested_cfg = {'config': {shortname: config}}
- (fd, cfg_tmpf) = tempfile.mkstemp()
- os.write(fd, util.yaml_dumps(nested_cfg).encode())
- os.close(fd)
- cfgfile = cfg_tmpf
-
- cmd = [SNAPPY_CMD, op]
- if op == 'install':
- if path:
- cmd.append("--allow-unauthenticated")
- cmd.append(path)
- else:
- cmd.append(name)
- if cfgfile:
- cmd.append(cfgfile)
- elif op == 'config':
- cmd += [name, cfgfile]
-
- util.subp(cmd)
-
- finally:
- if cfg_tmpf:
- os.unlink(cfg_tmpf)
-
-
-def read_installed_packages():
- ret = []
- for (name, date, version, dev) in read_pkg_data():
- if dev:
- ret.append(NAMESPACE_DELIM.join([name, dev]))
- else:
- ret.append(name)
- return ret
-
-
-def read_pkg_data():
- out, err = util.subp([SNAPPY_CMD, "list"])
- pkg_data = []
- for line in out.splitlines()[1:]:
- toks = line.split(sep=None, maxsplit=3)
- if len(toks) == 3:
- (name, date, version) = toks
- dev = None
- else:
- (name, date, version, dev) = toks
- pkg_data.append((name, date, version, dev,))
- return pkg_data
-
-
-def disable_enable_ssh(enabled):
- LOG.debug("setting enablement of ssh to: %s", enabled)
- # do something here that would enable or disable
- not_to_be_run = "/etc/ssh/sshd_not_to_be_run"
- if enabled:
- util.del_file(not_to_be_run)
- # this is an idempotent operation
- util.subp(["systemctl", "start", "ssh"])
- else:
- # this is an idempotent operation
- util.subp(["systemctl", "stop", "ssh"])
- util.write_file(not_to_be_run, "cloud-init\n")
-
-
-def system_is_snappy():
- # channel.ini is configparser loadable.
- # snappy will move to using /etc/system-image/config.d/*.ini
- # this is certainly not a perfect test, but good enough for now.
- content = util.load_file("/etc/system-image/channel.ini", quiet=True)
- if 'ubuntu-core' in content.lower():
- return True
- if os.path.isdir("/etc/system-image/config.d/"):
- return True
- return False
-
-
-def set_snappy_command():
- global SNAPPY_CMD
- if util.which("snappy-go"):
- SNAPPY_CMD = "snappy-go"
- else:
- SNAPPY_CMD = "snappy"
- LOG.debug("snappy command is '%s'", SNAPPY_CMD)
-
-
-def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('snappy')
- if not cfgin:
- cfgin = {}
- mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
-
- sys_snappy = str(mycfg.get("system_snappy", "auto"))
- if util.is_false(sys_snappy):
- LOG.debug("%s: System is not snappy. disabling", name)
- return
-
- if sys_snappy.lower() == "auto" and not(system_is_snappy()):
- LOG.debug("%s: 'auto' mode, and system not snappy", name)
- return
-
- set_snappy_command()
-
- pkg_ops = get_package_ops(packages=mycfg['packages'],
- configs=mycfg['config'],
- fspath=mycfg['packages_dir'])
-
- fails = []
- for pkg_op in pkg_ops:
- try:
- render_snap_op(**pkg_op)
- except Exception as e:
- fails.append((pkg_op, e,))
- LOG.warn("'%s' failed for '%s': %s",
- pkg_op['op'], pkg_op['name'], e)
-
- # Default to disabling SSH
- ssh_enabled = mycfg.get('ssh_enabled', "auto")
-
- # If the user has not explicitly enabled or disabled SSH, then enable it
- # when password SSH authentication is requested or there are SSH keys
- if ssh_enabled == "auto":
- user_ssh_keys = cloud.get_public_ssh_keys() or None
- password_auth_enabled = cfg.get('ssh_pwauth', False)
- if user_ssh_keys:
- LOG.debug("Enabling SSH, ssh keys found in datasource")
- ssh_enabled = True
- elif cfg.get('ssh_authorized_keys'):
- LOG.debug("Enabling SSH, ssh keys found in config")
- elif password_auth_enabled:
- LOG.debug("Enabling SSH, password authentication requested")
- ssh_enabled = True
- elif ssh_enabled not in (True, False):
- LOG.warn("Unknown value '%s' in ssh_enabled", ssh_enabled)
-
- disable_enable_ssh(ssh_enabled)
-
- if fails:
- raise Exception("failed to install/configure snaps")
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
deleted file mode 100644
index cb9b70aa..00000000
--- a/cloudinit/config/cc_ssh.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import glob
-import os
-import sys
-
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
-from cloudinit import ssh_util
-from cloudinit import util
-
-DISABLE_ROOT_OPTS = (
- "no-port-forwarding,no-agent-forwarding,"
- "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\""
- " rather than the user \\\"root\\\".\';echo;sleep 10\"")
-
-GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
-KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
-
-CONFIG_KEY_TO_FILE = {}
-PRIV_TO_PUB = {}
-for k in GENERATE_KEY_NAMES:
- CONFIG_KEY_TO_FILE.update({"%s_private" % k: (KEY_FILE_TPL % k, 0o600)})
- CONFIG_KEY_TO_FILE.update(
- {"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o600)})
- PRIV_TO_PUB["%s_private" % k] = "%s_public" % k
-
-KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
-
-
-def handle(_name, cfg, cloud, log, _args):
-
- # remove the static keys from the pristine image
- if cfg.get("ssh_deletekeys", True):
- key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
- for f in glob.glob(key_pth):
- try:
- util.del_file(f)
- except Exception:
- util.logexc(log, "Failed deleting key file %s", f)
-
- if "ssh_keys" in cfg:
- # if there are keys in cloud-config, use them
- for (key, val) in cfg["ssh_keys"].items():
- if key in CONFIG_KEY_TO_FILE:
- tgt_fn = CONFIG_KEY_TO_FILE[key][0]
- tgt_perms = CONFIG_KEY_TO_FILE[key][1]
- util.write_file(tgt_fn, val, tgt_perms)
-
- for (priv, pub) in PRIV_TO_PUB.items():
- if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
- continue
- pair = (CONFIG_KEY_TO_FILE[priv][0], CONFIG_KEY_TO_FILE[pub][0])
- cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
- try:
- # TODO(harlowja): Is this guard needed?
- with util.SeLinuxGuard("/etc/ssh", recursive=True):
- util.subp(cmd, capture=False)
- log.debug("Generated a key for %s from %s", pair[0], pair[1])
- except Exception:
- util.logexc(log, "Failed generated a key for %s from %s",
- pair[0], pair[1])
- else:
- # if not, generate them
- genkeys = util.get_cfg_option_list(cfg,
- 'ssh_genkeytypes',
- GENERATE_KEY_NAMES)
- lang_c = os.environ.copy()
- lang_c['LANG'] = 'C'
- for keytype in genkeys:
- keyfile = KEY_FILE_TPL % (keytype)
- if os.path.exists(keyfile):
- continue
- util.ensure_dir(os.path.dirname(keyfile))
- cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
-
- # TODO(harlowja): Is this guard needed?
- with util.SeLinuxGuard("/etc/ssh", recursive=True):
- try:
- out, err = util.subp(cmd, capture=True, env=lang_c)
- sys.stdout.write(util.decode_binary(out))
- except util.ProcessExecutionError as e:
- err = util.decode_binary(e.stderr).lower()
- if (e.exit_code == 1 and
- err.lower().startswith("unknown key")):
- log.debug("ssh-keygen: unknown key type '%s'", keytype)
- else:
- util.logexc(log, "Failed generating key type %s to "
- "file %s", keytype, keyfile)
-
- try:
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
- (user, _user_config) = ds.extract_default(users)
- disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
- disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
- DISABLE_ROOT_OPTS)
-
- keys = cloud.get_public_ssh_keys() or []
- if "ssh_authorized_keys" in cfg:
- cfgkeys = cfg["ssh_authorized_keys"]
- keys.extend(cfgkeys)
-
- apply_credentials(keys, user, disable_root, disable_root_opts)
- except Exception:
- util.logexc(log, "Applying ssh credentials failed!")
-
-
-def apply_credentials(keys, user, disable_root, disable_root_opts):
-
- keys = set(keys)
- if user:
- ssh_util.setup_user_keys(keys, user)
-
- if disable_root:
- if not user:
- user = "NONE"
- key_prefix = disable_root_opts.replace('$USER', user)
- else:
- key_prefix = ''
-
- ssh_util.setup_user_keys(keys, 'root', options=key_prefix)
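The ssh_keys handling above is driven by a table that maps config keys such as rsa_private onto host key paths and modes. A standalone restatement of that table plus a sketch of how a configured key would be routed (the key text is a placeholder):

GENERATE_KEY_NAMES = ["rsa", "dsa", "ecdsa", "ed25519"]
KEY_FILE_TPL = "/etc/ssh/ssh_host_%s_key"

CONFIG_KEY_TO_FILE = {}
for k in GENERATE_KEY_NAMES:
    CONFIG_KEY_TO_FILE["%s_private" % k] = (KEY_FILE_TPL % k, 0o600)
    CONFIG_KEY_TO_FILE["%s_public" % k] = (KEY_FILE_TPL % k + ".pub", 0o600)

cfg_ssh_keys = {"rsa_private": "-----BEGIN RSA PRIVATE KEY-----\n...placeholder..."}
for key, val in cfg_ssh_keys.items():
    if key in CONFIG_KEY_TO_FILE:
        tgt_fn, tgt_perms = CONFIG_KEY_TO_FILE[key]
        print("would write %d bytes to %s with mode %o" % (len(val), tgt_fn, tgt_perms))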
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
deleted file mode 100644
index 6ce831bc..00000000
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import base64
-import hashlib
-
-from prettytable import PrettyTable
-
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
-from cloudinit import ssh_util
-from cloudinit import util
-
-
-def _split_hash(bin_hash):
- split_up = []
- for i in range(0, len(bin_hash), 2):
- split_up.append(bin_hash[i:i + 2])
- return split_up
-
-
-def _gen_fingerprint(b64_text, hash_meth='md5'):
- if not b64_text:
- return ''
- # TBD(harlowja): Maybe we should feed this into 'ssh-keygen -lf'?
- try:
- hasher = hashlib.new(hash_meth)
- hasher.update(base64.b64decode(b64_text))
- return ":".join(_split_hash(hasher.hexdigest()))
- except (TypeError, ValueError):
- # Raised when b64 not really b64...
- # or when the hash type is not really
- # a known/supported hash type...
- return '?'
-
-
-def _is_printable_key(entry):
- if any([entry.keytype, entry.base64, entry.comment, entry.options]):
- if (entry.keytype and
- entry.keytype.lower().strip() in ['ssh-dss', 'ssh-rsa']):
- return True
- return False
-
-
-def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
- prefix='ci-info: '):
- if not key_entries:
- message = ("%sno authorized ssh keys fingerprints found for user %s.\n"
- % (prefix, user))
- util.multi_log(message)
- return
- tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options',
- 'Comment']
- tbl = PrettyTable(tbl_fields)
- for entry in key_entries:
- if _is_printable_key(entry):
- row = []
- row.append(entry.keytype or '-')
- row.append(_gen_fingerprint(entry.base64, hash_meth) or '-')
- row.append(entry.options or '-')
- row.append(entry.comment or '-')
- tbl.add_row(row)
- authtbl_s = tbl.get_string()
- authtbl_lines = authtbl_s.splitlines()
- max_len = len(max(authtbl_lines, key=len))
- lines = [
- util.center("Authorized keys from %s for user %s" %
- (key_fn, user), "+", max_len),
- ]
- lines.extend(authtbl_lines)
- for line in lines:
- util.multi_log(text="%s%s\n" % (prefix, line),
- stderr=False, console=True)
-
-
-def handle(name, cfg, cloud, log, _args):
- if util.is_true(cfg.get('no_ssh_fingerprints', False)):
- log.debug(("Skipping module named %s, "
- "logging of ssh fingerprints disabled"), name)
- return
-
- hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
- for (user_name, _cfg) in users.items():
- (key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
- _pprint_key_entries(user_name, key_fn,
- key_entries, hash_meth)
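_gen_fingerprint above hashes the decoded key material and renders the digest as colon-separated byte pairs, which is what ends up in the console table. A standalone sketch of the same computation (the input here is arbitrary bytes, not a real ssh key):

import base64
import hashlib

def fingerprint(b64_text, hash_meth="md5"):
    # Colon-separated hex digest of the base64 key body, like _gen_fingerprint.
    digest = hashlib.new(hash_meth, base64.b64decode(b64_text)).hexdigest()
    return ":".join(digest[i:i + 2] for i in range(0, len(digest), 2))

sample = base64.b64encode(b"not a real ssh key").decode()
print(fingerprint(sample))
print(fingerprint(sample, hash_meth="sha256"))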
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
deleted file mode 100644
index 28c4585b..00000000
--- a/cloudinit/config/cc_ssh_import_id.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
-from cloudinit import util
-import pwd
-
-# https://launchpad.net/ssh-import-id
-distros = ['ubuntu', 'debian']
-
-
-def handle(_name, cfg, cloud, log, args):
-
- # import for "user: XXXXX"
- if len(args) != 0:
- user = args[0]
- ids = []
- if len(args) > 1:
- ids = args[1:]
-
- import_ssh_ids(ids, user, log)
- return
-
- # import for cloudinit created users
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
- elist = []
- for (user, user_cfg) in users.items():
- import_ids = []
- if user_cfg['default']:
- import_ids = util.get_cfg_option_list(cfg, "ssh_import_id", [])
- else:
- try:
- import_ids = user_cfg['ssh_import_id']
- except Exception:
- log.debug("User %s is not configured for ssh_import_id", user)
- continue
-
- try:
- import_ids = util.uniq_merge(import_ids)
- import_ids = [str(i) for i in import_ids]
- except Exception:
- log.debug("User %s is not correctly configured for ssh_import_id",
- user)
- continue
-
- if not len(import_ids):
- continue
-
- try:
- import_ssh_ids(import_ids, user, log)
- except Exception as exc:
- util.logexc(log, "ssh-import-id failed for: %s %s", user,
- import_ids)
- elist.append(exc)
-
- if len(elist):
- raise elist[0]
-
-
-def import_ssh_ids(ids, user, log):
-
- if not (user and ids):
- log.debug("empty user(%s) or ids(%s). not importing", user, ids)
- return
-
- try:
- pwd.getpwnam(user)
- except KeyError as exc:
- raise exc
-
- cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids
- log.debug("Importing ssh ids for user %s.", user)
-
- try:
- util.subp(cmd, capture=False)
- except util.ProcessExecutionError as exc:
- util.logexc(log, "Failed to run command to import %s ssh ids", user)
- raise exc
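import_ssh_ids above is a thin wrapper that shells out to ssh-import-id as the target user. A tiny sketch of the command it builds (user and ids are hypothetical; util.uniq_merge is approximated with an ordered de-duplication):

user = "ubuntu"                                     # hypothetical cloud-config user
import_ids = ["lp:smoser", "gh:someone", "lp:smoser"]

ids = [str(i) for i in dict.fromkeys(import_ids)]   # de-duplicate, keep order
cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids
print(" ".join(cmd))    # sudo -Hu ubuntu ssh-import-id lp:smoser gh:someone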
diff --git a/cloudinit/config/cc_timezone.py b/cloudinit/config/cc_timezone.py
deleted file mode 100644
index b9eb85b2..00000000
--- a/cloudinit/config/cc_timezone.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import util
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-
-def handle(name, cfg, cloud, log, args):
- if len(args) != 0:
- timezone = args[0]
- else:
- timezone = util.get_cfg_option_str(cfg, "timezone", False)
-
- if not timezone:
- log.debug("Skipping module named %s, no 'timezone' specified", name)
- return
-
- # Let the distro handle setting its timezone
- cloud.distro.set_timezone(timezone)
diff --git a/cloudinit/config/cc_ubuntu_init_switch.py b/cloudinit/config/cc_ubuntu_init_switch.py
deleted file mode 100644
index 884d79f1..00000000
--- a/cloudinit/config/cc_ubuntu_init_switch.py
+++ /dev/null
@@ -1,162 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-**Summary:** reboot system into another init.
-
-**Description:** This module provides a way for the user to boot with systemd
-even if the image is set to boot with upstart. It should be run as one of the
-first ``cloud_init_modules``, and will switch the init system and then issue a
-reboot. The next boot will come up in the target init system and no action will
-be taken.
-
-This should be inert on non-ubuntu systems, and also exit quickly.
-
-It can be configured with the following option structure::
-
- init_switch:
- target: systemd (can be 'systemd' or 'upstart')
- reboot: true (reboot if a change was made, or false to not reboot)
-
-.. note::
-
- Best effort is made, but it's possible
- this system will break, and probably won't interact well with any other
- mechanism you've used to switch the init system.
-"""
-
-from cloudinit.distros import ubuntu
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-import os
-import time
-
-frequency = PER_INSTANCE
-REBOOT_CMD = ["/sbin/reboot", "--force"]
-
-DEFAULT_CONFIG = {
- 'init_switch': {'target': None, 'reboot': True}
-}
-
-SWITCH_INIT = """
-#!/bin/sh
-# switch_init: [upstart | systemd]
-
-is_systemd() {
- [ "$(dpkg-divert --listpackage /sbin/init)" = "systemd-sysv" ]
-}
-debug() { echo "$@" 1>&2; }
-fail() { echo "$@" 1>&2; exit 1; }
-
-if [ "$1" = "systemd" ]; then
- if is_systemd; then
- debug "already systemd, nothing to do"
- else
- [ -f /lib/systemd/systemd ] || fail "no systemd available";
- dpkg-divert --package systemd-sysv --divert /sbin/init.diverted \\
- --rename /sbin/init
- fi
- [ -f /sbin/init ] || ln /lib/systemd/systemd /sbin/init
-elif [ "$1" = "upstart" ]; then
- if is_systemd; then
- rm -f /sbin/init
- dpkg-divert --package systemd-sysv --rename --remove /sbin/init
- else
- debug "already upstart, nothing to do."
- fi
-else
- fail "Error. expect 'upstart' or 'systemd'"
-fi
-"""
-
-
-def handle(name, cfg, cloud, log, args):
- """Handler method activated by cloud-init."""
-
- if not isinstance(cloud.distro, ubuntu.Distro):
- log.debug("%s: distro is '%s', not ubuntu. returning",
- name, cloud.distro.__class__)
- return
-
- cfg = util.mergemanydict([cfg, DEFAULT_CONFIG])
- target = cfg['init_switch']['target']
- reboot = cfg['init_switch']['reboot']
-
- if len(args) != 0:
- target = args[0]
- if len(args) > 1:
- reboot = util.is_true(args[1])
-
- if not target:
- log.debug("%s: target=%s. nothing to do", name, target)
- return
-
- if not util.which('dpkg'):
- log.warn("%s: 'dpkg' not available. Assuming not ubuntu", name)
- return
-
- supported = ('upstart', 'systemd')
- if target not in supported:
- log.warn("%s: target set to %s, expected one of: %s",
- name, target, str(supported))
-
- if os.path.exists("/run/systemd/system"):
- current = "systemd"
- else:
- current = "upstart"
-
- if current == target:
- log.debug("%s: current = target = %s. nothing to do", name, target)
- return
-
- try:
- util.subp(['sh', '-s', target], data=SWITCH_INIT)
- except util.ProcessExecutionError as e:
- log.warn("%s: Failed to switch to init '%s'. %s", name, target, e)
- return
-
- if util.is_false(reboot):
- log.info("%s: switched '%s' to '%s'. reboot=false, not rebooting.",
- name, current, target)
- return
-
- try:
- log.warn("%s: switched '%s' to '%s'. rebooting.",
- name, current, target)
- logging.flushLoggers(log)
- _fire_reboot(log, wait_attempts=4, initial_sleep=4)
- except Exception as e:
- util.logexc(log, "Requested reboot did not happen!")
- raise
-
-
-def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
- util.subp(REBOOT_CMD)
- start = time.time()
- wait_time = initial_sleep
- for _i in range(0, wait_attempts):
- time.sleep(wait_time)
- wait_time *= backoff
- elapsed = time.time() - start
- log.debug("Rebooted, but still running after %s seconds", int(elapsed))
- # If we got here, not good
- elapsed = time.time() - start
- raise RuntimeError(("Reboot did not happen"
- " after %s seconds!") % (int(elapsed)))
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
deleted file mode 100644
index 15703efe..00000000
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import templater
-from cloudinit import util
-
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
-
-
-def handle(name, cfg, cloud, log, _args):
- manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
- if util.translate_bool(manage_hosts, addons=['template']):
- (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- if not hostname:
- log.warn(("Option 'manage_etc_hosts' was set,"
- " but no hostname was found"))
- return
-
- # Render from a template file
- tpl_fn_name = cloud.get_template_filename("hosts.%s" %
- (cloud.distro.osfamily))
- if not tpl_fn_name:
- raise RuntimeError(("No hosts template could be"
- " found for distro %s") %
- (cloud.distro.osfamily))
-
- templater.render_to_file(tpl_fn_name, '/etc/hosts',
- {'hostname': hostname, 'fqdn': fqdn})
-
- elif manage_hosts == "localhost":
- (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- if not hostname:
- log.warn(("Option 'manage_etc_hosts' was set,"
- " but no hostname was found"))
- return
-
- log.debug("Managing localhost in /etc/hosts")
- cloud.distro.update_etc_hosts(hostname, fqdn)
- else:
- log.debug(("Configuration option 'manage_etc_hosts' is not set,"
- " not managing /etc/hosts in module %s"), name)
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
deleted file mode 100644
index 5b78afe1..00000000
--- a/cloudinit/config/cc_update_hostname.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
-
-frequency = PER_ALWAYS
-
-
-def handle(name, cfg, cloud, log, _args):
- if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
- log.debug(("Configuration option 'preserve_hostname' is set,"
- " not updating the hostname in module %s"), name)
- return
-
- (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- try:
- prev_fn = os.path.join(cloud.get_cpath('data'), "previous-hostname")
- log.debug("Updating hostname to %s (%s)", fqdn, hostname)
- cloud.distro.update_hostname(hostname, fqdn, prev_fn)
- except Exception:
- util.logexc(log, "Failed to update the hostname to %s (%s)", fqdn,
- hostname)
- raise
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
deleted file mode 100644
index bf5b4581..00000000
--- a/cloudinit/config/cc_users_groups.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-#
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-
-def handle(name, cfg, cloud, _log, _args):
- (users, groups) = ds.normalize_users_groups(cfg, cloud.distro)
- for (name, members) in groups.items():
- cloud.distro.create_group(name, members)
- for (user, config) in users.items():
- cloud.distro.create_user(user, **config)
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
deleted file mode 100644
index b1096b9b..00000000
--- a/cloudinit/config/cc_write_files.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import base64
-import os
-import six
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-frequency = PER_INSTANCE
-
-DEFAULT_OWNER = "root:root"
-DEFAULT_PERMS = 0o644
-UNKNOWN_ENC = 'text/plain'
-
-
-def handle(name, cfg, _cloud, log, _args):
- files = cfg.get('write_files')
- if not files:
- log.debug(("Skipping module named %s,"
- " no/empty 'write_files' key in configuration"), name)
- return
- write_files(name, files, log)
-
-
-def canonicalize_extraction(encoding_type, log):
- if not encoding_type:
- encoding_type = ''
- encoding_type = encoding_type.lower().strip()
- if encoding_type in ['gz', 'gzip']:
- return ['application/x-gzip']
- if encoding_type in ['gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64']:
- return ['application/base64', 'application/x-gzip']
- # Yaml already encodes binary data as base64 if it is given to the
- # yaml file as binary, so those will be automatically decoded for you.
- # But the above b64 is just for people that are more 'comfortable'
- # specifying it manually (which might be a possibility)
- if encoding_type in ['b64', 'base64']:
- return ['application/base64']
- if encoding_type:
- log.warn("Unknown encoding type %s, assuming %s",
- encoding_type, UNKNOWN_ENC)
- return [UNKNOWN_ENC]
-
-
-def write_files(name, files, log):
- if not files:
- return
-
- for (i, f_info) in enumerate(files):
- path = f_info.get('path')
- if not path:
- log.warn("No path provided to write for entry %s in module %s",
- i + 1, name)
- continue
- path = os.path.abspath(path)
- extractions = canonicalize_extraction(f_info.get('encoding'), log)
- contents = extract_contents(f_info.get('content', ''), extractions)
- (u, g) = util.extract_usergroup(f_info.get('owner', DEFAULT_OWNER))
- perms = decode_perms(f_info.get('permissions'), DEFAULT_PERMS, log)
- util.write_file(path, contents, mode=perms)
- util.chownbyname(path, u, g)
-
-
-def decode_perms(perm, default, log):
- if perm is None:
- return default
- try:
- if isinstance(perm, six.integer_types + (float,)):
- # Just 'downcast' it (if a float)
- return int(perm)
- else:
- # Force to string and try octal conversion
- return int(str(perm), 8)
- except (TypeError, ValueError):
- log.warn("Undecodable permissions %s, assuming %s", perm, default)
- return default
-
-
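decode_perms() accepts numeric or string permissions, treating strings as octal and falling back to the default on anything unparseable. Illustrative expectations, assuming the function and DEFAULT_PERMS above are in scope:

import logging

log = logging.getLogger("example")
assert decode_perms(None, DEFAULT_PERMS, log) == 0o644
assert decode_perms(0o600, DEFAULT_PERMS, log) == 0o600        # ints pass straight through
assert decode_perms(420.0, DEFAULT_PERMS, log) == 420          # floats are truncated (420 == 0o644)
assert decode_perms('0600', DEFAULT_PERMS, log) == 0o600       # strings are parsed as octal
assert decode_perms('rw-r--r--', DEFAULT_PERMS, log) == 0o644  # unparseable -> default, with a warning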
-def extract_contents(contents, extraction_types):
- result = contents
- for t in extraction_types:
- if t == 'application/x-gzip':
- result = util.decomp_gzip(result, quiet=False, decode=False)
- elif t == 'application/base64':
- result = base64.b64decode(result)
- elif t == UNKNOWN_ENC:
- pass
- return result
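extract_contents() applies the decode steps in list order, so a 'gz+b64' payload is base64-decoded and then gunzipped. A self-contained, stdlib-only sketch of that round trip (Python 3); gzip.decompress() stands in for util.decomp_gzip(), which is assumed to behave the same way:

import base64
import gzip
import io

original = b"hello from write_files\n"
buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode='wb') as gz:
    gz.write(original)
payload = base64.b64encode(buf.getvalue())   # what a 'gz+b64' entry carries in 'content'

step1 = base64.b64decode(payload)            # 'application/base64'
step2 = gzip.decompress(step1)               # 'application/x-gzip'
assert step2 == original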
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
deleted file mode 100644
index 64fba869..00000000
--- a/cloudinit/config/cc_yum_add_repo.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-import configobj
-import six
-
-from cloudinit import util
-
-
-def _canonicalize_id(repo_id):
- repo_id = repo_id.lower().replace("-", "_")
- repo_id = repo_id.replace(" ", "_")
- return repo_id
-
-
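Repo ids are lower-cased, with dashes and spaces turned into underscores, so they yield stable file names. For example, following the replace() calls above:

assert _canonicalize_id('EPEL-Testing') == 'epel_testing'
assert _canonicalize_id('my repo 1') == 'my_repo_1'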
-def _format_repo_value(val):
- if isinstance(val, (bool)):
- # Seems like yum prefers 1/0
- return str(int(val))
- if isinstance(val, (list, tuple)):
- # Can handle 'lists' in certain cases
- # See: http://bit.ly/Qqrf1t
- return "\n ".join([_format_repo_value(v) for v in val])
- if not isinstance(val, six.string_types):
- return str(val)
- return val
-
-
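_format_repo_value() normalizes values into the forms yum expects: booleans become "1"/"0", lists collapse into one continued value, and everything else is stringified. Illustrative results, assuming the function above is in scope:

assert _format_repo_value(True) == '1'
assert _format_repo_value(False) == '0'
assert _format_repo_value(3) == '3'
# Lists/tuples are joined with a newline plus indentation so yum reads
# them as a single continued value (e.g. a baseurl with several mirrors).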
-# TODO(harlowja): move to distro?
-# See man yum.conf
-def _format_repository_config(repo_id, repo_config):
- to_be = configobj.ConfigObj()
- to_be[repo_id] = {}
- # Do basic translation of the items -> values
- for (k, v) in repo_config.items():
- # For now assume that people using this know
- # the format of yum and don't verify keys/values further
- to_be[repo_id][k] = _format_repo_value(v)
- lines = to_be.write()
- lines.insert(0, "# Created by cloud-init on %s" % (util.time_rfc2822()))
- return "\n".join(lines)
-
-
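The ConfigObj round trip above yields a standard INI-style section with a provenance comment prepended. For a hypothetical repo, the rendered .repo text would look roughly like the following (key order follows the input dict and the timestamp varies):

rendered = _format_repository_config(
    'epel_testing',
    {'baseurl': 'http://example.com/epel', 'enabled': '1', 'gpgcheck': '0'})
print(rendered)
# Roughly:
#   # Created by cloud-init on Mon, 01 Jan 2024 00:00:00 +0000
#   [epel_testing]
#   baseurl = http://example.com/epel
#   enabled = 1
#   gpgcheck = 0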
-def handle(name, cfg, _cloud, log, _args):
- repos = cfg.get('yum_repos')
- if not repos:
- log.debug(("Skipping module named %s,"
- " no 'yum_repos' configuration found"), name)
- return
- repo_base_path = util.get_cfg_option_str(cfg, 'yum_repo_dir',
- '/etc/yum.repos.d/')
- repo_locations = {}
- repo_configs = {}
- for (repo_id, repo_config) in repos.items():
- canon_repo_id = _canonicalize_id(repo_id)
- repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id))
- if os.path.exists(repo_fn_pth):
- log.info("Skipping repo %s, file %s already exists!",
- repo_id, repo_fn_pth)
- continue
- elif canon_repo_id in repo_locations:
- log.info("Skipping repo %s, file %s already pending!",
- repo_id, repo_fn_pth)
- continue
- if not repo_config:
- repo_config = {}
- # Do some basic sanity checks/cleaning
- n_repo_config = {}
- for (k, v) in repo_config.items():
- k = k.lower().strip().replace("-", "_")
- if k:
- n_repo_config[k] = v
- repo_config = n_repo_config
- missing_required = 0
- for req_field in ['baseurl']:
- if req_field not in repo_config:
- log.warn(("Repository %s does not contain a %s"
- " configuration 'required' entry"),
- repo_id, req_field)
- missing_required += 1
- if not missing_required:
- repo_configs[canon_repo_id] = repo_config
- repo_locations[canon_repo_id] = repo_fn_pth
- else:
- log.warn("Repository %s is missing %s required fields, skipping!",
- repo_id, missing_required)
- for (c_repo_id, path) in repo_locations.items():
- repo_blob = _format_repository_config(c_repo_id,
- repo_configs.get(c_repo_id))
- util.write_file(path, repo_blob)
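End to end, each entry under 'yum_repos' becomes one file under yum_repo_dir unless that file already exists or the entry lacks a baseurl. A hedged sketch of the parsed cloud-config this handler expects; every name, URL, and value below is invented for illustration:

cfg = {
    'yum_repo_dir': '/etc/yum.repos.d/',            # optional; this is the default
    'yum_repos': {
        'epel-testing': {                           # canonicalized to 'epel_testing'
            'name': 'Extra Packages - Testing',
            'baseurl': 'http://example.com/epel',   # required; missing -> repo skipped
            'enabled': False,                       # rendered as 0
            'gpgcheck': True,                       # rendered as 1
        },
    },
}
# With the defaults above this would write /etc/yum.repos.d/epel_testing.repo.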