Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/__init__.py  21
-rw-r--r--  cloudinit/cloud.py  109
-rw-r--r--  cloudinit/cmd/__init__.py  21
-rw-r--r--  cloudinit/cmd/main.py  685
-rw-r--r--  cloudinit/config/__init__.py  58
-rw-r--r--  cloudinit/config/cc_apt_configure.py  319
-rw-r--r--  cloudinit/config/cc_apt_pipelining.py  57
-rw-r--r--  cloudinit/config/cc_bootcmd.py  54
-rw-r--r--  cloudinit/config/cc_byobu.py  80
-rw-r--r--  cloudinit/config/cc_ca_certs.py  104
-rw-r--r--  cloudinit/config/cc_chef.py  342
-rw-r--r--  cloudinit/config/cc_debug.py  109
-rw-r--r--  cloudinit/config/cc_disable_ec2_metadata.py  36
-rw-r--r--  cloudinit/config/cc_disk_setup.py  863
-rw-r--r--  cloudinit/config/cc_emit_upstart.py  69
-rw-r--r--  cloudinit/config/cc_fan.py  101
-rw-r--r--  cloudinit/config/cc_final_message.py  73
-rw-r--r--  cloudinit/config/cc_foo.py  52
-rw-r--r--  cloudinit/config/cc_growpart.py  300
-rw-r--r--  cloudinit/config/cc_grub_dpkg.py  73
-rw-r--r--  cloudinit/config/cc_keys_to_console.py  62
-rw-r--r--  cloudinit/config/cc_landscape.py  99
-rw-r--r--  cloudinit/config/cc_locale.py  37
-rw-r--r--  cloudinit/config/cc_lxd.py  177
-rw-r--r--  cloudinit/config/cc_mcollective.py  106
-rw-r--r--  cloudinit/config/cc_migrator.py  85
-rw-r--r--  cloudinit/config/cc_mounts.py  405
-rw-r--r--  cloudinit/config/cc_package_update_upgrade_install.py  99
-rw-r--r--  cloudinit/config/cc_phone_home.py  122
-rw-r--r--  cloudinit/config/cc_power_state_change.py  223
-rw-r--r--  cloudinit/config/cc_puppet.py  118
-rw-r--r--  cloudinit/config/cc_resizefs.py  185
-rw-r--r--  cloudinit/config/cc_resolv_conf.py  116
-rw-r--r--  cloudinit/config/cc_rh_subscription.py  408
-rw-r--r--  cloudinit/config/cc_rightscale_userdata.py  102
-rw-r--r--  cloudinit/config/cc_rsyslog.py  366
-rw-r--r--  cloudinit/config/cc_runcmd.py  38
-rw-r--r--  cloudinit/config/cc_salt_minion.py  59
-rw-r--r--  cloudinit/config/cc_scripts_per_boot.py  41
-rw-r--r--  cloudinit/config/cc_scripts_per_instance.py  41
-rw-r--r--  cloudinit/config/cc_scripts_per_once.py  41
-rw-r--r--  cloudinit/config/cc_scripts_user.py  42
-rw-r--r--  cloudinit/config/cc_scripts_vendor.py  43
-rw-r--r--  cloudinit/config/cc_seed_random.py  94
-rw-r--r--  cloudinit/config/cc_set_hostname.py  37
-rw-r--r--  cloudinit/config/cc_set_passwords.py  167
-rw-r--r--  cloudinit/config/cc_snappy.py  304
-rw-r--r--  cloudinit/config/cc_ssh.py  142
-rw-r--r--  cloudinit/config/cc_ssh_authkey_fingerprints.py  105
-rw-r--r--  cloudinit/config/cc_ssh_import_id.py  99
-rw-r--r--  cloudinit/config/cc_timezone.py  39
-rw-r--r--  cloudinit/config/cc_ubuntu_init_switch.py  162
-rw-r--r--  cloudinit/config/cc_update_etc_hosts.py  60
-rw-r--r--  cloudinit/config/cc_update_hostname.py  43
-rw-r--r--  cloudinit/config/cc_users_groups.py  34
-rw-r--r--  cloudinit/config/cc_write_files.py  105
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py  107
-rw-r--r--  cloudinit/cs_utils.py  106
-rw-r--r--  cloudinit/distros/__init__.py  980
-rw-r--r--  cloudinit/distros/arch.py  201
-rw-r--r--  cloudinit/distros/debian.py  236
-rw-r--r--  cloudinit/distros/fedora.py  31
-rw-r--r--  cloudinit/distros/freebsd.py  417
-rw-r--r--  cloudinit/distros/gentoo.py  160
-rw-r--r--  cloudinit/distros/net_util.py  182
-rw-r--r--  cloudinit/distros/parsers/__init__.py  28
-rw-r--r--  cloudinit/distros/parsers/hostname.py  88
-rw-r--r--  cloudinit/distros/parsers/hosts.py  92
-rw-r--r--  cloudinit/distros/parsers/resolv_conf.py  169
-rw-r--r--  cloudinit/distros/parsers/sys_conf.py  113
-rw-r--r--  cloudinit/distros/rhel.py  230
-rw-r--r--  cloudinit/distros/rhel_util.py  89
-rw-r--r--  cloudinit/distros/sles.py  179
-rw-r--r--  cloudinit/distros/ubuntu.py  31
-rw-r--r--  cloudinit/ec2_utils.py  201
-rw-r--r--  cloudinit/filters/__init__.py  21
-rw-r--r--  cloudinit/filters/launch_index.py  75
-rw-r--r--  cloudinit/gpg.py  74
-rw-r--r--  cloudinit/handlers/__init__.py  274
-rw-r--r--  cloudinit/handlers/boot_hook.py  70
-rw-r--r--  cloudinit/handlers/cloud_config.py  163
-rw-r--r--  cloudinit/handlers/shell_script.py  55
-rw-r--r--  cloudinit/handlers/upstart_job.py  119
-rw-r--r--  cloudinit/helpers.py  460
-rw-r--r--  cloudinit/importer.py  58
-rw-r--r--  cloudinit/log.py  155
-rw-r--r--  cloudinit/mergers/__init__.py  166
-rw-r--r--  cloudinit/mergers/m_dict.py  88
-rw-r--r--  cloudinit/mergers/m_list.py  89
-rw-r--r--  cloudinit/mergers/m_str.py  46
-rw-r--r--  cloudinit/net/__init__.py  371
-rw-r--r--  cloudinit/net/cmdline.py  203
-rw-r--r--  cloudinit/net/eni.py  504
-rw-r--r--  cloudinit/net/network_state.py  454
-rw-r--r--  cloudinit/net/renderer.py  48
-rw-r--r--  cloudinit/net/sysconfig.py  400
-rw-r--r--  cloudinit/net/udev.py  54
-rw-r--r--  cloudinit/netinfo.py  249
-rw-r--r--  cloudinit/patcher.py  58
-rw-r--r--  cloudinit/registry.py  37
-rw-r--r--  cloudinit/reporting/__init__.py  42
-rw-r--r--  cloudinit/reporting/events.py  248
-rw-r--r--  cloudinit/reporting/handlers.py  91
-rw-r--r--  cloudinit/safeyaml.py  32
-rw-r--r--  cloudinit/serial.py  50
-rw-r--r--  cloudinit/settings.py  68
-rw-r--r--  cloudinit/signal_handler.py  71
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py  292
-rw-r--r--  cloudinit/sources/DataSourceAzure.py  651
-rw-r--r--  cloudinit/sources/DataSourceBigstep.py  57
-rw-r--r--  cloudinit/sources/DataSourceCloudSigma.py  132
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py  253
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py  278
-rw-r--r--  cloudinit/sources/DataSourceDigitalOcean.py  110
-rw-r--r--  cloudinit/sources/DataSourceEc2.py  211
-rw-r--r--  cloudinit/sources/DataSourceGCE.py  167
-rw-r--r--  cloudinit/sources/DataSourceMAAS.py  353
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py  323
-rw-r--r--  cloudinit/sources/DataSourceNone.py  57
-rw-r--r--  cloudinit/sources/DataSourceOVF.py  429
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py  429
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py  168
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py  781
-rw-r--r--  cloudinit/sources/__init__.py  371
-rw-r--r--  cloudinit/sources/helpers/__init__.py  13
-rw-r--r--  cloudinit/sources/helpers/azure.py  279
-rw-r--r--  cloudinit/sources/helpers/openstack.py  648
-rw-r--r--  cloudinit/sources/helpers/vmware/__init__.py  13
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/__init__.py  13
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/boot_proto.py  25
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config.py  95
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_file.py  129
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_namespace.py  25
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_nic.py  247
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_source.py  23
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_error.py  24
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_event.py  27
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_state.py  25
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_util.py  128
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/ipv4_mode.py  45
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/nic.py  147
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/nic_base.py  154
-rw-r--r--  cloudinit/ssh_util.py  314
-rw-r--r--  cloudinit/stages.py  890
-rw-r--r--  cloudinit/templater.py  155
-rw-r--r--  cloudinit/type_utils.py  52
-rw-r--r--  cloudinit/url_helper.py  509
-rw-r--r--  cloudinit/user_data.py  356
-rw-r--r--  cloudinit/util.py  2246
-rw-r--r--  cloudinit/version.py  27
150 files changed, 0 insertions, 28136 deletions
diff --git a/cloudinit/__init__.py b/cloudinit/__init__.py
deleted file mode 100644
index da124641..00000000
--- a/cloudinit/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
deleted file mode 100644
index 3e6be203..00000000
--- a/cloudinit/cloud.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import copy
-import os
-
-from cloudinit import log as logging
-from cloudinit.reporting import events
-
-LOG = logging.getLogger(__name__)
-
-# This class is the high-level wrapper that provides
-# access to cloud-init objects without exposing the stage objects
-# to handler or module manipulation. It allows cloud-init
-# to restrict what that kind of user-facing code may see
-# or adjust (which helps keep modules from interfering with each other).
-#
-# It also provides utility functions so callers do not need to know
-# how to reach a given member through its submembers, as well
-# as providing a backwards-compatible object that can be maintained
-# while the stages/other objects are worked on independently...
-
-
-class Cloud(object):
- def __init__(self, datasource, paths, cfg, distro, runners, reporter=None):
- self.datasource = datasource
- self.paths = paths
- self.distro = distro
- self._cfg = cfg
- self._runners = runners
- if reporter is None:
- reporter = events.ReportEventStack(
- name="unnamed-cloud-reporter",
- description="unnamed-cloud-reporter",
- reporting_enabled=False)
- self.reporter = reporter
-
-    # If a 'user' manipulates logging or logging services
-    # it is typically useful to cause the logging to be
-    # set up again.
- def cycle_logging(self):
- logging.resetLogging()
- logging.setupLogging(self.cfg)
-
- @property
- def cfg(self):
-        # Ensure the config is not indirectly modified
- return copy.deepcopy(self._cfg)
-
- def run(self, name, functor, args, freq=None, clear_on_fail=False):
- return self._runners.run(name, functor, args, freq, clear_on_fail)
-
- def get_template_filename(self, name):
- fn = self.paths.template_tpl % (name)
- if not os.path.isfile(fn):
- LOG.warn("No template found at %s for template named %s", fn, name)
- return None
- return fn
-
-    # The rest of these are just useful proxies
- def get_userdata(self, apply_filter=True):
- return self.datasource.get_userdata(apply_filter)
-
- def get_instance_id(self):
- return self.datasource.get_instance_id()
-
- @property
- def launch_index(self):
- return self.datasource.launch_index
-
- def get_public_ssh_keys(self):
- return self.datasource.get_public_ssh_keys()
-
- def get_locale(self):
- return self.datasource.get_locale()
-
- def get_hostname(self, fqdn=False):
- return self.datasource.get_hostname(fqdn=fqdn)
-
- def device_name_to_device(self, name):
- return self.datasource.device_name_to_device(name)
-
- def get_ipath_cur(self, name=None):
- return self.paths.get_ipath_cur(name)
-
- def get_cpath(self, name=None):
- return self.paths.get_cpath(name)
-
- def get_ipath(self, name=None):
- return self.paths.get_ipath(name)
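
To make the proxy surface above concrete, here is a minimal sketch of a config module consuming a Cloud object. The handle() signature and the module-level `frequency` attribute match the cc_* modules deleted later in this diff; the module name cc_example and its log lines are hypothetical.

    # cc_example.py - hypothetical consumer of the Cloud wrapper
    from cloudinit.settings import PER_INSTANCE

    frequency = PER_INSTANCE


    def handle(name, cfg, cloud, log, _args):
        # Datasource lookups go through the wrapper; the module never
        # touches the stage objects directly.
        iid = cloud.get_instance_id()
        hostname = cloud.get_hostname(fqdn=True)
        log.debug("%s: instance %s (%s), distro %s",
                  name, iid, hostname, cloud.distro.name)
        # Semaphore-guarded work is delegated to the runners via run().
        cloud.run(name, lambda: log.debug("once per instance"),
                  args=[], freq=PER_INSTANCE)
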
diff --git a/cloudinit/cmd/__init__.py b/cloudinit/cmd/__init__.py
deleted file mode 100644
index da124641..00000000
--- a/cloudinit/cmd/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
deleted file mode 100644
index 63621c1d..00000000
--- a/cloudinit/cmd/main.py
+++ /dev/null
@@ -1,685 +0,0 @@
-#!/usr/bin/python
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import argparse
-import json
-import os
-import sys
-import tempfile
-import time
-import traceback
-
-from cloudinit import patcher
-patcher.patch() # noqa
-
-from cloudinit import log as logging
-from cloudinit import netinfo
-from cloudinit import signal_handler
-from cloudinit import sources
-from cloudinit import stages
-from cloudinit import templater
-from cloudinit import util
-from cloudinit import version
-
-from cloudinit import reporting
-from cloudinit.reporting import events
-
-from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
- CLOUD_CONFIG)
-
-
-# Pretty little cheetah formatted welcome message template
-WELCOME_MSG_TPL = ("Cloud-init v. ${version} running '${action}' at "
- "${timestamp}. Up ${uptime} seconds.")
-
-# Module section template
-MOD_SECTION_TPL = "cloud_%s_modules"
-
-# Things you can query on
-QUERY_DATA_TYPES = [
- 'data',
- 'data_raw',
- 'instance_id',
-]
-
-# Frequency shortname to full name
-# (so users don't have to remember the full name...)
-FREQ_SHORT_NAMES = {
- 'instance': PER_INSTANCE,
- 'always': PER_ALWAYS,
- 'once': PER_ONCE,
-}
-
-LOG = logging.getLogger()
-
-
-# Used for when a logger may not be active
-# and we still want to print exceptions...
-def print_exc(msg=''):
- if msg:
- sys.stderr.write("%s\n" % (msg))
- sys.stderr.write('-' * 60)
- sys.stderr.write("\n")
- traceback.print_exc(file=sys.stderr)
- sys.stderr.write('-' * 60)
- sys.stderr.write("\n")
-
-
-def welcome(action, msg=None):
- if not msg:
- msg = welcome_format(action)
- util.multi_log("%s\n" % (msg),
- console=False, stderr=True, log=LOG)
- return msg
-
-
-def welcome_format(action):
- tpl_params = {
- 'version': version.version_string(),
- 'uptime': util.uptime(),
- 'timestamp': util.time_rfc2822(),
- 'action': action,
- }
- return templater.render_string(WELCOME_MSG_TPL, tpl_params)
-
-
-def extract_fns(args):
-    # Files are already opened, so let's just pass their names along,
-    # since opening would have already failed if a file
-    # could not be read...
- fn_cfgs = []
- if args.files:
- for fh in args.files:
- # The realpath is more useful in logging
-            # so let's resolve to that...
- fn_cfgs.append(os.path.realpath(fh.name))
- return fn_cfgs
-
-
-def run_module_section(mods, action_name, section):
- full_section_name = MOD_SECTION_TPL % (section)
- (which_ran, failures) = mods.run_section(full_section_name)
- total_attempted = len(which_ran) + len(failures)
- if total_attempted == 0:
- msg = ("No '%s' modules to run"
- " under section '%s'") % (action_name, full_section_name)
- sys.stderr.write("%s\n" % (msg))
- LOG.debug(msg)
- return []
- else:
- LOG.debug("Ran %s modules with %s failures",
- len(which_ran), len(failures))
- return failures
-
-
-def apply_reporting_cfg(cfg):
- if cfg.get('reporting'):
- reporting.update_configuration(cfg.get('reporting'))
-
-
-def main_init(name, args):
- deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
- if args.local:
- deps = [sources.DEP_FILESYSTEM]
-
- if not args.local:
- # See doc/kernel-cmdline.txt
- #
-        # This is used by the MAAS datasource, in an "ephemeral"
-        # (read-only root) environment where the instance netboots to an
-        # iSCSI read-only root, and the entity that controls the PXE
-        # config has to configure the MAAS datasource.
-        #
-        # It could be used elsewhere, but only works in network (not
-        # local) mode.
- root_name = "%s.d" % (CLOUD_CONFIG)
- target_fn = os.path.join(root_name, "91_kernel_cmdline_url.cfg")
- util.read_write_cmdline_url(target_fn)
-
- # Cloud-init 'init' stage is broken up into the following sub-stages
- # 1. Ensure that the init object fetches its config without errors
- # 2. Setup logging/output redirections with resultant config (if any)
- # 3. Initialize the cloud-init filesystem
- # 4. Check if we can stop early by looking for various files
- # 5. Fetch the datasource
- # 6. Connect to the current instance location + update the cache
- # 7. Consume the userdata (handlers get activated here)
- # 8. Construct the modules object
- # 9. Adjust any subsequent logging/output redirections using the modules
- # objects config as it may be different from init object
- # 10. Run the modules for the 'init' stage
- # 11. Done!
- if not args.local:
- w_msg = welcome_format(name)
- else:
- w_msg = welcome_format("%s-local" % (name))
- init = stages.Init(ds_deps=deps, reporter=args.reporter)
- # Stage 1
- init.read_cfg(extract_fns(args))
- # Stage 2
- outfmt = None
- errfmt = None
- try:
- LOG.debug("Closing stdin")
- util.close_stdin()
- (outfmt, errfmt) = util.fixup_output(init.cfg, name)
- except Exception:
- util.logexc(LOG, "Failed to setup output redirection!")
- print_exc("Failed to setup output redirection!")
- if args.debug:
- # Reset so that all the debug handlers are closed out
- LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
- logging.resetLogging()
- logging.setupLogging(init.cfg)
- apply_reporting_cfg(init.cfg)
-
- # Any log usage prior to setupLogging above did not have local user log
- # config applied. We send the welcome message now, as stderr/out have
-    # been redirected and logging is now configured.
- welcome(name, msg=w_msg)
-
- # Stage 3
- try:
- init.initialize()
- except Exception:
- util.logexc(LOG, "Failed to initialize, likely bad things to come!")
- # Stage 4
- path_helper = init.paths
- mode = sources.DSMODE_LOCAL if args.local else sources.DSMODE_NETWORK
-
- if mode == sources.DSMODE_NETWORK:
- existing = "trust"
- sys.stderr.write("%s\n" % (netinfo.debug_info()))
- LOG.debug(("Checking to see if files that we need already"
- " exist from a previous run that would allow us"
- " to stop early."))
-        # no-net is written by the upstart cloud-init-nonet job when the
-        # network fails to come up
- stop_files = [
- os.path.join(path_helper.get_cpath("data"), "no-net"),
- ]
- existing_files = []
- for fn in stop_files:
- if os.path.isfile(fn):
- existing_files.append(fn)
-
- if existing_files:
- LOG.debug("[%s] Exiting. stop file %s existed",
- mode, existing_files)
- return (None, [])
- else:
- LOG.debug("Execution continuing, no previous run detected that"
- " would allow us to stop early.")
- else:
- existing = "check"
- if util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False):
- existing = "trust"
-
- init.purge_cache()
-    # Delete the no-net file as well
- util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))
-
- # Stage 5
- try:
- init.fetch(existing=existing)
- # if in network mode, and the datasource is local
- # then work was done at that stage.
- if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode:
- LOG.debug("[%s] Exiting. datasource %s in local mode",
- mode, init.datasource)
- return (None, [])
- except sources.DataSourceNotFoundException:
-        # In the case of 'cloud-init init' without '--local' it is a bit
-        # more likely that the user would consider it a failure if nothing
-        # was found. When using upstart, the console log will also mention
-        # a job failure if the exit code is != 0.
- if mode == sources.DSMODE_LOCAL:
- LOG.debug("No local datasource found")
- else:
- util.logexc(LOG, ("No instance datasource found!"
- " Likely bad things to come!"))
- if not args.force:
- init.apply_network_config(bring_up=not args.local)
- LOG.debug("[%s] Exiting without datasource in local mode", mode)
- if mode == sources.DSMODE_LOCAL:
- return (None, [])
- else:
- return (None, ["No instance datasource found."])
- else:
- LOG.debug("[%s] barreling on in force mode without datasource",
- mode)
-
- # Stage 6
- iid = init.instancify()
- LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s",
- mode, name, iid, init.is_new_instance())
-
- init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL))
-
- if mode == sources.DSMODE_LOCAL:
- if init.datasource.dsmode != mode:
- LOG.debug("[%s] Exiting. datasource %s not in local mode.",
- mode, init.datasource)
- return (init.datasource, [])
- else:
- LOG.debug("[%s] %s is in local mode, will apply init modules now.",
- mode, init.datasource)
-
- # update fully realizes user-data (pulling in #include if necessary)
- init.update()
- # Stage 7
- try:
- # Attempt to consume the data per instance.
- # This may run user-data handlers and/or perform
- # url downloads and such as needed.
- (ran, _results) = init.cloudify().run('consume_data',
- init.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
- if not ran:
- # Just consume anything that is set to run per-always
- # if nothing ran in the per-instance code
- #
- # See: https://bugs.launchpad.net/bugs/819507 for a little
- # reason behind this...
- init.consume_data(PER_ALWAYS)
- except Exception:
- util.logexc(LOG, "Consuming user data failed!")
- return (init.datasource, ["Consuming user data failed!"])
-
- apply_reporting_cfg(init.cfg)
-
- # Stage 8 - re-read and apply relevant cloud-config to include user-data
- mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
- # Stage 9
- try:
- outfmt_orig = outfmt
- errfmt_orig = errfmt
- (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name)
- if outfmt_orig != outfmt or errfmt_orig != errfmt:
- LOG.warn("Stdout, stderr changing to (%s, %s)", outfmt, errfmt)
- (outfmt, errfmt) = util.fixup_output(mods.cfg, name)
- except Exception:
- util.logexc(LOG, "Failed to re-adjust output redirection!")
- logging.setupLogging(mods.cfg)
-
- # Stage 10
- return (init.datasource, run_module_section(mods, name, name))
-
-
-def main_modules(action_name, args):
- name = args.mode
- # Cloud-init 'modules' stages are broken up into the following sub-stages
- # 1. Ensure that the init object fetches its config without errors
- # 2. Get the datasource from the init object, if it does
- # not exist then that means the main_init stage never
- # worked, and thus this stage can not run.
- # 3. Construct the modules object
- # 4. Adjust any subsequent logging/output redirections using
- # the modules objects configuration
- # 5. Run the modules for the given stage name
- # 6. Done!
- w_msg = welcome_format("%s:%s" % (action_name, name))
- init = stages.Init(ds_deps=[], reporter=args.reporter)
- # Stage 1
- init.read_cfg(extract_fns(args))
- # Stage 2
- try:
- init.fetch(existing="trust")
- except sources.DataSourceNotFoundException:
-        # There was no datasource found, there's nothing to do
- msg = ('Can not apply stage %s, no datasource found! Likely bad '
- 'things to come!' % name)
- util.logexc(LOG, msg)
- print_exc(msg)
- if not args.force:
- return [(msg)]
- # Stage 3
- mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
- # Stage 4
- try:
- LOG.debug("Closing stdin")
- util.close_stdin()
- util.fixup_output(mods.cfg, name)
- except Exception:
- util.logexc(LOG, "Failed to setup output redirection!")
- if args.debug:
- # Reset so that all the debug handlers are closed out
- LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
- logging.resetLogging()
- logging.setupLogging(mods.cfg)
- apply_reporting_cfg(init.cfg)
-
-    # now that logging is set up and stdout redirected, send welcome
- welcome(name, msg=w_msg)
-
- # Stage 5
- return run_module_section(mods, name, name)
-
-
-def main_query(name, _args):
- raise NotImplementedError(("Action '%s' is not"
- " currently implemented") % (name))
-
-
-def main_single(name, args):
- # Cloud-init single stage is broken up into the following sub-stages
- # 1. Ensure that the init object fetches its config without errors
- # 2. Attempt to fetch the datasource (warn if it doesn't work)
- # 3. Construct the modules object
- # 4. Adjust any subsequent logging/output redirections using
- # the modules objects configuration
- # 5. Run the single module
- # 6. Done!
- mod_name = args.name
- w_msg = welcome_format(name)
- init = stages.Init(ds_deps=[], reporter=args.reporter)
- # Stage 1
- init.read_cfg(extract_fns(args))
- # Stage 2
- try:
- init.fetch(existing="trust")
- except sources.DataSourceNotFoundException:
-        # There was no datasource found,
-        # that might be bad (or ok) depending on
-        # the module being run (so continue on)
- util.logexc(LOG, ("Failed to fetch your datasource,"
- " likely bad things to come!"))
- print_exc(("Failed to fetch your datasource,"
- " likely bad things to come!"))
- if not args.force:
- return 1
- # Stage 3
- mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
- mod_args = args.module_args
- if mod_args:
- LOG.debug("Using passed in arguments %s", mod_args)
- mod_freq = args.frequency
- if mod_freq:
- LOG.debug("Using passed in frequency %s", mod_freq)
- mod_freq = FREQ_SHORT_NAMES.get(mod_freq)
- # Stage 4
- try:
- LOG.debug("Closing stdin")
- util.close_stdin()
- util.fixup_output(mods.cfg, None)
- except Exception:
- util.logexc(LOG, "Failed to setup output redirection!")
- if args.debug:
- # Reset so that all the debug handlers are closed out
- LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
- logging.resetLogging()
- logging.setupLogging(mods.cfg)
- apply_reporting_cfg(init.cfg)
-
-    # now that logging is set up and stdout redirected, send welcome
- welcome(name, msg=w_msg)
-
- # Stage 5
- (which_ran, failures) = mods.run_single(mod_name,
- mod_args,
- mod_freq)
- if failures:
- LOG.warn("Ran %s but it failed!", mod_name)
- return 1
- elif not which_ran:
- LOG.warn("Did not run %s, does it exist?", mod_name)
- return 1
- else:
- # Guess it worked
- return 0
-
-
-def atomic_write_file(path, content, mode='w'):
- tf = None
- try:
- tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(path),
- delete=False, mode=mode)
- tf.write(content)
- tf.close()
- os.rename(tf.name, path)
- except Exception as e:
- if tf is not None:
- os.unlink(tf.name)
- raise e
-
-
-def atomic_write_json(path, data):
- return atomic_write_file(path, json.dumps(data, indent=1) + "\n")
-
-
-def status_wrapper(name, args, data_d=None, link_d=None):
- if data_d is None:
- data_d = os.path.normpath("/var/lib/cloud/data")
- if link_d is None:
- link_d = os.path.normpath("/run/cloud-init")
-
- status_path = os.path.join(data_d, "status.json")
- status_link = os.path.join(link_d, "status.json")
- result_path = os.path.join(data_d, "result.json")
- result_link = os.path.join(link_d, "result.json")
-
- util.ensure_dirs((data_d, link_d,))
-
- (_name, functor) = args.action
-
- if name == "init":
- if args.local:
- mode = "init-local"
- else:
- mode = "init"
- elif name == "modules":
- mode = "modules-%s" % args.mode
- else:
- raise ValueError("unknown name: %s" % name)
-
- modes = ('init', 'init-local', 'modules-config', 'modules-final')
-
- status = None
- if mode == 'init-local':
- for f in (status_link, result_link, status_path, result_path):
- util.del_file(f)
- else:
- try:
- status = json.loads(util.load_file(status_path))
- except Exception:
- pass
-
- if status is None:
- nullstatus = {
- 'errors': [],
- 'start': None,
- 'finished': None,
- }
- status = {'v1': {}}
- for m in modes:
- status['v1'][m] = nullstatus.copy()
- status['v1']['datasource'] = None
-
- v1 = status['v1']
- v1['stage'] = mode
- v1[mode]['start'] = time.time()
-
- atomic_write_json(status_path, status)
- util.sym_link(os.path.relpath(status_path, link_d), status_link,
- force=True)
-
- try:
- ret = functor(name, args)
- if mode in ('init', 'init-local'):
- (datasource, errors) = ret
- if datasource is not None:
- v1['datasource'] = str(datasource)
- else:
- errors = ret
-
- v1[mode]['errors'] = [str(e) for e in errors]
-
- except Exception as e:
- util.logexc(LOG, "failed stage %s", mode)
- print_exc("failed run of stage %s" % mode)
- v1[mode]['errors'] = [str(e)]
-
- v1[mode]['finished'] = time.time()
- v1['stage'] = None
-
- atomic_write_json(status_path, status)
-
- if mode == "modules-final":
- # write the 'finished' file
- errors = []
- for m in modes:
- if v1[m]['errors']:
- errors.extend(v1[m].get('errors', []))
-
- atomic_write_json(result_path,
- {'v1': {'datasource': v1['datasource'],
- 'errors': errors}})
- util.sym_link(os.path.relpath(result_path, link_d), result_link,
- force=True)
-
- return len(v1[mode]['errors'])
-
-
-def main(sysv_args=None):
- if sysv_args is not None:
- parser = argparse.ArgumentParser(prog=sysv_args[0])
- sysv_args = sysv_args[1:]
- else:
- parser = argparse.ArgumentParser()
-
- # Top level args
- parser.add_argument('--version', '-v', action='version',
- version='%(prog)s ' + (version.version_string()))
- parser.add_argument('--file', '-f', action='append',
- dest='files',
- help=('additional yaml configuration'
- ' files to use'),
- type=argparse.FileType('rb'))
- parser.add_argument('--debug', '-d', action='store_true',
- help=('show additional pre-action'
- ' logging (default: %(default)s)'),
- default=False)
- parser.add_argument('--force', action='store_true',
- help=('force running even if no datasource is'
- ' found (use at your own risk)'),
- dest='force',
- default=False)
-
- parser.set_defaults(reporter=None)
- subparsers = parser.add_subparsers()
-
- # Each action and its sub-options (if any)
- parser_init = subparsers.add_parser('init',
- help=('initializes cloud-init and'
- ' performs initial modules'))
- parser_init.add_argument("--local", '-l', action='store_true',
- help="start in local mode (default: %(default)s)",
- default=False)
- # This is used so that we can know which action is selected +
- # the functor to use to run this subcommand
- parser_init.set_defaults(action=('init', main_init))
-
- # These settings are used for the 'config' and 'final' stages
- parser_mod = subparsers.add_parser('modules',
- help=('activates modules using '
- 'a given configuration key'))
- parser_mod.add_argument("--mode", '-m', action='store',
- help=("module configuration name "
- "to use (default: %(default)s)"),
- default='config',
- choices=('init', 'config', 'final'))
- parser_mod.set_defaults(action=('modules', main_modules))
-
- # These settings are used when you want to query information
- # stored in the cloud-init data objects/directories/files
- parser_query = subparsers.add_parser('query',
- help=('query information stored '
- 'in cloud-init'))
- parser_query.add_argument("--name", '-n', action="store",
- help="item name to query on",
- required=True,
- choices=QUERY_DATA_TYPES)
- parser_query.set_defaults(action=('query', main_query))
-
- # This subcommand allows you to run a single module
- parser_single = subparsers.add_parser('single',
-                                          help=('run a single module'))
- parser_single.set_defaults(action=('single', main_single))
- parser_single.add_argument("--name", '-n', action="store",
- help="module name to run",
- required=True)
- parser_single.add_argument("--frequency", action="store",
- help=("frequency of the module"),
- required=False,
- choices=list(FREQ_SHORT_NAMES.keys()))
- parser_single.add_argument("--report", action="store_true",
- help="enable reporting",
- required=False)
- parser_single.add_argument("module_args", nargs="*",
- metavar='argument',
- help=('any additional arguments to'
- ' pass to this module'))
- parser_single.set_defaults(action=('single', main_single))
-
- args = parser.parse_args(args=sysv_args)
-
- try:
- (name, functor) = args.action
- except AttributeError:
- parser.error('too few arguments')
-
- # Setup basic logging to start (until reinitialized)
- # iff in debug mode...
- if args.debug:
- logging.setupBasicLogging()
-
- # Setup signal handlers before running
- signal_handler.attach_handlers()
-
- if name in ("modules", "init"):
- functor = status_wrapper
-
- report_on = True
- if name == "init":
- if args.local:
- rname, rdesc = ("init-local", "searching for local datasources")
- else:
- rname, rdesc = ("init-network",
- "searching for network datasources")
- elif name == "modules":
- rname, rdesc = ("modules-%s" % args.mode,
- "running modules for %s" % args.mode)
- elif name == "single":
- rname, rdesc = ("single/%s" % args.name,
- "running single module %s" % args.name)
- report_on = args.report
-
- args.reporter = events.ReportEventStack(
- rname, rdesc, reporting_enabled=report_on)
- with args.reporter:
- return util.log_time(
- logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
- get_uptime=True, func=functor, args=(name, args))
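
Since status_wrapper() above defines the on-disk contract, a short sketch of consuming it may help. The link path and the 'v1' layout come straight from the code; treating a missing file as "not started" is this sketch's own assumption.

    import json

    STATUS_LINK = "/run/cloud-init/status.json"  # symlink maintained above

    def summarize_status(path=STATUS_LINK):
        try:
            with open(path) as fh:
                v1 = json.load(fh)["v1"]
        except IOError:
            return "cloud-init has not started"
        errors = []
        for m in ("init-local", "init", "modules-config", "modules-final"):
            errors.extend(v1[m]["errors"])
        return "stage=%s datasource=%s errors=%d" % (
            v1.get("stage"), v1.get("datasource"), len(errors))

    print(summarize_status())
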
diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py
deleted file mode 100644
index d57453be..00000000
--- a/cloudinit/config/__init__.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2008-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Chuck Short <chuck.short@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-
-from cloudinit.settings import (PER_INSTANCE, FREQUENCIES)
-
-from cloudinit import log as logging
-
-LOG = logging.getLogger(__name__)
-
-# This prefix is used to reduce the chance
-# that, when importing, we will pick up
-# something else with the same
-# name in the lookup path...
-MOD_PREFIX = "cc_"
-
-
-def form_module_name(name):
- canon_name = name.replace("-", "_")
- if canon_name.lower().endswith(".py"):
- canon_name = canon_name[0:(len(canon_name) - 3)]
- canon_name = canon_name.strip()
- if not canon_name:
- return None
- if not canon_name.startswith(MOD_PREFIX):
- canon_name = '%s%s' % (MOD_PREFIX, canon_name)
- return canon_name
-
-
-def fixup_module(mod, def_freq=PER_INSTANCE):
- if not hasattr(mod, 'frequency'):
- setattr(mod, 'frequency', def_freq)
- else:
- freq = mod.frequency
- if freq and freq not in FREQUENCIES:
- LOG.warn("Module %s has an unknown frequency %s", mod, freq)
- if not hasattr(mod, 'distros'):
- setattr(mod, 'distros', [])
- if not hasattr(mod, 'osfamilies'):
- setattr(mod, 'osfamilies', [])
- return mod
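
The canonicalization rules in form_module_name() above are easiest to read as examples; these doctest-style assertions follow directly from the code:

    assert form_module_name("apt-configure") == "cc_apt_configure"
    assert form_module_name("cc_bootcmd.py") == "cc_bootcmd"
    # the .py extension check is case-insensitive
    assert form_module_name("Foo.PY") == "cc_Foo"
    assert form_module_name("   ") is None
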
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
deleted file mode 100644
index 05ad4b03..00000000
--- a/cloudinit/config/cc_apt_configure.py
+++ /dev/null
@@ -1,319 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import glob
-import os
-import re
-
-from cloudinit import gpg
-from cloudinit import templater
-from cloudinit import util
-
-distros = ['ubuntu', 'debian']
-
-PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n"
-APT_CONFIG_FN = "/etc/apt/apt.conf.d/94cloud-init-config"
-APT_PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy"
-
-# this will match 'XXX:YYY' (e.g., 'cloud-archive:foo' or 'ppa:bar')
-ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
-
-
-def handle(name, cfg, cloud, log, _args):
- if util.is_false(cfg.get('apt_configure_enabled', True)):
- log.debug("Skipping module named %s, disabled by config.", name)
- return
-
- release = get_release()
- mirrors = find_apt_mirror_info(cloud, cfg)
- if not mirrors or "primary" not in mirrors:
- log.debug(("Skipping module named %s,"
- " no package 'mirror' located"), name)
- return
-
- # backwards compatibility
- mirror = mirrors["primary"]
- mirrors["mirror"] = mirror
-
- log.debug("Mirror info: %s" % mirrors)
-
- if not util.get_cfg_option_bool(cfg,
- 'apt_preserve_sources_list', False):
- generate_sources_list(cfg, release, mirrors, cloud, log)
- old_mirrors = cfg.get('apt_old_mirrors',
- {"primary": "archive.ubuntu.com/ubuntu",
- "security": "security.ubuntu.com/ubuntu"})
- rename_apt_lists(old_mirrors, mirrors)
-
- try:
- apply_apt_config(cfg, APT_PROXY_FN, APT_CONFIG_FN)
- except Exception as e:
- log.warn("failed to proxy or apt config info: %s", e)
-
- # Process 'apt_sources'
- if 'apt_sources' in cfg:
- params = mirrors
- params['RELEASE'] = release
- params['MIRROR'] = mirror
-
- matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH)
- if matchcfg:
- matcher = re.compile(matchcfg).search
- else:
- def matcher(x):
- return False
-
- errors = add_apt_sources(cfg['apt_sources'], params,
- aa_repo_match=matcher)
- for e in errors:
- log.warn("Add source error: %s", ':'.join(e))
-
- dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False)
- if dconf_sel:
- log.debug("Setting debconf selections per cloud config")
- try:
- util.subp(('debconf-set-selections', '-'), dconf_sel)
- except Exception:
- util.logexc(log, "Failed to run debconf-set-selections")
-
-
-def mirrorurl_to_apt_fileprefix(mirror):
- string = mirror
- # take off http:// or ftp://
- if string.endswith("/"):
- string = string[0:-1]
- pos = string.find("://")
- if pos >= 0:
- string = string[pos + 3:]
- string = string.replace("/", "_")
- return string
-
-
-def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"):
- for (name, omirror) in old_mirrors.items():
- nmirror = new_mirrors.get(name)
- if not nmirror:
- continue
- oprefix = os.path.join(lists_d, mirrorurl_to_apt_fileprefix(omirror))
- nprefix = os.path.join(lists_d, mirrorurl_to_apt_fileprefix(nmirror))
- if oprefix == nprefix:
- continue
- olen = len(oprefix)
- for filename in glob.glob("%s_*" % oprefix):
- util.rename(filename, "%s%s" % (nprefix, filename[olen:]))
-
-
-def get_release():
- (stdout, _stderr) = util.subp(['lsb_release', '-cs'])
- return stdout.strip()
-
-
-def generate_sources_list(cfg, codename, mirrors, cloud, log):
- params = {'codename': codename}
- for k in mirrors:
- params[k] = mirrors[k]
-
- custtmpl = cfg.get('apt_custom_sources_list', None)
- if custtmpl is not None:
- templater.render_string_to_file(custtmpl,
- '/etc/apt/sources.list', params)
- return
-
- template_fn = cloud.get_template_filename('sources.list.%s' %
- (cloud.distro.name))
- if not template_fn:
- template_fn = cloud.get_template_filename('sources.list')
- if not template_fn:
- log.warn("No template found, not rendering /etc/apt/sources.list")
- return
-
- templater.render_to_file(template_fn, '/etc/apt/sources.list', params)
-
-
-def add_apt_key_raw(key):
- """
- actual adding of a key as defined in key argument
- to the system
- """
- try:
- util.subp(('apt-key', 'add', '-'), key)
- except util.ProcessExecutionError:
- raise ValueError('failed to add apt GPG Key to apt keyring')
-
-
-def add_apt_key(ent):
- """
- add key to the system as defined in ent (if any)
- supports raw keys or keyid's
- The latter will as a first step fetch the raw key from a keyserver
- """
- if 'keyid' in ent and 'key' not in ent:
- keyserver = "keyserver.ubuntu.com"
- if 'keyserver' in ent:
- keyserver = ent['keyserver']
- ent['key'] = gpg.get_key_by_id(ent['keyid'], keyserver)
-
- if 'key' in ent:
- add_apt_key_raw(ent['key'])
-
-
-def convert_to_new_format(srclist):
- """convert_to_new_format
- convert the old list based format to the new dict based one
- """
- srcdict = {}
- if isinstance(srclist, list):
- for srcent in srclist:
- if 'filename' not in srcent:
-                # for compatibility, entries without a filename share one
-                # output file, yet each still needs processing, so they
-                # must not share a dictionary key
- srcent['filename'] = "cloud_config_sources.list"
- key = util.rand_dict_key(srcdict, "cloud_config_sources.list")
- else:
- # all with filename use that as key (matching new format)
- key = srcent['filename']
- srcdict[key] = srcent
- elif isinstance(srclist, dict):
- srcdict = srclist
- else:
- raise ValueError("unknown apt_sources format")
-
- return srcdict
-
-
-def add_apt_sources(srclist, template_params=None, aa_repo_match=None):
- """
- add entries in /etc/apt/sources.list.d for each abbreviated
- sources.list entry in 'srclist'. When rendering template, also
- include the values in dictionary searchList
- """
- if template_params is None:
- template_params = {}
-
- if aa_repo_match is None:
- def _aa_repo_match(x):
- return False
- aa_repo_match = _aa_repo_match
-
- errorlist = []
- srcdict = convert_to_new_format(srclist)
-
- for filename in srcdict:
- ent = srcdict[filename]
- if 'filename' not in ent:
- ent['filename'] = filename
-
- # keys can be added without specifying a source
- try:
- add_apt_key(ent)
- except ValueError as detail:
- errorlist.append([ent, detail])
-
- if 'source' not in ent:
- errorlist.append(["", "missing source"])
- continue
- source = ent['source']
- source = templater.render_string(source, template_params)
-
- if not ent['filename'].startswith(os.path.sep):
- ent['filename'] = os.path.join("/etc/apt/sources.list.d/",
- ent['filename'])
-
- if aa_repo_match(source):
- try:
- util.subp(["add-apt-repository", source])
- except util.ProcessExecutionError as e:
- errorlist.append([source,
- ("add-apt-repository failed. " + str(e))])
- continue
-
- try:
- contents = "%s\n" % (source)
- util.write_file(ent['filename'], contents, omode="ab")
- except Exception:
- errorlist.append([source,
- "failed write to file %s" % ent['filename']])
-
- return errorlist
-
-
-def find_apt_mirror_info(cloud, cfg):
- """find an apt_mirror given the cloud and cfg provided."""
-
- mirror = None
-
-    # this is the less-preferred way of specifying a mirror; preferred
-    # would be to use the distro's search or package_mirror.
- mirror = cfg.get("apt_mirror", None)
-
- search = cfg.get("apt_mirror_search", None)
- if not mirror and search:
- mirror = util.search_for_mirror(search)
-
- if (not mirror and
- util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)):
- mydom = ""
- doms = []
-
- # if we have a fqdn, then search its domain portion first
- (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- mydom = ".".join(fqdn.split(".")[1:])
- if mydom:
- doms.append(".%s" % mydom)
-
- doms.extend((".localdomain", "",))
-
- mirror_list = []
- distro = cloud.distro.name
- mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
- for post in doms:
- mirror_list.append(mirrorfmt % (post))
-
- mirror = util.search_for_mirror(mirror_list)
-
- mirror_info = cloud.datasource.get_package_mirror_info()
-
- # this is a bit strange.
- # if mirror is set, then one of the legacy options above set it
- # but they do not cover security. so we need to get that from
- # get_package_mirror_info
- if mirror:
- mirror_info.update({'primary': mirror})
-
- return mirror_info
-
-
-def apply_apt_config(cfg, proxy_fname, config_fname):
- # Set up any apt proxy
- cfgs = (('apt_proxy', 'Acquire::HTTP::Proxy "%s";'),
- ('apt_http_proxy', 'Acquire::HTTP::Proxy "%s";'),
- ('apt_ftp_proxy', 'Acquire::FTP::Proxy "%s";'),
- ('apt_https_proxy', 'Acquire::HTTPS::Proxy "%s";'))
-
- proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)]
- if len(proxies):
- util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
- elif os.path.isfile(proxy_fname):
- util.del_file(proxy_fname)
-
- if cfg.get('apt_config', None):
- util.write_file(config_fname, cfg.get('apt_config'))
- elif os.path.isfile(config_fname):
- util.del_file(config_fname)
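
Two helpers above are pure functions and are easiest to grasp by example. The mirror URL and source entries below are illustrative values only; the randomized dict key given to filename-less entries comes from util.rand_dict_key(), so the sketch only asserts what is stable:

    # mirrorurl_to_apt_fileprefix() drops the scheme and flattens slashes:
    assert (mirrorurl_to_apt_fileprefix("http://archive.ubuntu.com/ubuntu/")
            == "archive.ubuntu.com_ubuntu")

    # convert_to_new_format() keys entries by filename; entries without one
    # share the output file cloud_config_sources.list but get distinct keys:
    old = [{'source': 'ppa:example/ppa'},
           {'source': 'deb $MIRROR $RELEASE multiverse',
            'filename': 'multiverse.list'}]
    new = convert_to_new_format(old)
    assert 'multiverse.list' in new
    assert all(ent.get('filename') for ent in new.values())
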
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
deleted file mode 100644
index 40c32c84..00000000
--- a/cloudinit/config/cc_apt_pipelining.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-#
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-frequency = PER_INSTANCE
-
-distros = ['ubuntu', 'debian']
-
-DEFAULT_FILE = "/etc/apt/apt.conf.d/90cloud-init-pipelining"
-
-APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n"
- 'Acquire::http::Pipeline-Depth "%s";\n')
-
-# Acquire::http::Pipeline-Depth can be a value
-# from 0 to 5 indicating how many outstanding requests APT should send.
-# A value of zero MUST be specified if the remote host does not properly linger
-# on TCP connections - otherwise data corruption will occur.
-
-
-def handle(_name, cfg, _cloud, log, _args):
-
- apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)
- apt_pipe_value_s = str(apt_pipe_value).lower().strip()
-
- if apt_pipe_value_s == "false":
- write_apt_snippet("0", log, DEFAULT_FILE)
- elif apt_pipe_value_s in ("none", "unchanged", "os"):
- return
- elif apt_pipe_value_s in [str(b) for b in range(0, 6)]:
- write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE)
- else:
- log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value)
-
-
-def write_apt_snippet(setting, log, f_name):
- """Writes f_name with apt pipeline depth 'setting'."""
-
- file_contents = APT_PIPE_TPL % (setting)
- util.write_file(f_name, file_contents)
- log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting)
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
deleted file mode 100644
index b763a3c3..00000000
--- a/cloudinit/config/cc_bootcmd.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2011 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
-
-frequency = PER_ALWAYS
-
-
-def handle(name, cfg, cloud, log, _args):
-
- if "bootcmd" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'bootcmd' key in configuration"), name)
- return
-
- with util.ExtendedTemporaryFile(suffix=".sh") as tmpf:
- try:
- content = util.shellify(cfg["bootcmd"])
- tmpf.write(util.encode_text(content))
- tmpf.flush()
- except Exception:
- util.logexc(log, "Failed to shellify bootcmd")
- raise
-
- try:
- env = os.environ.copy()
- iid = cloud.get_instance_id()
- if iid:
- env['INSTANCE_ID'] = str(iid)
- cmd = ['/bin/sh', tmpf.name]
- util.subp(cmd, env=env, capture=False)
- except Exception:
- util.logexc(log, "Failed to run bootcmd module %s", name)
- raise
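
A test-style sketch of feeding handle() above a 'bootcmd' list. Plain string entries run as shell lines while argv lists are quoted by util.shellify(); the cloud and log objects stand in for what the module runner normally supplies, and the paths are illustrative:

    cfg = {
        "bootcmd": [
            "mkdir -p /run/example",                              # shell string
            ["sh", "-c", "echo $INSTANCE_ID > /run/example/iid"],  # argv list
        ]
    }
    # INSTANCE_ID is exported into the script's environment by the code
    # above, so the second command records which instance booted.
    handle("bootcmd", cfg, cloud, log, None)
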
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
deleted file mode 100644
index ef0ce7ab..00000000
--- a/cloudinit/config/cc_byobu.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
-from cloudinit import util
-
-distros = ['ubuntu', 'debian']
-
-
-def handle(name, cfg, cloud, log, args):
- if len(args) != 0:
- value = args[0]
- else:
- value = util.get_cfg_option_str(cfg, "byobu_by_default", "")
-
- if not value:
- log.debug("Skipping module named %s, no 'byobu' values found", name)
- return
-
- if value == "user" or value == "system":
- value = "enable-%s" % value
-
- valid = ("enable-user", "enable-system", "enable",
- "disable-user", "disable-system", "disable")
- if value not in valid:
- log.warn("Unknown value %s for byobu_by_default", value)
-
- mod_user = value.endswith("-user")
- mod_sys = value.endswith("-system")
- if value.startswith("enable"):
- bl_inst = "install"
- dc_val = "byobu byobu/launch-by-default boolean true"
- mod_sys = True
- else:
- if value == "disable":
- mod_user = True
- mod_sys = True
- bl_inst = "uninstall"
- dc_val = "byobu byobu/launch-by-default boolean false"
-
- shcmd = ""
- if mod_user:
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
- (user, _user_config) = ds.extract_default(users)
- if not user:
- log.warn(("No default byobu user provided, "
- "can not launch %s for the default user"), bl_inst)
- else:
- shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst)
- shcmd += " || X=$(($X+1)); "
- if mod_sys:
- shcmd += "echo \"%s\" | debconf-set-selections" % dc_val
- shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive"
- shcmd += " || X=$(($X+1)); "
-
- if len(shcmd):
- cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
- log.debug("Setting byobu to %s", value)
- util.subp(cmd, capture=False)
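
Tracing the command assembly above for one value may help; for 'system' (normalized to 'enable-system'), mod_user is False and mod_sys is True, so only the system half is emitted. This sketch reconstructs the shell command the module would run:

    dc_val = "byobu byobu/launch-by-default boolean true"
    shcmd = ('echo "%s" | debconf-set-selections'
             ' && dpkg-reconfigure byobu --frontend=noninteractive'
             ' || X=$(($X+1)); ' % dc_val)
    cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
    print(cmd[2])
    # -> X=0; echo "byobu byobu/launch-by-default boolean true"
    #    | debconf-set-selections && dpkg-reconfigure byobu
    #    --frontend=noninteractive || X=$(($X+1));  exit $X
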
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
deleted file mode 100644
index 8248b020..00000000
--- a/cloudinit/config/cc_ca_certs.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Author: Mike Milner <mike.milner@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-CA_CERT_PATH = "/usr/share/ca-certificates/"
-CA_CERT_FILENAME = "cloud-init-ca-certs.crt"
-CA_CERT_CONFIG = "/etc/ca-certificates.conf"
-CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/"
-CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
-
-distros = ['ubuntu', 'debian']
-
-
-def update_ca_certs():
- """
- Updates the CA certificate cache on the current machine.
- """
- util.subp(["update-ca-certificates"], capture=False)
-
-
-def add_ca_certs(certs):
- """
- Adds certificates to the system. To actually apply the new certificates
- you must also call L{update_ca_certs}.
-
- @param certs: A list of certificate strings.
- """
- if certs:
- # First ensure they are strings...
- cert_file_contents = "\n".join([str(c) for c in certs])
- util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0o644)
-
- # Append cert filename to CA_CERT_CONFIG file.
- # We have to strip the content because blank lines in the file
- # causes subsequent entries to be ignored. (LP: #1077020)
- orig = util.load_file(CA_CERT_CONFIG)
- cur_cont = '\n'.join([l for l in orig.splitlines()
- if l != CA_CERT_FILENAME])
- out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME)
- util.write_file(CA_CERT_CONFIG, out, omode="wb")
-
-
-def remove_default_ca_certs():
- """
- Removes all default trusted CA certificates from the system. To actually
- apply the change you must also call L{update_ca_certs}.
- """
- util.delete_dir_contents(CA_CERT_PATH)
- util.delete_dir_contents(CA_CERT_SYSTEM_PATH)
- util.write_file(CA_CERT_CONFIG, "", mode=0o644)
- debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no"
- util.subp(('debconf-set-selections', '-'), debconf_sel)
-
-
-def handle(name, cfg, _cloud, log, _args):
- """
- Call to handle ca-cert sections in cloud-config file.
-
- @param name: The module name "ca-cert" from cloud.cfg
- @param cfg: A nested dict containing the entire cloud config contents.
- @param cloud: The L{CloudInit} object in use.
- @param log: Pre-initialized Python logger object to use for logging.
- @param args: Any module arguments from cloud.cfg
- """
- # If there isn't a ca-certs section in the configuration don't do anything
- if "ca-certs" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'ca-certs' key in configuration"), name)
- return
-
- ca_cert_cfg = cfg['ca-certs']
-
- # If there is a remove-defaults option set to true, remove the system
- # default trusted CA certs first.
- if ca_cert_cfg.get("remove-defaults", False):
- log.debug("Removing default certificates")
- remove_default_ca_certs()
-
- # If we are given any new trusted CA certs to add, add them.
- if "trusted" in ca_cert_cfg:
- trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted")
- if trusted_certs:
- log.debug("Adding %d certificates" % len(trusted_certs))
- add_ca_certs(trusted_certs)
-
- # Update the system with the new cert configuration.
- log.debug("Updating certificates")
- update_ca_certs()
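
A test-style sketch of the ca-certs flow above. The certificate body is a dummy placeholder, and the logger stands in for the one the module runner normally supplies:

    import logging
    log = logging.getLogger("cc_ca_certs.demo")

    cfg = {
        "ca-certs": {
            "remove-defaults": True,
            "trusted": ["-----BEGIN CERTIFICATE-----\n"
                        "...dummy PEM body...\n"
                        "-----END CERTIFICATE-----"],
        }
    }
    handle("ca-certs", cfg, None, log, None)
    # Effect, per the functions above: default certs removed and new-cert
    # trust pre-seeded off; the dummy cert written to CA_CERT_FULL_PATH and
    # its filename appended to /etc/ca-certificates.conf; then
    # update-ca-certificates run to rebuild the system cache.
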
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
deleted file mode 100644
index 4c28be6a..00000000
--- a/cloudinit/config/cc_chef.py
+++ /dev/null
@@ -1,342 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Avishai Ish-Shalom <avishai@fewbytes.com>
-# Author: Mike Moulton <mike@meltmedia.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-**Summary:** module that installs, configures and starts chef.
-
-**Description:** This module enables chef to be installed (from packages,
-from gems, or from omnibus). Before installation occurs, the chef
-configuration files are written to disk (validation.pem, client.pem,
-firstboot.json, client.rb) and the needed chef directories are created
-(/etc/chef, /var/log/chef and so on). Once installation succeeds, chef is
-started if so configured, in either daemon or non-daemon mode. In non-daemon
-mode the run finishes when chef finishes converging; in daemon mode no
-further actions are possible, since chef forks into its own process. After
-that, a post-run function can perform finishing activities (such as removing
-the validation pem file).
-
-It can be configured with the following option structure::
-
- chef:
- directories: (defaulting to /etc/chef, /var/log/chef, /var/lib/chef,
- /var/cache/chef, /var/backups/chef, /var/run/chef)
- validation_cert: (optional string to be written to file validation_key)
-                      special value 'system' means use the existing file
-     validation_key: (optional path to write validation_cert to; default
- /etc/chef/validation.pem)
- firstboot_path: (path to write run_list and initial_attributes keys that
- should also be present in this configuration, defaults
- to /etc/chef/firstboot.json)
-     exec: boolean controlling whether chef-client is run (defaults to
-                      false, unless a gem install is requested,
-                      in which case it defaults to true)
-
-     client.rb template keys (keys with falsey values are skipped and not
-                      written to /etc/chef/client.rb)
-
- chef:
- client_key:
- environment:
- file_backup_path:
- file_cache_path:
- json_attribs:
- log_level:
- log_location:
- node_name:
- pid_file:
- server_url:
- show_time:
- ssl_verify_mode:
- validation_cert:
- validation_key:
- validation_name:
-"""
-
-import itertools
-import json
-import os
-
-from cloudinit import templater
-from cloudinit import url_helper
-from cloudinit import util
-
-import six
-
-RUBY_VERSION_DEFAULT = "1.8"
-
-CHEF_DIRS = tuple([
- '/etc/chef',
- '/var/log/chef',
- '/var/lib/chef',
- '/var/cache/chef',
- '/var/backups/chef',
- '/var/run/chef',
-])
-REQUIRED_CHEF_DIRS = tuple([
- '/etc/chef',
-])
-
-# Used if fetching chef from an omnibus style package
-OMNIBUS_URL = "https://www.getchef.com/chef/install.sh"
-OMNIBUS_URL_RETRIES = 5
-
-CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem'
-CHEF_FB_PATH = '/etc/chef/firstboot.json'
-CHEF_RB_TPL_DEFAULTS = {
- # These are ruby symbols...
- 'ssl_verify_mode': ':verify_none',
- 'log_level': ':info',
- # These are not symbols...
- 'log_location': '/var/log/chef/client.log',
- 'validation_key': CHEF_VALIDATION_PEM_PATH,
- 'validation_cert': None,
- 'client_key': "/etc/chef/client.pem",
- 'json_attribs': CHEF_FB_PATH,
- 'file_cache_path': "/var/cache/chef",
- 'file_backup_path': "/var/backups/chef",
- 'pid_file': "/var/run/chef/client.pid",
- 'show_time': True,
-}
-CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time'])
-CHEF_RB_TPL_PATH_KEYS = frozenset([
- 'log_location',
- 'validation_key',
- 'client_key',
- 'file_cache_path',
- 'json_attribs',
- 'pid_file',
-])
-CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys())
-CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS)
-CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_PATH_KEYS)
-CHEF_RB_TPL_KEYS.extend([
- 'server_url',
- 'node_name',
- 'environment',
- 'validation_name',
-])
-CHEF_RB_TPL_KEYS = frozenset(CHEF_RB_TPL_KEYS)
-CHEF_RB_PATH = '/etc/chef/client.rb'
-CHEF_EXEC_PATH = '/usr/bin/chef-client'
-CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20'])
-
-
-def is_installed():
- if not os.path.isfile(CHEF_EXEC_PATH):
- return False
- if not os.access(CHEF_EXEC_PATH, os.X_OK):
- return False
- return True
-
-
-def post_run_chef(chef_cfg, log):
- delete_pem = util.get_cfg_option_bool(chef_cfg,
- 'delete_validation_post_exec',
- default=False)
- if delete_pem and os.path.isfile(CHEF_VALIDATION_PEM_PATH):
- os.unlink(CHEF_VALIDATION_PEM_PATH)
-
-
-def get_template_params(iid, chef_cfg, log):
- params = CHEF_RB_TPL_DEFAULTS.copy()
-    # Allow users to overwrite any of the keys they want (if they so choose);
-    # when a value is None, the value will be set to None and no boolean
-    # or string version will be populated...
- for (k, v) in chef_cfg.items():
- if k not in CHEF_RB_TPL_KEYS:
- log.debug("Skipping unknown chef template key '%s'", k)
- continue
- if v is None:
- params[k] = None
- else:
- # This will make the value a boolean or string...
- if k in CHEF_RB_TPL_BOOL_KEYS:
- params[k] = util.get_cfg_option_bool(chef_cfg, k)
- else:
- params[k] = util.get_cfg_option_str(chef_cfg, k)
- # These ones are overwritten to be exact values...
- params.update({
- 'generated_by': util.make_header(),
- 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name',
- default=iid),
- 'environment': util.get_cfg_option_str(chef_cfg, 'environment',
- default='_default'),
- # These two are mandatory...
- 'server_url': chef_cfg['server_url'],
- 'validation_name': chef_cfg['validation_name'],
- })
- return params
-
-
-def handle(name, cfg, cloud, log, _args):
- """Handler method activated by cloud-init."""
-
- # If there isn't a chef key in the configuration don't do anything
- if 'chef' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'chef' key in configuration"), name)
- return
- chef_cfg = cfg['chef']
-
- # Ensure the chef directories we use exist
- chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories')
- if not chef_dirs:
- chef_dirs = list(CHEF_DIRS)
- for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS):
- util.ensure_dir(d)
-
- vkey_path = chef_cfg.get('validation_key', CHEF_VALIDATION_PEM_PATH)
- vcert = chef_cfg.get('validation_cert')
- # special value 'system' means do not overwrite the file
- # but still render the template to contain 'validation_key'
- if vcert:
- if vcert != "system":
- util.write_file(vkey_path, vcert)
- elif not os.path.isfile(vkey_path):
- log.warn("chef validation_cert provided as 'system', but "
- "validation_key path '%s' does not exist.",
- vkey_path)
-
- # Create the chef config from template
- template_fn = cloud.get_template_filename('chef_client.rb')
- if template_fn:
- iid = str(cloud.datasource.get_instance_id())
- params = get_template_params(iid, chef_cfg, log)
- # Do a best effort attempt to ensure that the template values that
-        # are associated with paths have their parent directories created
- # before they are used by the chef-client itself.
- param_paths = set()
- for (k, v) in params.items():
- if k in CHEF_RB_TPL_PATH_KEYS and v:
- param_paths.add(os.path.dirname(v))
- util.ensure_dirs(param_paths)
- templater.render_to_file(template_fn, CHEF_RB_PATH, params)
- else:
- log.warn("No template found, not rendering to %s",
- CHEF_RB_PATH)
-
- # Set the firstboot json
- fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path',
- default=CHEF_FB_PATH)
- if not fb_filename:
- log.info("First boot path empty, not writing first boot json file")
- else:
- initial_json = {}
- if 'run_list' in chef_cfg:
- initial_json['run_list'] = chef_cfg['run_list']
- if 'initial_attributes' in chef_cfg:
- initial_attributes = chef_cfg['initial_attributes']
- for k in list(initial_attributes.keys()):
- initial_json[k] = initial_attributes[k]
- util.write_file(fb_filename, json.dumps(initial_json))
-
-    # Try to install chef, if it's not already installed...
- force_install = util.get_cfg_option_bool(chef_cfg,
- 'force_install', default=False)
- if not is_installed() or force_install:
- run = install_chef(cloud, chef_cfg, log)
- elif is_installed():
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
- else:
- run = False
- if run:
- run_chef(chef_cfg, log)
- post_run_chef(chef_cfg, log)
-
-
-def run_chef(chef_cfg, log):
- log.debug('Running chef-client')
- cmd = [CHEF_EXEC_PATH]
- if 'exec_arguments' in chef_cfg:
- cmd_args = chef_cfg['exec_arguments']
- if isinstance(cmd_args, (list, tuple)):
- cmd.extend(cmd_args)
- elif isinstance(cmd_args, six.string_types):
- cmd.append(cmd_args)
- else:
- log.warn("Unknown type %s provided for chef"
- " 'exec_arguments' expected list, tuple,"
- " or string", type(cmd_args))
- cmd.extend(CHEF_EXEC_DEF_ARGS)
- else:
- cmd.extend(CHEF_EXEC_DEF_ARGS)
- util.subp(cmd, capture=False)
-
-
-def install_chef(cloud, chef_cfg, log):
- # If chef is not installed, we install chef based on 'install_type'
- install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
- 'packages')
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False)
- if install_type == "gems":
- # This will install and run the chef-client from gems
- chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
- ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
- RUBY_VERSION_DEFAULT)
- install_chef_from_gems(ruby_version, chef_version, cloud.distro)
- # Retain backwards compat, by preferring True instead of False
-        # when not provided/overridden...
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True)
- elif install_type == 'packages':
- # This will install and run the chef-client from packages
- cloud.distro.install_packages(('chef',))
- elif install_type == 'omnibus':
-        # This will install as an omnibus unified package
- url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL)
- retries = max(0, util.get_cfg_option_int(chef_cfg,
- "omnibus_url_retries",
- default=OMNIBUS_URL_RETRIES))
- content = url_helper.readurl(url=url, retries=retries)
- with util.tempdir() as tmpd:
- # Use tmpdir over tmpfile to avoid 'text file busy' on execute
- tmpf = "%s/chef-omnibus-install" % tmpd
- util.write_file(tmpf, content, mode=0o700)
- util.subp([tmpf], capture=False)
- else:
- log.warn("Unknown chef install type '%s'", install_type)
- run = False
- return run
-
-
-def get_ruby_packages(version):
- # return a list of packages needed to install ruby at version
- pkgs = ['ruby%s' % version, 'ruby%s-dev' % version]
- if version == "1.8":
- pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8'))
- return pkgs
-
-
-def install_chef_from_gems(ruby_version, chef_version, distro):
- distro.install_packages(get_ruby_packages(ruby_version))
- if not os.path.exists('/usr/bin/gem'):
- util.sym_link('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem')
- if not os.path.exists('/usr/bin/ruby'):
- util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')
- if chef_version:
- util.subp(['/usr/bin/gem', 'install', 'chef',
- '-v %s' % chef_version, '--no-ri',
- '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False)
- else:
- util.subp(['/usr/bin/gem', 'install', 'chef',
- '--no-ri', '--no-rdoc', '--bindir',
- '/usr/bin', '-q'], capture=False)
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
deleted file mode 100644
index bdc32fe6..00000000
--- a/cloudinit/config/cc_debug.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2013 Yahoo! Inc.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-**Summary:** helper to debug cloud-init *internal* data structures.
-
-**Description:** This module outputs various internal information that
-cloud-init sources provide, either to a file or to the console/log
-location that this cloud-init run has been configured with.
-
-It can be configured with the following option structure::
-
- debug:
- verbose: (defaulting to true)
- output: (location to write output, defaulting to console + log)
-
-.. note::
-
- Log configurations are not output.
-"""
-
-import copy
-
-from six import StringIO
-
-from cloudinit import type_utils
-from cloudinit import util
-
-SKIP_KEYS = frozenset(['log_cfgs'])
-
-
-def _make_header(text):
- header = StringIO()
- header.write("-" * 80)
- header.write("\n")
- header.write(text.center(80, ' '))
- header.write("\n")
- header.write("-" * 80)
- header.write("\n")
- return header.getvalue()
-
-
-def _dumps(obj):
- text = util.yaml_dumps(obj, explicit_start=False, explicit_end=False)
- return text.rstrip()
-
-
-def handle(name, cfg, cloud, log, args):
- """Handler method activated by cloud-init."""
-
- verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True)
- if args:
-        # if args are provided (from cmdline) then use the first as the
-        # output file and explicitly set verbose
- out_file = args[0]
- verbose = True
- else:
- out_file = util.get_cfg_by_path(cfg, ('debug', 'output'))
-
- if not verbose:
- log.debug(("Skipping module named %s,"
- " verbose printing disabled"), name)
- return
- # Clean out some keys that we just don't care about showing...
- dump_cfg = copy.deepcopy(cfg)
- for k in SKIP_KEYS:
- dump_cfg.pop(k, None)
- all_keys = list(dump_cfg)
- for k in all_keys:
- if k.startswith("_"):
- dump_cfg.pop(k, None)
- # Now dump it...
- to_print = StringIO()
- to_print.write(_make_header("Config"))
- to_print.write(_dumps(dump_cfg))
- to_print.write("\n")
- to_print.write(_make_header("MetaData"))
- to_print.write(_dumps(cloud.datasource.metadata))
- to_print.write("\n")
- to_print.write(_make_header("Misc"))
- to_print.write("Datasource: %s\n" %
- (type_utils.obj_name(cloud.datasource)))
- to_print.write("Distro: %s\n" % (type_utils.obj_name(cloud.distro)))
- to_print.write("Hostname: %s\n" % (cloud.get_hostname(True)))
- to_print.write("Instance ID: %s\n" % (cloud.get_instance_id()))
- to_print.write("Locale: %s\n" % (cloud.get_locale()))
- to_print.write("Launch IDX: %s\n" % (cloud.launch_index))
- contents = to_print.getvalue()
- content_to_file = []
- for line in contents.splitlines():
- line = "ci-info: %s\n" % (line)
- content_to_file.append(line)
- if out_file:
- util.write_file(out_file, "".join(content_to_file), 0o644, "w")
- else:
- util.multi_log("".join(content_to_file), console=True, stderr=False)
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
deleted file mode 100644
index 3fd2c20f..00000000
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import util
-
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
-
-REJECT_CMD = ['route', 'add', '-host', '169.254.169.254', 'reject']
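-# A one-line cloud-config enables this module (sketch):
-#
-#   disable_ec2_metadata: true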
-
-
-def handle(name, cfg, _cloud, log, _args):
- disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
- if disabled:
- util.subp(REJECT_CMD, capture=False)
- else:
- log.debug(("Skipping module named %s,"
- " disabling the ec2 route not enabled"), name)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
deleted file mode 100644
index b642f1f8..00000000
--- a/cloudinit/config/cc_disk_setup.py
+++ /dev/null
@@ -1,863 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-import logging
-import os
-import shlex
-
-frequency = PER_INSTANCE
-
-# Define the commands to use
-UDEVADM_CMD = util.which('udevadm')
-SFDISK_CMD = util.which("sfdisk")
-SGDISK_CMD = util.which("sgdisk")
-LSBLK_CMD = util.which("lsblk")
-BLKID_CMD = util.which("blkid")
-BLKDEV_CMD = util.which("blockdev")
-WIPEFS_CMD = util.which("wipefs")
-
-LOG = logging.getLogger(__name__)
-
-
-def handle(_name, cfg, cloud, log, _args):
- """
- See doc/examples/cloud-config_disk-setup.txt for documentation on the
- format.
- """
- disk_setup = cfg.get("disk_setup")
- if isinstance(disk_setup, dict):
- update_disk_setup_devices(disk_setup, cloud.device_name_to_device)
- log.debug("Partitioning disks: %s", str(disk_setup))
- for disk, definition in disk_setup.items():
- if not isinstance(definition, dict):
- log.warn("Invalid disk definition for %s" % disk)
- continue
-
- try:
- log.debug("Creating new partition table/disk")
- util.log_time(logfunc=LOG.debug,
- msg="Creating partition on %s" % disk,
- func=mkpart, args=(disk, definition))
- except Exception as e:
- util.logexc(LOG, "Failed partitioning operation\n%s" % e)
-
- fs_setup = cfg.get("fs_setup")
- if isinstance(fs_setup, list):
- log.debug("setting up filesystems: %s", str(fs_setup))
- update_fs_setup_devices(fs_setup, cloud.device_name_to_device)
- for definition in fs_setup:
- if not isinstance(definition, dict):
- log.warn("Invalid file system definition: %s" % definition)
- continue
-
- try:
- log.debug("Creating new filesystem.")
- device = definition.get('device')
- util.log_time(logfunc=LOG.debug,
- msg="Creating fs for %s" % device,
- func=mkfs, args=(definition,))
- except Exception as e:
- util.logexc(LOG, "Failed during filesystem operation\n%s" % e)
-
-
-def update_disk_setup_devices(disk_setup, tformer):
-    # update the 'disk_setup' dictionary anywhere a device name may occur,
-    # replacing it with the response from 'tformer'
- for origname in disk_setup.keys():
- transformed = tformer(origname)
- if transformed is None or transformed == origname:
- continue
- if transformed in disk_setup:
- LOG.info("Replacing %s in disk_setup for translation of %s",
- origname, transformed)
- del disk_setup[transformed]
-
- disk_setup[transformed] = disk_setup[origname]
- disk_setup[transformed]['_origname'] = origname
- del disk_setup[origname]
- LOG.debug("updated disk_setup device entry '%s' to '%s'",
- origname, transformed)
-
-
-def update_fs_setup_devices(disk_setup, tformer):
-    # update the 'fs_setup' dictionary anywhere a device name may occur,
-    # replacing it with the response from 'tformer'
- for definition in disk_setup:
- if not isinstance(definition, dict):
- LOG.warn("entry in disk_setup not a dict: %s", definition)
- continue
-
- origname = definition.get('device')
-
- if origname is None:
- continue
-
- (dev, part) = util.expand_dotted_devname(origname)
-
- tformed = tformer(dev)
- if tformed is not None:
- dev = tformed
- LOG.debug("%s is mapped to disk=%s part=%s",
- origname, tformed, part)
- definition['_origname'] = origname
- definition['device'] = tformed
-
- if part and 'partition' in definition:
- definition['_partition'] = definition['partition']
- definition['partition'] = part
-
-
-def value_splitter(values, start=None):
- """
-    Returns the key/value pairs of an output string
-    like: FOO='BAR' HOME='127.0.0.1'
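-
-    For example (illustrative):
-        list(value_splitter("FOO='BAR' HOME='127.0.0.1'"))
-        -> [('FOO', 'BAR'), ('HOME', '127.0.0.1')]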
- """
- _values = shlex.split(values)
- if start:
- _values = _values[start:]
-
- for key, value in [x.split('=') for x in _values]:
- yield key, value
-
-
-def enumerate_disk(device, nodeps=False):
- """
- Enumerate the elements of a child device.
-
- Parameters:
- device: the kernel device name
- nodeps <BOOL>: don't enumerate children devices
-
- Return a dict describing the disk:
-    type: the entry type, i.e. disk or part
- fstype: the filesystem type, if it exists
- label: file system label, if it exists
- name: the device name, i.e. sda
- """
-
- lsblk_cmd = [LSBLK_CMD, '--pairs', '--output', 'NAME,TYPE,FSTYPE,LABEL',
- device]
-
- if nodeps:
- lsblk_cmd.append('--nodeps')
-
- info = None
- try:
- info, _err = util.subp(lsblk_cmd)
- except Exception as e:
- raise Exception("Failed during disk check for %s\n%s" % (device, e))
-
- parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0]
-
- for part in parts:
- d = {
- 'name': None,
- 'type': None,
- 'fstype': None,
- 'label': None,
- }
-
- for key, value in value_splitter(part):
- d[key.lower()] = value
-
- yield d
-
-
-def device_type(device):
- """
- Return the device type of the device by calling lsblk.
- """
-
- for d in enumerate_disk(device, nodeps=True):
- if "type" in d:
- return d["type"].lower()
- return None
-
-
-def is_device_valid(name, partition=False):
- """
- Check if the device is a valid device.
- """
- d_type = ""
- try:
- d_type = device_type(name)
- except Exception:
- LOG.warn("Query against device %s failed" % name)
- return False
-
- if partition and d_type == 'part':
- return True
- elif not partition and d_type == 'disk':
- return True
- return False
-
-
-def check_fs(device):
- """
- Check if the device has a filesystem on it
-
- Output of blkid is generally something like:
- /dev/sda: LABEL="Backup500G" UUID="..." TYPE="ext4"
-
- Return values are device, label, type, uuid
- """
- out, label, fs_type, uuid = None, None, None, None
-
- blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device]
- try:
- out, _err = util.subp(blkid_cmd, rcs=[0, 2])
- except Exception as e:
- raise Exception("Failed during disk check for %s\n%s" % (device, e))
-
- if out:
- if len(out.splitlines()) == 1:
- for key, value in value_splitter(out, start=1):
- if key.lower() == 'label':
- label = value
- elif key.lower() == 'type':
- fs_type = value
- elif key.lower() == 'uuid':
- uuid = value
-
- return label, fs_type, uuid
-
-
-def is_filesystem(device):
- """
- Returns true if the device has a file system.
- """
- _, fs_type, _ = check_fs(device)
- return fs_type
-
-
-def find_device_node(device, fs_type=None, label=None, valid_targets=None,
- label_match=True, replace_fs=None):
- """
-    Find a device that either matches the spec, or the first
-    suitable device available.
-
-    The return value is (<device>, <bool>) where the device is the
- device to use and the bool is whether the device matches the
- fs_type and label.
-
- Note: This works with GPT partition tables!
- """
- # label of None is same as no label
- if label is None:
- label = ""
-
- if not valid_targets:
- valid_targets = ['disk', 'part']
-
- raw_device_used = False
- for d in enumerate_disk(device):
-
- if d['fstype'] == replace_fs and label_match is False:
- # We found a device where we want to replace the FS
- return ('/dev/%s' % d['name'], False)
-
- if (d['fstype'] == fs_type and
- ((label_match and d['label'] == label) or not label_match)):
- # If we find a matching device, we return that
- return ('/dev/%s' % d['name'], True)
-
- if d['type'] in valid_targets:
-
- if d['type'] != 'disk' or d['fstype']:
- raw_device_used = True
-
- if d['type'] == 'disk':
-            # Skip the raw disk, it's the default
- pass
-
- elif not d['fstype']:
- return ('/dev/%s' % d['name'], False)
-
- if not raw_device_used:
- return (device, False)
-
- LOG.warn("Failed to find device during available device search.")
- return (None, False)
-
-
-def is_disk_used(device):
- """
-    Check if the device is currently used. Returns true if the device
-    has either a file system or a partition entry on it.
- """
-
-    # If the child count is higher than 1, then there are child nodes
- # such as partition or device mapper nodes
- if len(list(enumerate_disk(device))) > 1:
- return True
-
-    # If we see a file system, then it's used
- _, check_fstype, _ = check_fs(device)
- if check_fstype:
- return True
-
- return False
-
-
-def get_dyn_func(*args):
- """
- Call the appropriate function.
-
- The first value is the template for function name
- The second value is the template replacement
-    The remaining values are passed to the function
-
- For example: get_dyn_func("foo_%s", 'bar', 1, 2, 3,)
- would call "foo_bar" with args of 1, 2, 3
- """
- if len(args) < 2:
- raise Exception("Unable to determine dynamic funcation name")
-
- func_name = (args[0] % args[1])
- func_args = args[2:]
-
- try:
- if func_args:
- return globals()[func_name](*func_args)
- else:
- return globals()[func_name]
-
- except KeyError:
- raise Exception("No such function %s to call!" % func_name)
-
-
-def get_mbr_hdd_size(device):
- size_cmd = [SFDISK_CMD, '--show-size', device]
- size = None
- try:
- size, _err = util.subp(size_cmd)
- except Exception as e:
- raise Exception("Failed to get %s size\n%s" % (device, e))
-
- return int(size.strip())
-
-
-def get_gpt_hdd_size(device):
- out, _ = util.subp([SGDISK_CMD, '-p', device])
- return out.splitlines()[0].split()[2]
-
-
-def get_hdd_size(table_type, device):
- """
- Returns the hard disk size.
- This works with any disk type, including GPT.
- """
- return get_dyn_func("get_%s_hdd_size", table_type, device)
-
-
-def check_partition_mbr_layout(device, layout):
- """
- Returns true if the partition layout matches the one on the disk
-
- Layout should be a list of values. At this time, this only
-    verifies that the number of partitions and their type labels are correct.
- """
-
- read_parttbl(device)
- prt_cmd = [SFDISK_CMD, "-l", device]
- try:
- out, _err = util.subp(prt_cmd, data="%s\n" % layout)
- except Exception as e:
- raise Exception("Error running partition command on %s\n%s" % (
- device, e))
-
- found_layout = []
- for line in out.splitlines():
- _line = line.split()
- if len(_line) == 0:
- continue
-
- if device in _line[0]:
- # We don't understand extended partitions yet
- if _line[-1].lower() in ['extended', 'empty']:
- continue
-
- # Find the partition types
- type_label = None
- for x in sorted(range(1, len(_line)), reverse=True):
- if _line[x].isdigit() and _line[x] != '/':
- type_label = _line[x]
- break
-
- found_layout.append(type_label)
- return found_layout
-
-
-def check_partition_gpt_layout(device, layout):
- prt_cmd = [SGDISK_CMD, '-p', device]
- try:
- out, _err = util.subp(prt_cmd)
- except Exception as e:
- raise Exception("Error running partition command on %s\n%s" % (
- device, e))
-
- out_lines = iter(out.splitlines())
- # Skip header
- for line in out_lines:
- if line.strip().startswith('Number'):
- break
-
- return [line.strip().split()[-1] for line in out_lines]
-
-
-def check_partition_layout(table_type, device, layout):
- """
-    See if the partition layout matches.
-
-    This is a future-proofing function. In order
- to add support for other disk layout schemes, add a
- function called check_partition_%s_layout
- """
- found_layout = get_dyn_func(
- "check_partition_%s_layout", table_type, device, layout)
-
- if isinstance(layout, bool):
- # if we are using auto partitioning, or "True" be happy
- # if a single partition exists.
- if layout and len(found_layout) >= 1:
- return True
- return False
-
- else:
- if len(found_layout) != len(layout):
- return False
- else:
- # This just makes sure that the number of requested
- # partitions and the type labels are right
- for x in range(1, len(layout) + 1):
- if isinstance(layout[x - 1], tuple):
-                    _, part_type = layout[x - 1]
-                    if int(found_layout[x - 1]) != int(part_type):
- return False
- return True
-
-
-def get_partition_mbr_layout(size, layout):
- """
- Calculate the layout of the partition table. Partition sizes
- are defined as percentage values or a tuple of percentage and
- partition type.
-
- For example:
-        [ 33, [66, 82] ]
-
-    Defines the first partition to be a size of 1/3 the disk,
-    while the remaining 2/3 will be of type Linux Swap (82).
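-
-    With two entries as above, the generated sfdisk input takes the
-    form (the computed size depends on the disk; sketch only):
-
-        ,<size of first partition in MiB>,83
-        ,,82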
- """
-
- if not isinstance(layout, list) and isinstance(layout, bool):
- # Create a single partition
- return "0,"
-
- if ((len(layout) == 0 and isinstance(layout, list)) or
- not isinstance(layout, list)):
- raise Exception("Partition layout is invalid")
-
- last_part_num = len(layout)
- if last_part_num > 4:
- raise Exception("Only simply partitioning is allowed.")
-
- part_definition = []
- part_num = 0
- for part in layout:
- part_type = 83 # Default to Linux
- percent = part
- part_num += 1
-
- if isinstance(part, list):
- if len(part) != 2:
- raise Exception("Partition was incorrectly defined: %s" % part)
- percent, part_type = part
-
- part_size = int((float(size) * (float(percent) / 100)) / 1024)
-
- if part_num == last_part_num:
- part_definition.append(",,%s" % part_type)
- else:
- part_definition.append(",%s,%s" % (part_size, part_type))
-
- sfdisk_definition = "\n".join(part_definition)
- if len(part_definition) > 4:
- raise Exception("Calculated partition definition is too big\n%s" %
- sfdisk_definition)
-
- return sfdisk_definition
-
-
-def get_partition_gpt_layout(size, layout):
- if isinstance(layout, bool):
- return [(None, [0, 0])]
-
- partition_specs = []
- for partition in layout:
- if isinstance(partition, list):
- if len(partition) != 2:
- raise Exception(
- "Partition was incorrectly defined: %s" % partition)
- percent, partition_type = partition
- else:
- percent = partition
- partition_type = None
-
- part_size = int(float(size) * (float(percent) / 100))
- partition_specs.append((partition_type, [0, '+{}'.format(part_size)]))
-
- # The last partition should use up all remaining space
- partition_specs[-1][-1][-1] = 0
- return partition_specs
-
-
-def purge_disk_ptable(device):
- # wipe the first and last megabyte of a disk (or file)
- # gpt stores partition table both at front and at end.
-    null = b'\0'
- start_len = 1024 * 1024
- end_len = 1024 * 1024
- with open(device, "rb+") as fp:
- fp.write(null * (start_len))
- fp.seek(-end_len, os.SEEK_END)
- fp.write(null * end_len)
- fp.flush()
-
- read_parttbl(device)
-
-
-def purge_disk(device):
- """
-    Remove partition table entries
- """
-
- # wipe any file systems first
- for d in enumerate_disk(device):
- if d['type'] not in ["disk", "crypt"]:
- wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']]
- try:
- LOG.info("Purging filesystem on /dev/%s" % d['name'])
- util.subp(wipefs_cmd)
- except Exception:
- raise Exception("Failed FS purge of /dev/%s" % d['name'])
-
- purge_disk_ptable(device)
-
-
-def get_partition_layout(table_type, size, layout):
- """
- Call the appropriate function for creating the table
- definition. Returns the table definition
-
- This is a future proofing function. To add support for
- other layouts, simply add a "get_partition_%s_layout"
- function.
- """
- return get_dyn_func("get_partition_%s_layout", table_type, size, layout)
-
-
-def read_parttbl(device):
- """
-    Re-read the partition table with 'blockdev --rereadpt', settling
-    udev before and after so device nodes stay consistent.
- """
- blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
- udev_cmd = [UDEVADM_CMD, 'settle']
- try:
- util.subp(udev_cmd)
- util.subp(blkdev_cmd)
- util.subp(udev_cmd)
- except Exception as e:
- util.logexc(LOG, "Failed reading the partition table %s" % e)
-
-
-def exec_mkpart_mbr(device, layout):
- """
-    Break out mbr partition creation to allow for future partition
-    types, i.e. gpt
- """
- # Create the partitions
- prt_cmd = [SFDISK_CMD, "--Linux", "-uM", device]
- try:
- util.subp(prt_cmd, data="%s\n" % layout)
- except Exception as e:
- raise Exception("Failed to partition device %s\n%s" % (device, e))
-
- read_parttbl(device)
-
-
-def exec_mkpart_gpt(device, layout):
- try:
- util.subp([SGDISK_CMD, '-Z', device])
- for index, (partition_type, (start, end)) in enumerate(layout):
- index += 1
- util.subp([SGDISK_CMD,
- '-n', '{}:{}:{}'.format(index, start, end), device])
- if partition_type is not None:
- util.subp(
- [SGDISK_CMD,
- '-t', '{}:{}'.format(index, partition_type), device])
- except Exception:
- LOG.warn("Failed to partition device %s" % device)
- raise
-
-
-def exec_mkpart(table_type, device, layout):
- """
- Fetches the function for creating the table type.
-    This allows us to dynamically find which function to call.
-
-    Parameters:
- table_type: type of partition table to use
- device: the device to work on
- layout: layout definition specific to partition table
- """
- return get_dyn_func("exec_mkpart_%s", table_type, device, layout)
-
-
-def mkpart(device, definition):
- """
- Creates the partition table.
-
- Parameters:
- definition: dictionary describing how to create the partition.
-
- The following are supported values in the dict:
- overwrite: Should the partition table be created regardless
-                   of any pre-existing data?
- layout: the layout of the partition table
- table_type: Which partition table to use, defaults to MBR
- device: the device to work on.
- """
- # ensure that we get a real device rather than a symbolic link
- device = os.path.realpath(device)
-
- LOG.debug("Checking values for %s definition" % device)
- overwrite = definition.get('overwrite', False)
- layout = definition.get('layout', False)
- table_type = definition.get('table_type', 'mbr')
-
- # Check if the default device is a partition or not
- LOG.debug("Checking against default devices")
-
-    if not layout:
- LOG.debug("Device is not to be partitioned, skipping")
- return # Device is not to be partitioned
-
- # This prevents you from overwriting the device
- LOG.debug("Checking if device %s is a valid device", device)
- if not is_device_valid(device):
- raise Exception("Device %s is not a disk device!", device)
-
- # Remove the partition table entries
- if isinstance(layout, str) and layout.lower() == "remove":
- LOG.debug("Instructed to remove partition table entries")
- purge_disk(device)
- return
-
- LOG.debug("Checking if device layout matches")
- if check_partition_layout(table_type, device, layout):
- LOG.debug("Device partitioning layout matches")
- return True
-
- LOG.debug("Checking if device is safe to partition")
- if not overwrite and (is_disk_used(device) or is_filesystem(device)):
- LOG.debug("Skipping partitioning on configured device %s" % device)
- return
-
- LOG.debug("Checking for device size")
- device_size = get_hdd_size(table_type, device)
-
- LOG.debug("Calculating partition layout")
- part_definition = get_partition_layout(table_type, device_size, layout)
- LOG.debug(" Layout is: %s" % part_definition)
-
- LOG.debug("Creating partition table on %s", device)
- exec_mkpart(table_type, device, part_definition)
-
- LOG.debug("Partition table created for %s", device)
-
-
-def lookup_force_flag(fs):
- """
-    A force flag might be -F or -f; this looks it up.
- """
- flags = {
- 'ext': '-F',
- 'btrfs': '-f',
- 'xfs': '-f',
- 'reiserfs': '-f',
- }
-
- if 'ext' in fs.lower():
- fs = 'ext'
-
- if fs.lower() in flags:
- return flags[fs]
-
- LOG.warn("Force flag for %s is unknown." % fs)
- return ''
-
-
-def mkfs(fs_cfg):
- """
- Create a file system on the device.
-
- label: defines the label to use on the device
- fs_cfg: defines how the filesystem is to look
- The following values are required generally:
- device: which device or cloud defined default_device
- filesystem: which file system type
- overwrite: indiscriminately create the file system
- partition: when device does not define a partition,
- setting this to a number will mean
- device + partition. When set to 'auto', the
- first free device or the first device which
- matches both label and type will be used.
-
- 'any' means the first filesystem that matches
- on the device.
-
- When 'cmd' is provided then no other parameter is required.
- """
- label = fs_cfg.get('label')
- device = fs_cfg.get('device')
- partition = str(fs_cfg.get('partition', 'any'))
- fs_type = fs_cfg.get('filesystem')
- fs_cmd = fs_cfg.get('cmd', [])
- fs_opts = fs_cfg.get('extra_opts', [])
- fs_replace = fs_cfg.get('replace_fs', False)
- overwrite = fs_cfg.get('overwrite', False)
-
- # ensure that we get a real device rather than a symbolic link
- device = os.path.realpath(device)
-
- # This allows you to define the default ephemeral or swap
- LOG.debug("Checking %s against default devices", device)
-
- if not partition or partition.isdigit():
- # Handle manual definition of partition
- if partition.isdigit():
- device = "%s%s" % (device, partition)
- LOG.debug("Manual request of partition %s for %s",
- partition, device)
-
- # Check to see if the fs already exists
- LOG.debug("Checking device %s", device)
- check_label, check_fstype, _ = check_fs(device)
- LOG.debug("Device %s has %s %s", device, check_label, check_fstype)
-
- if check_label == label and check_fstype == fs_type:
- LOG.debug("Existing file system found at %s", device)
-
- if not overwrite:
- LOG.debug("Device %s has required file system", device)
- return
- else:
- LOG.warn("Destroying filesystem on %s", device)
-
- else:
- LOG.debug("Device %s is cleared for formating", device)
-
- elif partition and str(partition).lower() in ('auto', 'any'):
- # For auto devices, we match if the filesystem does exist
- odevice = device
- LOG.debug("Identifying device to create %s filesytem on", label)
-
-        # 'any' means pick the first match on the device with matching fs_type
- label_match = True
- if partition.lower() == 'any':
- label_match = False
-
- device, reuse = find_device_node(device, fs_type=fs_type, label=label,
- label_match=label_match,
- replace_fs=fs_replace)
- LOG.debug("Automatic device for %s identified as %s", odevice, device)
-
- if reuse:
- LOG.debug("Found filesystem match, skipping formating.")
- return
-
- if not reuse and fs_replace and device:
- LOG.debug("Replacing file system on %s as instructed." % device)
-
- if not device:
- LOG.debug("No device aviable that matches request. "
- "Skipping fs creation for %s", fs_cfg)
- return
- elif not partition or str(partition).lower() == 'none':
- LOG.debug("Using the raw device to place filesystem %s on" % label)
-
- else:
- LOG.debug("Error in device identification handling.")
- return
-
- LOG.debug("File system %s will be created on %s", label, device)
-
- # Make sure the device is defined
- if not device:
- LOG.warn("Device is not known: %s", device)
- return
-
- # Check that we can create the FS
- if not (fs_type or fs_cmd):
- raise Exception("No way to create filesystem '%s'. fs_type or fs_cmd "
- "must be set.", label)
-
- # Create the commands
- if fs_cmd:
- fs_cmd = fs_cfg['cmd'] % {
- 'label': label,
- 'filesystem': fs_type,
- 'device': device,
- }
- else:
- # Find the mkfs command
- mkfs_cmd = util.which("mkfs.%s" % fs_type)
- if not mkfs_cmd:
- mkfs_cmd = util.which("mk%s" % fs_type)
-
- if not mkfs_cmd:
- LOG.warn("Cannot create fstype '%s'. No mkfs.%s command", fs_type,
- fs_type)
- return
-
- fs_cmd = [mkfs_cmd, device]
-
- if label:
- fs_cmd.extend(["-L", label])
-
-    # File systems that support a force flag; skip an empty flag so we
-    # don't pass a blank argument to mkfs
-    if overwrite or device_type(device) == "disk":
-        force_flag = lookup_force_flag(fs_type)
-        if force_flag:
-            fs_cmd.append(force_flag)
-
-    # Add the extra FS options
- if fs_opts:
- fs_cmd.extend(fs_opts)
-
- LOG.debug("Creating file system %s on %s", label, device)
- LOG.debug(" Using cmd: %s", " ".join(fs_cmd))
- try:
- util.subp(fs_cmd)
- except Exception as e:
- raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e))
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
deleted file mode 100644
index 98828b9e..00000000
--- a/cloudinit/config/cc_emit_upstart.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
-
-frequency = PER_ALWAYS
-
-distros = ['ubuntu', 'debian']
-LOG = logging.getLogger(__name__)
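-# The event names come in as module args; a sketch of a cloud.cfg entry
-# (assuming the list-with-args form of a module entry):
-#
-#   cloud_init_modules:
-#     - [emit_upstart, cloud-config]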
-
-
-def is_upstart_system():
- if not os.path.isfile("/sbin/initctl"):
- LOG.debug("no /sbin/initctl located")
- return False
-
- myenv = os.environ.copy()
- if 'UPSTART_SESSION' in myenv:
- del myenv['UPSTART_SESSION']
- check_cmd = ['initctl', 'version']
- try:
- (out, err) = util.subp(check_cmd, env=myenv)
- return 'upstart' in out
- except util.ProcessExecutionError as e:
- LOG.debug("'%s' returned '%s', not using upstart",
- ' '.join(check_cmd), e.exit_code)
- return False
-
-
-def handle(name, _cfg, cloud, log, args):
- event_names = args
- if not event_names:
- # Default to the 'cloud-config'
- # event for backwards compat.
- event_names = ['cloud-config']
-
- if not is_upstart_system():
- log.debug("not upstart system, '%s' disabled", name)
- return
-
- cfgpath = cloud.paths.get_ipath_cur("cloud_config")
- for n in event_names:
- cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath]
- try:
- util.subp(cmd)
- except Exception as e:
- # TODO(harlowja), use log exception from utils??
- log.warn("Emission of upstart event %s failed due to: %s", n, e)
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
deleted file mode 100644
index 545fee22..00000000
--- a/cloudinit/config/cc_fan.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-fan module allows configuration of Ubuntu Fan
- https://wiki.ubuntu.com/FanNetworking
-
-Example config:
- #cloud-config
- fan:
- config: |
- # fan 240
- 10.0.0.0/8 eth0/16 dhcp
- 10.0.0.0/8 eth1/16 dhcp off
- # fan 241
- 241.0.0.0/8 eth0/16 dhcp
- config_path: /etc/network/fan
-
-If cloud-init sees a 'fan' entry in cloud-config it will
- a.) write 'config_path' with the contents
- b.) install the package 'ubuntu-fan' if it is not installed
- c.) ensure the service is started (or restarted if was previously running)
-"""
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-frequency = PER_INSTANCE
-
-BUILTIN_CFG = {
- 'config': None,
- 'config_path': '/etc/network/fan',
-}
-
-
-def stop_update_start(service, config_file, content, systemd=False):
- if systemd:
- cmds = {'stop': ['systemctl', 'stop', service],
- 'start': ['systemctl', 'start', service],
- 'enable': ['systemctl', 'enable', service]}
- else:
-        cmds = {'stop': ['service', service, 'stop'],
-                'start': ['service', service, 'start']}
-
- def run(cmd, msg):
- try:
- return util.subp(cmd, capture=True)
- except util.ProcessExecutionError as e:
- LOG.warn("failed: %s (%s): %s", service, cmd, e)
- return False
-
- stop_failed = not run(cmds['stop'], msg='stop %s' % service)
- if not content.endswith('\n'):
- content += '\n'
- util.write_file(config_file, content, omode="w")
-
- ret = run(cmds['start'], msg='start %s' % service)
- if ret and stop_failed:
- LOG.warn("success: %s started", service)
-
- if 'enable' in cmds:
- ret = run(cmds['enable'], msg='enable %s' % service)
-
- return ret
-
-
-def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('fan')
- if not cfgin:
- cfgin = {}
- mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
-
- if not mycfg.get('config'):
- LOG.debug("%s: no 'fan' config entry. disabling", name)
- return
-
- util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w")
- distro = cloud.distro
- if not util.which('fanctl'):
- distro.install_packages(['ubuntu-fan'])
-
- stop_update_start(
- service='ubuntu-fan', config_file=mycfg.get('config_path'),
- content=mycfg.get('config'), systemd=distro.uses_systemd())
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
deleted file mode 100644
index c9021eb1..00000000
--- a/cloudinit/config/cc_final_message.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import templater
-from cloudinit import util
-from cloudinit import version
-
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
-
-# Jinja formatted default message
-FINAL_MESSAGE_DEF = (
- "## template: jinja\n"
- "Cloud-init v. {{version}} finished at {{timestamp}}."
- " Datasource {{datasource}}. Up {{uptime}} seconds"
-)
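-# A custom message can be given via cloud-config; the same substitutions
-# as the default message are available (sketch):
-#
-#   final_message: "up {{uptime}} seconds, at {{timestamp}}"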
-
-
-def handle(_name, cfg, cloud, log, args):
-
- msg_in = ''
- if len(args) != 0:
- msg_in = str(args[0])
- else:
- msg_in = util.get_cfg_option_str(cfg, "final_message", "")
-
- msg_in = msg_in.strip()
- if not msg_in:
- msg_in = FINAL_MESSAGE_DEF
-
- uptime = util.uptime()
- ts = util.time_rfc2822()
- cver = version.version_string()
- try:
- subs = {
- 'uptime': uptime,
- 'timestamp': ts,
- 'version': cver,
- 'datasource': str(cloud.datasource),
- }
- subs.update(dict([(k.upper(), v) for k, v in subs.items()]))
- util.multi_log("%s\n" % (templater.render_string(msg_in, subs)),
- console=False, stderr=True, log=log)
- except Exception:
- util.logexc(log, "Failed to render final message template")
-
- boot_fin_fn = cloud.paths.boot_finished
- try:
- contents = "%s - %s - v. %s\n" % (uptime, ts, cver)
- util.write_file(boot_fin_fn, contents)
- except Exception:
- util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn)
-
- if cloud.datasource.is_disconnected:
- log.warn("Used fallback datasource")
diff --git a/cloudinit/config/cc_foo.py b/cloudinit/config/cc_foo.py
deleted file mode 100644
index 95aab4dd..00000000
--- a/cloudinit/config/cc_foo.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.settings import PER_INSTANCE
-
-# Modules are expected to have the following attributes.
-# 1. A required 'handle' method which takes the following params.
-# a) The name will not be this files name, but instead
-# the name specified in configuration (which is the name
-# which will be used to find this module).
-# b) A configuration object that is the result of the merging
-# of cloud configs configuration with legacy configuration
-# as well as any datasource provided configuration
-# c) A cloud object that can be used to access various
-# datasource and paths for the given distro and data provided
-# by the various datasource instance types.
-# d) An argument list that may or may not be empty for this module.
-# Typically those are from module configuration where the module
-# is defined with some extra configuration that will eventually
-# be translated from yaml into arguments to this module.
-# 2. An optional 'frequency' that defines how often this module should be run.
-# Typically one of PER_INSTANCE, PER_ALWAYS, PER_ONCE. If not
-# provided PER_INSTANCE will be assumed.
-# See settings.py for these constants.
-# 3. An optional 'distros' array/set/tuple that defines the known distros
-#    this module will work with (if not all of them). This is used to write
-#    a warning out if a module is being run on an untested distribution for
-#    informational purposes. If nonexistent, all distros are assumed and
-# no warning occurs.
-
-frequency = PER_INSTANCE
-
-
-def handle(name, _cfg, _cloud, log, _args):
- log.debug("Hi from module %s", name)
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
deleted file mode 100644
index 40560f11..00000000
--- a/cloudinit/config/cc_growpart.py
+++ /dev/null
@@ -1,300 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import os.path
-import re
-import stat
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
-
-frequency = PER_ALWAYS
-
-DEFAULT_CONFIG = {
- 'mode': 'auto',
- 'devices': ['/'],
- 'ignore_growroot_disabled': False,
-}
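-# A cloud-config mirroring the defaults above (sketch):
-#
-#   growpart:
-#     mode: auto
-#     devices: ['/']
-#     ignore_growroot_disabled: false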
-
-
-class RESIZE(object):
- SKIPPED = "SKIPPED"
- CHANGED = "CHANGED"
- NOCHANGE = "NOCHANGE"
- FAILED = "FAILED"
-
-
-LOG = logging.getLogger(__name__)
-
-
-def resizer_factory(mode):
- resize_class = None
- if mode == "auto":
- for (_name, resizer) in RESIZERS:
- cur = resizer()
- if cur.available():
- resize_class = cur
- break
-
- if not resize_class:
- raise ValueError("No resizers available")
-
- else:
- mmap = {}
- for (k, v) in RESIZERS:
- mmap[k] = v
-
- if mode not in mmap:
- raise TypeError("unknown resize mode %s" % mode)
-
- mclass = mmap[mode]()
- if mclass.available():
- resize_class = mclass
-
- if not resize_class:
- raise ValueError("mode %s not available" % mode)
-
- return resize_class
-
-
-class ResizeFailedException(Exception):
- pass
-
-
-class ResizeGrowPart(object):
- def available(self):
- myenv = os.environ.copy()
- myenv['LANG'] = 'C'
-
- try:
- (out, _err) = util.subp(["growpart", "--help"], env=myenv)
- if re.search(r"--update\s+", out, re.DOTALL):
- return True
-
- except util.ProcessExecutionError:
- pass
- return False
-
- def resize(self, diskdev, partnum, partdev):
- before = get_size(partdev)
- try:
- util.subp(["growpart", '--dry-run', diskdev, partnum])
- except util.ProcessExecutionError as e:
- if e.exit_code != 1:
- util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)",
- diskdev, partnum)
- raise ResizeFailedException(e)
- return (before, before)
-
- try:
- util.subp(["growpart", diskdev, partnum])
- except util.ProcessExecutionError as e:
- util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum)
- raise ResizeFailedException(e)
-
- return (before, get_size(partdev))
-
-
-class ResizeGpart(object):
- def available(self):
- if not util.which('gpart'):
- return False
- return True
-
- def resize(self, diskdev, partnum, partdev):
- """
- GPT disks store metadata at the beginning (primary) and at the
- end (secondary) of the disk. When launching an image with a
- larger disk compared to the original image, the secondary copy
-        is lost. Thus, the metadata will be marked CORRUPT, and needs to
- be recovered.
- """
- try:
- util.subp(["gpart", "recover", diskdev])
- except util.ProcessExecutionError as e:
- if e.exit_code != 0:
- util.logexc(LOG, "Failed: gpart recover %s", diskdev)
- raise ResizeFailedException(e)
-
- before = get_size(partdev)
- try:
- util.subp(["gpart", "resize", "-i", partnum, diskdev])
- except util.ProcessExecutionError as e:
- util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev)
- raise ResizeFailedException(e)
-
- # Since growing the FS requires a reboot, make sure we reboot
- # first when this module has finished.
- open('/var/run/reboot-required', 'a').close()
-
- return (before, get_size(partdev))
-
-
-def get_size(filename):
- fd = os.open(filename, os.O_RDONLY)
- try:
- return os.lseek(fd, 0, os.SEEK_END)
- finally:
- os.close(fd)
-
-
-def device_part_info(devpath):
- # convert an entry in /dev/ to parent disk and partition number
-
- # input of /dev/vdb or /dev/disk/by-label/foo
- # rpath is hopefully a real-ish path in /dev (vda, sdb..)
- rpath = os.path.realpath(devpath)
-
- bname = os.path.basename(rpath)
- syspath = "/sys/class/block/%s" % bname
-
- # FreeBSD doesn't know of sysfs so just get everything we need from
- # the device, like /dev/vtbd0p2.
- if util.system_info()["platform"].startswith('FreeBSD'):
- m = re.search('^(/dev/.+)p([0-9])$', devpath)
- return (m.group(1), m.group(2))
-
- if not os.path.exists(syspath):
- raise ValueError("%s had no syspath (%s)" % (devpath, syspath))
-
- ptpath = os.path.join(syspath, "partition")
- if not os.path.exists(ptpath):
- raise TypeError("%s not a partition" % devpath)
-
- ptnum = util.load_file(ptpath).rstrip()
-
- # for a partition, real syspath is something like:
- # /sys/devices/pci0000:00/0000:00:04.0/virtio1/block/vda/vda1
- rsyspath = os.path.realpath(syspath)
- disksyspath = os.path.dirname(rsyspath)
-
- diskmajmin = util.load_file(os.path.join(disksyspath, "dev")).rstrip()
- diskdevpath = os.path.realpath("/dev/block/%s" % diskmajmin)
-
- # diskdevpath has something like 253:0
- # and udev has put links in /dev/block/253:0 to the device name in /dev/
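-    # As an illustrative sketch: device_part_info('/dev/vda1') resolves
-    # through sysfs and returns something like ('/dev/vda', '1')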
- return (diskdevpath, ptnum)
-
-
-def devent2dev(devent):
- if devent.startswith("/dev/"):
- return devent
- else:
- result = util.get_mount_info(devent)
- if not result:
- raise ValueError("Could not determine device of '%s' % dev_ent")
- return result[0]
-
-
-def resize_devices(resizer, devices):
- # returns a tuple of tuples containing (entry-in-devices, action, message)
- info = []
- for devent in devices:
- try:
- blockdev = devent2dev(devent)
- except ValueError as e:
- info.append((devent, RESIZE.SKIPPED,
- "unable to convert to device: %s" % e,))
- continue
-
- try:
- statret = os.stat(blockdev)
- except OSError as e:
- info.append((devent, RESIZE.SKIPPED,
- "stat of '%s' failed: %s" % (blockdev, e),))
- continue
-
- if (not stat.S_ISBLK(statret.st_mode) and
- not stat.S_ISCHR(statret.st_mode)):
- info.append((devent, RESIZE.SKIPPED,
- "device '%s' not a block device" % blockdev,))
- continue
-
- try:
- (disk, ptnum) = device_part_info(blockdev)
- except (TypeError, ValueError) as e:
- info.append((devent, RESIZE.SKIPPED,
- "device_part_info(%s) failed: %s" % (blockdev, e),))
- continue
-
- try:
- (old, new) = resizer.resize(disk, ptnum, blockdev)
- if old == new:
- info.append((devent, RESIZE.NOCHANGE,
- "no change necessary (%s, %s)" % (disk, ptnum),))
- else:
- info.append((devent, RESIZE.CHANGED,
- "changed (%s, %s) from %s to %s" %
- (disk, ptnum, old, new),))
-
- except ResizeFailedException as e:
- info.append((devent, RESIZE.FAILED,
- "failed to resize: disk=%s, ptnum=%s: %s" %
- (disk, ptnum, e),))
-
- return info
-
-
-def handle(_name, cfg, _cloud, log, _args):
- if 'growpart' not in cfg:
- log.debug("No 'growpart' entry in cfg. Using default: %s" %
- DEFAULT_CONFIG)
- cfg['growpart'] = DEFAULT_CONFIG
-
- mycfg = cfg.get('growpart')
- if not isinstance(mycfg, dict):
- log.warn("'growpart' in config was not a dict")
- return
-
- mode = mycfg.get('mode', "auto")
- if util.is_false(mode):
- log.debug("growpart disabled: mode=%s" % mode)
- return
-
- if util.is_false(mycfg.get('ignore_growroot_disabled', False)):
- if os.path.isfile("/etc/growroot-disabled"):
- log.debug("growpart disabled: /etc/growroot-disabled exists")
- log.debug("use ignore_growroot_disabled to ignore")
- return
-
- devices = util.get_cfg_option_list(mycfg, "devices", ["/"])
- if not len(devices):
- log.debug("growpart: empty device list")
- return
-
- try:
- resizer = resizer_factory(mode)
- except (ValueError, TypeError) as e:
- log.debug("growpart unable to find resizer for '%s': %s" % (mode, e))
- if mode != "auto":
- raise e
- return
-
- resized = util.log_time(logfunc=log.debug, msg="resize_devices",
- func=resize_devices, args=(resizer, devices))
- for (entry, action, msg) in resized:
- if action == RESIZE.CHANGED:
- log.info("'%s' resized: %s" % (entry, msg))
- else:
- log.debug("'%s' %s: %s" % (entry, action, msg))
-
-RESIZERS = (('growpart', ResizeGrowPart), ('gpart', ResizeGpart))
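-
-# A sketch of the cloud-config this module consumes (keys as read in
-# handle() above; the values shown are the defaults it falls back to):
-#
-# growpart:
-#   mode: auto
-#   devices: ['/']
-#   ignore_growroot_disabled: false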
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
deleted file mode 100644
index 156722d9..00000000
--- a/cloudinit/config/cc_grub_dpkg.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-distros = ['ubuntu', 'debian']
-
-
-def handle(name, cfg, _cloud, log, _args):
-
- mycfg = cfg.get("grub_dpkg", cfg.get("grub-dpkg", {}))
- if not mycfg:
- mycfg = {}
-
- enabled = mycfg.get('enabled', True)
- if util.is_false(enabled):
- log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled)
- return
-
- idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
- idevs_empty = util.get_cfg_option_str(
- mycfg, "grub-pc/install_devices_empty", None)
-
- if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
- (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))):
- if idevs is None:
- idevs = ""
- if idevs_empty is None:
- idevs_empty = "true"
- else:
- if idevs_empty is None:
- idevs_empty = "false"
- if idevs is None:
- idevs = "/dev/sda"
- for dev in ("/dev/sda", "/dev/vda", "/dev/xvda",
- "/dev/sda1", "/dev/vda1", "/dev/xvda1"):
- if os.path.exists(dev):
- idevs = dev
- break
-
-    # now idevs and idevs_empty are set to determined values,
-    # or to those set by the user
-
- dconf_sel = (("grub-pc grub-pc/install_devices string %s\n"
- "grub-pc grub-pc/install_devices_empty boolean %s\n") %
- (idevs, idevs_empty))
-
- log.debug("Setting grub debconf-set-selections with '%s','%s'" %
- (idevs, idevs_empty))
-
- try:
- util.subp(['debconf-set-selections'], dconf_sel)
- except Exception:
- util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
deleted file mode 100644
index 9a02f056..00000000
--- a/cloudinit/config/cc_keys_to_console.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-frequency = PER_INSTANCE
-
-# This is a tool that cloud-init provides
-HELPER_TOOL_TPL = '%s/cloud-init/write-ssh-key-fingerprints'
-
-
-def _get_helper_tool_path(distro):
- try:
- base_lib = distro.usr_lib_exec
- except AttributeError:
- base_lib = '/usr/lib'
- return HELPER_TOOL_TPL % base_lib
-
-
-def handle(name, cfg, cloud, log, _args):
- helper_path = _get_helper_tool_path(cloud.distro)
- if not os.path.exists(helper_path):
- log.warn(("Unable to activate module %s,"
- " helper tool not found at %s"), name, helper_path)
- return
-
- fp_blacklist = util.get_cfg_option_list(cfg,
- "ssh_fp_console_blacklist", [])
- key_blacklist = util.get_cfg_option_list(cfg,
- "ssh_key_console_blacklist",
- ["ssh-dss"])
-
- try:
- cmd = [helper_path]
- cmd.append(','.join(fp_blacklist))
- cmd.append(','.join(key_blacklist))
- (stdout, _stderr) = util.subp(cmd)
- util.multi_log("%s\n" % (stdout.strip()),
- stderr=False, console=True)
- except Exception:
- log.warn("Writing keys to the system console failed!")
- raise
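-
-# A sketch of the cloud-config keys consumed above (list values are
-# illustrative; by default only ssh-dss is blacklisted, and only for keys):
-#
-# ssh_fp_console_blacklist: [ssh-dss]
-# ssh_key_console_blacklist: [ssh-dss]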
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
deleted file mode 100644
index 68fcb27f..00000000
--- a/cloudinit/config/cc_landscape.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from six import StringIO
-
-from configobj import ConfigObj
-
-from cloudinit import type_utils
-from cloudinit import util
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf"
-LS_DEFAULT_FILE = "/etc/default/landscape-client"
-
-distros = ['ubuntu']
-
-# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2
-LSC_BUILTIN_CFG = {
- 'client': {
- 'log_level': "info",
- 'url': "https://landscape.canonical.com/message-system",
- 'ping_url': "http://landscape.canonical.com/ping",
- 'data_path': "/var/lib/landscape/client",
- }
-}
-
-
-def handle(_name, cfg, cloud, log, _args):
- """
- Basically turn a top level 'landscape' entry with a 'client' dict
- and render it to ConfigObj format under '[client]' section in
- /etc/landscape/client.conf
- """
-
- ls_cloudcfg = cfg.get("landscape", {})
-
- if not isinstance(ls_cloudcfg, (dict)):
-        raise RuntimeError("'landscape' key existed in config,"
-                           " but not a dictionary type,"
-                           " is a %s instead" %
-                           type_utils.obj_name(ls_cloudcfg))
- if not ls_cloudcfg:
- return
-
- cloud.distro.install_packages(('landscape-client',))
-
- merge_data = [
- LSC_BUILTIN_CFG,
- LSC_CLIENT_CFG_FILE,
- ls_cloudcfg,
- ]
- merged = merge_together(merge_data)
- contents = StringIO()
- merged.write(contents)
-
- util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE))
- util.write_file(LSC_CLIENT_CFG_FILE, contents.getvalue())
- log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE)
-
- util.write_file(LS_DEFAULT_FILE, "RUN=1\n")
- util.subp(["service", "landscape-client", "restart"])
-
-
-def merge_together(objs):
- """
- merge together ConfigObj objects or things that ConfigObj() will take in
- later entries override earlier
- """
- cfg = ConfigObj({})
- for obj in objs:
- if not obj:
- continue
- if isinstance(obj, ConfigObj):
- cfg.merge(obj)
- else:
- cfg.merge(ConfigObj(obj))
- return cfg
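-
-# For example (later entries override earlier, per the docstring):
-#   merge_together([{'client': {'log_level': 'info'}},
-#                   {'client': {'log_level': 'debug'}}])
-# yields a ConfigObj whose ['client']['log_level'] is 'debug'.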
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
deleted file mode 100644
index bbe5fcae..00000000
--- a/cloudinit/config/cc_locale.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import util
-
-
-def handle(name, cfg, cloud, log, args):
- if len(args) != 0:
- locale = args[0]
- else:
- locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale())
-
- if util.is_false(locale):
- log.debug("Skipping module named %s, disabled by config: %s",
- name, locale)
- return
-
- log.debug("Setting locale to %s", locale)
- locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile")
- cloud.distro.apply_locale(locale, locale_cfgfile)
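-
-# A sketch of the cloud-config consumed above (the file path is
-# illustrative; the distro default applies when it is omitted):
-#
-# locale: en_US.UTF-8
-# locale_configfile: /etc/default/locale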
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
deleted file mode 100644
index 70d4e7c3..00000000
--- a/cloudinit/config/cc_lxd.py
+++ /dev/null
@@ -1,177 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2016 Canonical Ltd.
-#
-# Author: Wesley Wiedenmeier <wesley.wiedenmeier@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-This module initializes lxd using 'lxd init'
-
-Example config:
- #cloud-config
- lxd:
- init:
- network_address: <ip addr>
- network_port: <port>
- storage_backend: <zfs/dir>
- storage_create_device: <dev>
- storage_create_loop: <size>
- storage_pool: <name>
- trust_password: <password>
- bridge:
- mode: <new, existing or none>
- name: <name>
- ipv4_address: <ip addr>
- ipv4_netmask: <cidr>
- ipv4_dhcp_first: <ip addr>
- ipv4_dhcp_last: <ip addr>
- ipv4_dhcp_leases: <size>
- ipv4_nat: <bool>
- ipv6_address: <ip addr>
- ipv6_netmask: <cidr>
- ipv6_nat: <bool>
- domain: <domain>
-"""
-
-from cloudinit import util
-
-
-def handle(name, cfg, cloud, log, args):
- # Get config
- lxd_cfg = cfg.get('lxd')
- if not lxd_cfg:
- log.debug("Skipping module named %s, not present or disabled by cfg",
- name)
- return
- if not isinstance(lxd_cfg, dict):
- log.warn("lxd config must be a dictionary. found a '%s'",
- type(lxd_cfg))
- return
-
- # Grab the configuration
- init_cfg = lxd_cfg.get('init')
- if not isinstance(init_cfg, dict):
- log.warn("lxd/init config must be a dictionary. found a '%s'",
- type(init_cfg))
- init_cfg = {}
-
- bridge_cfg = lxd_cfg.get('bridge')
- if not isinstance(bridge_cfg, dict):
- log.warn("lxd/bridge config must be a dictionary. found a '%s'",
- type(bridge_cfg))
- bridge_cfg = {}
-
- # Install the needed packages
- packages = []
- if not util.which("lxd"):
- packages.append('lxd')
-
- if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'):
- packages.append('zfs')
-
- if len(packages):
- try:
- cloud.distro.install_packages(packages)
- except util.ProcessExecutionError as exc:
- log.warn("failed to install packages %s: %s", packages, exc)
- return
-
- # Set up lxd if init config is given
- if init_cfg:
- init_keys = (
- 'network_address', 'network_port', 'storage_backend',
- 'storage_create_device', 'storage_create_loop',
- 'storage_pool', 'trust_password')
- cmd = ['lxd', 'init', '--auto']
- for k in init_keys:
- if init_cfg.get(k):
- cmd.extend(["--%s=%s" %
- (k.replace('_', '-'), str(init_cfg[k]))])
- util.subp(cmd)
-
- # Set up lxd-bridge if bridge config is given
- dconf_comm = "debconf-communicate"
- if bridge_cfg and util.which(dconf_comm):
- debconf = bridge_to_debconf(bridge_cfg)
-
- # Update debconf database
- try:
- log.debug("Setting lxd debconf via " + dconf_comm)
- data = "\n".join(["set %s %s" % (k, v)
- for k, v in debconf.items()]) + "\n"
- util.subp(['debconf-communicate'], data)
- except Exception:
- util.logexc(log, "Failed to run '%s' for lxd with" % dconf_comm)
-
- # Remove the existing configuration file (forces re-generation)
- util.del_file("/etc/default/lxd-bridge")
-
- # Run reconfigure
- log.debug("Running dpkg-reconfigure for lxd")
- util.subp(['dpkg-reconfigure', 'lxd',
- '--frontend=noninteractive'])
- elif bridge_cfg:
-        raise RuntimeError(
-            "Unable to configure lxd bridge without %s." % dconf_comm)
-
-
-def bridge_to_debconf(bridge_cfg):
- debconf = {}
-
- if bridge_cfg.get("mode") == "none":
- debconf["lxd/setup-bridge"] = "false"
- debconf["lxd/bridge-name"] = ""
-
- elif bridge_cfg.get("mode") == "existing":
- debconf["lxd/setup-bridge"] = "false"
- debconf["lxd/use-existing-bridge"] = "true"
- debconf["lxd/bridge-name"] = bridge_cfg.get("name")
-
- elif bridge_cfg.get("mode") == "new":
- debconf["lxd/setup-bridge"] = "true"
- if bridge_cfg.get("name"):
- debconf["lxd/bridge-name"] = bridge_cfg.get("name")
-
- if bridge_cfg.get("ipv4_address"):
- debconf["lxd/bridge-ipv4"] = "true"
- debconf["lxd/bridge-ipv4-address"] = \
- bridge_cfg.get("ipv4_address")
- debconf["lxd/bridge-ipv4-netmask"] = \
- bridge_cfg.get("ipv4_netmask")
- debconf["lxd/bridge-ipv4-dhcp-first"] = \
- bridge_cfg.get("ipv4_dhcp_first")
- debconf["lxd/bridge-ipv4-dhcp-last"] = \
- bridge_cfg.get("ipv4_dhcp_last")
- debconf["lxd/bridge-ipv4-dhcp-leases"] = \
- bridge_cfg.get("ipv4_dhcp_leases")
- debconf["lxd/bridge-ipv4-nat"] = \
- bridge_cfg.get("ipv4_nat", "true")
-
- if bridge_cfg.get("ipv6_address"):
- debconf["lxd/bridge-ipv6"] = "true"
- debconf["lxd/bridge-ipv6-address"] = \
- bridge_cfg.get("ipv6_address")
- debconf["lxd/bridge-ipv6-netmask"] = \
- bridge_cfg.get("ipv6_netmask")
- debconf["lxd/bridge-ipv6-nat"] = \
- bridge_cfg.get("ipv6_nat", "false")
-
- if bridge_cfg.get("domain"):
- debconf["lxd/bridge-domain"] = bridge_cfg.get("domain")
-
- else:
- raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode"))
-
- return debconf
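-
-# For example, per the mapping above:
-#   bridge_to_debconf({'mode': 'existing', 'name': 'br0'})
-# returns {'lxd/setup-bridge': 'false',
-#          'lxd/use-existing-bridge': 'true',
-#          'lxd/bridge-name': 'br0'}.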
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
deleted file mode 100644
index ada535f8..00000000
--- a/cloudinit/config/cc_mcollective.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Marc Cluet <marc.cluet@canonical.com>
-# Based on code by Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-from six import BytesIO
-
-# Used since this can maintain comments
-# and doesn't need a top level section
-from configobj import ConfigObj
-
-from cloudinit import log as logging
-from cloudinit import util
-
-PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
-PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
-SERVER_CFG = '/etc/mcollective/server.cfg'
-
-LOG = logging.getLogger(__name__)
-
-
-def configure(config, server_cfg=SERVER_CFG,
- pubcert_file=PUBCERT_FILE, pricert_file=PRICERT_FILE):
-    # Read server.cfg values from the original file
-    # in order to be able to merge the rest into it
- try:
- mcollective_config = ConfigObj(server_cfg, file_error=True)
- existed = True
- except IOError:
- LOG.debug("Did not find file %s", server_cfg)
- mcollective_config = ConfigObj()
- existed = False
-
- for (cfg_name, cfg) in config.items():
- if cfg_name == 'public-cert':
- util.write_file(pubcert_file, cfg, mode=0o644)
- mcollective_config[
- 'plugin.ssl_server_public'] = pubcert_file
- mcollective_config['securityprovider'] = 'ssl'
- elif cfg_name == 'private-cert':
- util.write_file(pricert_file, cfg, mode=0o600)
- mcollective_config[
- 'plugin.ssl_server_private'] = pricert_file
- mcollective_config['securityprovider'] = 'ssl'
- else:
- if isinstance(cfg, six.string_types):
- # Just set it in the 'main' section
- mcollective_config[cfg_name] = cfg
- elif isinstance(cfg, (dict)):
-                # Iterate through the config items, create a section if
-                # it is needed and then add or update items as needed
- if cfg_name not in mcollective_config.sections:
- mcollective_config[cfg_name] = {}
- for (o, v) in cfg.items():
- mcollective_config[cfg_name][o] = v
- else:
- # Otherwise just try to convert it to a string
- mcollective_config[cfg_name] = str(cfg)
-
- if existed:
-        # Now that we have all our config as wanted, rename the
-        # previous server.cfg and create our new one
- util.rename(server_cfg, "%s.old" % (server_cfg))
-
- # Now we got the whole file, write to disk...
- contents = BytesIO()
- mcollective_config.write(contents)
- util.write_file(server_cfg, contents.getvalue(), mode=0o644)
-
-
-def handle(name, cfg, cloud, log, _args):
-
- # If there isn't a mcollective key in the configuration don't do anything
- if 'mcollective' not in cfg:
- log.debug(("Skipping module named %s, "
- "no 'mcollective' key in configuration"), name)
- return
-
- mcollective_cfg = cfg['mcollective']
-
- # Start by installing the mcollective package ...
- cloud.distro.install_packages(("mcollective",))
-
- # ... and then update the mcollective configuration
- if 'conf' in mcollective_cfg:
- configure(config=mcollective_cfg['conf'])
-
- # restart mcollective to handle updated config
- util.subp(['service', 'mcollective', 'restart'], capture=False)
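-
-# A sketch of the 'mcollective' cloud-config consumed above (option and
-# section names are illustrative server.cfg entries, not a fixed schema):
-#
-# mcollective:
-#   conf:
-#     loglevel: debug            # plain value -> main section
-#     plugin.activemq:           # dict value -> its own section
-#       host: broker.example.com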
diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py
deleted file mode 100644
index facaa538..00000000
--- a/cloudinit/config/cc_migrator.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import shutil
-
-from cloudinit import helpers
-from cloudinit import util
-
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
-
-
-def _migrate_canon_sems(cloud):
- paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
- am_adjusted = 0
- for sem_path in paths:
- if not sem_path or not os.path.exists(sem_path):
- continue
- for p in os.listdir(sem_path):
- full_path = os.path.join(sem_path, p)
- if os.path.isfile(full_path):
- (name, ext) = os.path.splitext(p)
- canon_name = helpers.canon_sem_name(name)
- if canon_name != name:
- new_path = os.path.join(sem_path, canon_name + ext)
- shutil.move(full_path, new_path)
- am_adjusted += 1
- return am_adjusted
-
-
-def _migrate_legacy_sems(cloud, log):
- legacy_adjust = {
- 'apt-update-upgrade': [
- 'apt-configure',
- 'package-update-upgrade-install',
- ],
- }
- paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
- for sem_path in paths:
- if not sem_path or not os.path.exists(sem_path):
- continue
- sem_helper = helpers.FileSemaphores(sem_path)
- for (mod_name, migrate_to) in legacy_adjust.items():
- possibles = [mod_name, helpers.canon_sem_name(mod_name)]
- old_exists = []
- for p in os.listdir(sem_path):
- (name, _ext) = os.path.splitext(p)
-                if (name in possibles and
-                        os.path.isfile(os.path.join(sem_path, p))):
- old_exists.append(p)
- for p in old_exists:
- util.del_file(os.path.join(sem_path, p))
- (_name, freq) = os.path.splitext(p)
- for m in migrate_to:
- log.debug("Migrating %s => %s with the same frequency",
- p, m)
- with sem_helper.lock(m, freq):
- pass
-
-
-def handle(name, cfg, cloud, log, _args):
- do_migrate = util.get_cfg_option_str(cfg, "migrate", True)
- if not util.translate_bool(do_migrate):
- log.debug("Skipping module named %s, migration disabled", name)
- return
- sems_moved = _migrate_canon_sems(cloud)
- log.debug("Migrated %s semaphore files to there canonicalized names",
- sems_moved)
- _migrate_legacy_sems(cloud, log)
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
deleted file mode 100644
index 2b981935..00000000
--- a/cloudinit/config/cc_mounts.py
+++ /dev/null
@@ -1,405 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from string import whitespace
-
-import logging
-import os.path
-import re
-
-from cloudinit import type_utils
-from cloudinit import util
-
-# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
-DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
-DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER)
-WS = re.compile("[%s]+" % (whitespace))
-FSTAB_PATH = "/etc/fstab"
-
-LOG = logging.getLogger(__name__)
-
-
-def is_meta_device_name(name):
- # return true if this is a metadata service name
- if name in ["ami", "root", "swap"]:
- return True
- # names 'ephemeral0' or 'ephemeral1'
- # 'ebs[0-9]' appears when '--block-device-mapping sdf=snap-d4d90bbc'
- for enumname in ("ephemeral", "ebs"):
- if name.startswith(enumname) and name.find(":") == -1:
- return True
- return False
-
-
-def _get_nth_partition_for_device(device_path, partition_number):
- potential_suffixes = [str(partition_number), 'p%s' % (partition_number,),
- '-part%s' % (partition_number,)]
- for suffix in potential_suffixes:
- potential_partition_device = '%s%s' % (device_path, suffix)
- if os.path.exists(potential_partition_device):
- return potential_partition_device
- return None
-
-
-def _is_block_device(device_path, partition_path=None):
- device_name = os.path.realpath(device_path).split('/')[-1]
- sys_path = os.path.join('/sys/block/', device_name)
- if partition_path is not None:
- sys_path = os.path.join(
- sys_path, os.path.realpath(partition_path).split('/')[-1])
- return os.path.exists(sys_path)
-
-
-def sanitize_devname(startname, transformer, log):
- log.debug("Attempting to determine the real name of %s", startname)
-
- # workaround, allow user to specify 'ephemeral'
- # rather than more ec2 correct 'ephemeral0'
- devname = startname
- if devname == "ephemeral":
- devname = "ephemeral0"
- log.debug("Adjusted mount option from ephemeral to ephemeral0")
-
- device_path, partition_number = util.expand_dotted_devname(devname)
-
- if is_meta_device_name(device_path):
- orig = device_path
- device_path = transformer(device_path)
- if not device_path:
- return None
- if not device_path.startswith("/"):
- device_path = "/dev/%s" % (device_path,)
- log.debug("Mapped metadata name %s to %s", orig, device_path)
- else:
- if DEVICE_NAME_RE.match(startname):
- device_path = "/dev/%s" % (device_path,)
-
- partition_path = None
- if partition_number is None:
- partition_path = _get_nth_partition_for_device(device_path, 1)
- else:
- partition_path = _get_nth_partition_for_device(device_path,
- partition_number)
- if partition_path is None:
- return None
-
- if _is_block_device(device_path, partition_path):
- if partition_path is not None:
- return partition_path
- return device_path
- return None
-
-
-def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
- # make a suggestion on the size of swap for this system.
- if memsize is None:
- memsize = util.read_meminfo()['total']
-
- GB = 2 ** 30
- sugg_max = 8 * GB
-
- info = {'avail': 'na', 'max_in': maxsize, 'mem': memsize}
-
- if fsys is None and maxsize is None:
- # set max to 8GB default if no filesystem given
- maxsize = sugg_max
- elif fsys:
- statvfs = os.statvfs(fsys)
- avail = statvfs.f_frsize * statvfs.f_bfree
- info['avail'] = avail
-
- if maxsize is None:
- # set to 25% of filesystem space
- maxsize = min(int(avail / 4), sugg_max)
- elif maxsize > ((avail * .9)):
- # set to 90% of available disk space
- maxsize = int(avail * .9)
- elif maxsize is None:
- maxsize = sugg_max
-
- info['max'] = maxsize
-
- formulas = [
- # < 1G: swap = double memory
- (1 * GB, lambda x: x * 2),
- # < 2G: swap = 2G
- (2 * GB, lambda x: 2 * GB),
- # < 4G: swap = memory
- (4 * GB, lambda x: x),
- # < 16G: 4G
- (16 * GB, lambda x: 4 * GB),
- # < 64G: 1/2 M up to max
- (64 * GB, lambda x: x / 2),
- ]
-
- size = None
- for top, func in formulas:
- if memsize <= top:
- size = min(func(memsize), maxsize)
- # if less than 1/2 memory and not much, return 0
- if size < (memsize / 2) and size < 4 * GB:
- size = 0
- break
- break
-
-    if size is None:
-        size = maxsize
-
- info['size'] = size
-
- MB = 2 ** 20
- pinfo = {}
- for k, v in info.items():
- if isinstance(v, int):
- pinfo[k] = "%s MB" % (v / MB)
- else:
- pinfo[k] = v
-
- LOG.debug("suggest %(size)s swap for %(mem)s memory with '%(avail)s'"
- " disk given max=%(max_in)s [max=%(max)s]'" % pinfo)
- return size
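-
-# Worked example of the table above: with memsize = 2 GB and no fsys or
-# maxsize given, maxsize defaults to the 8 GB cap, the '< 2G' rule
-# applies, and the suggestion is min(2 * GB, maxsize) = 2 GB.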
-
-
-def setup_swapfile(fname, size=None, maxsize=None):
- """
- fname: full path string of filename to setup
- size: the size to create. set to "auto" for recommended
- maxsize: the maximum size
- """
- tdir = os.path.dirname(fname)
- if str(size).lower() == "auto":
- try:
- memsize = util.read_meminfo()['total']
-        except IOError as e:
-            LOG.debug("Not creating swap: failed to read meminfo: %s", e)
-            return
-
- util.ensure_dir(tdir)
- size = suggested_swapsize(fsys=tdir, maxsize=maxsize,
- memsize=memsize)
-
- if not size:
- LOG.debug("Not creating swap: suggested size was 0")
- return
-
- mbsize = str(int(size / (2 ** 20)))
- msg = "creating swap file '%s' of %sMB" % (fname, mbsize)
- try:
- util.ensure_dir(tdir)
- util.log_time(LOG.debug, msg, func=util.subp,
- args=[['sh', '-c',
- ('rm -f "$1" && umask 0066 && '
- '{ fallocate -l "${2}M" "$1" || '
- ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && '
- 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'),
- 'setup_swap', fname, mbsize]])
-
- except Exception as e:
- raise IOError("Failed %s: %s" % (msg, e))
-
- return fname
-
-
-def handle_swapcfg(swapcfg):
- """handle the swap config, calling setup_swap if necessary.
- return None or (filename, size)
- """
- if not isinstance(swapcfg, dict):
- LOG.warn("input for swap config was not a dict.")
- return None
-
- fname = swapcfg.get('filename', '/swap.img')
- size = swapcfg.get('size', 0)
- maxsize = swapcfg.get('maxsize', None)
-
- if not (size and fname):
- LOG.debug("no need to setup swap")
- return
-
- if os.path.exists(fname):
- if not os.path.exists("/proc/swaps"):
- LOG.debug("swap file %s existed. no /proc/swaps. Being safe.",
- fname)
- return fname
- try:
- for line in util.load_file("/proc/swaps").splitlines():
- if line.startswith(fname + " "):
- LOG.debug("swap file %s already in use.", fname)
- return fname
- LOG.debug("swap file %s existed, but not in /proc/swaps", fname)
- except Exception:
- LOG.warn("swap file %s existed. Error reading /proc/swaps", fname)
- return fname
-
- try:
- if isinstance(size, str) and size != "auto":
- size = util.human2bytes(size)
- if isinstance(maxsize, str):
- maxsize = util.human2bytes(maxsize)
- return setup_swapfile(fname=fname, size=size, maxsize=maxsize)
-
- except Exception as e:
- LOG.warn("failed to setup swap: %s", e)
-
- return None
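-
-# A sketch of the 'swap' cloud-config consumed above ('auto' defers to
-# suggested_swapsize(); sizes may also be byte counts or human-readable
-# strings handled by util.human2bytes):
-#
-# swap:
-#   filename: /swap.img
-#   size: auto
-#   maxsize: 2G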
-
-
-def handle(_name, cfg, cloud, log, _args):
- # fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno
- def_mnt_opts = "defaults,nobootwait"
- if cloud.distro.uses_systemd():
- def_mnt_opts = "defaults,nofail"
-
- defvals = [None, None, "auto", def_mnt_opts, "0", "2"]
- defvals = cfg.get("mount_default_fields", defvals)
-
- # these are our default set of mounts
- defmnts = [["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"],
- ["swap", "none", "swap", "sw", "0", "0"]]
-
- cfgmnt = []
- if "mounts" in cfg:
- cfgmnt = cfg["mounts"]
-
- for i in range(len(cfgmnt)):
- # skip something that wasn't a list
- if not isinstance(cfgmnt[i], list):
- log.warn("Mount option %s not a list, got a %s instead",
- (i + 1), type_utils.obj_name(cfgmnt[i]))
- continue
-
- start = str(cfgmnt[i][0])
- sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
- if sanitized is None:
- log.debug("Ignorming nonexistant named mount %s", start)
- continue
-
- if sanitized != start:
- log.debug("changed %s => %s" % (start, sanitized))
- cfgmnt[i][0] = sanitized
-
- # in case the user did not quote a field (likely fs-freq, fs_passno)
- # but do not convert None to 'None' (LP: #898365)
- for j in range(len(cfgmnt[i])):
- if cfgmnt[i][j] is None:
- continue
- else:
- cfgmnt[i][j] = str(cfgmnt[i][j])
-
- for i in range(len(cfgmnt)):
- # fill in values with defaults from defvals above
- for j in range(len(defvals)):
- if len(cfgmnt[i]) <= j:
- cfgmnt[i].append(defvals[j])
- elif cfgmnt[i][j] is None:
- cfgmnt[i][j] = defvals[j]
-
- # if the second entry in the list is 'None' this
- # clears all previous entries of that same 'fs_spec'
- # (fs_spec is the first field in /etc/fstab, ie, that device)
- if cfgmnt[i][1] is None:
- for j in range(i):
- if cfgmnt[j][0] == cfgmnt[i][0]:
- cfgmnt[j][1] = None
-
- # for each of the "default" mounts, add them only if no other
- # entry has the same device name
- for defmnt in defmnts:
- start = defmnt[0]
- sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
- if sanitized is None:
- log.debug("Ignoring nonexistant default named mount %s", start)
- continue
- if sanitized != start:
- log.debug("changed default device %s => %s" % (start, sanitized))
- defmnt[0] = sanitized
-
- cfgmnt_has = False
- for cfgm in cfgmnt:
- if cfgm[0] == defmnt[0]:
- cfgmnt_has = True
- break
-
- if cfgmnt_has:
- log.debug(("Not including %s, already"
- " previously included"), start)
- continue
- cfgmnt.append(defmnt)
-
- # now, each entry in the cfgmnt list has all fstab values
- # if the second field is None (not the string, the value) we skip it
- actlist = []
- for x in cfgmnt:
- if x[1] is None:
- log.debug("Skipping non-existent device named %s", x[0])
- else:
- actlist.append(x)
-
- swapret = handle_swapcfg(cfg.get('swap', {}))
- if swapret:
- actlist.append([swapret, "none", "swap", "sw", "0", "0"])
-
- if len(actlist) == 0:
- log.debug("No modifications to fstab needed.")
- return
-
- comment = "comment=cloudconfig"
- cc_lines = []
- needswap = False
- dirs = []
- for line in actlist:
-        # write 'comment' into fs_mntops, claiming this entry for cloud-init
- line[3] = "%s,%s" % (line[3], comment)
- if line[2] == "swap":
- needswap = True
- if line[1].startswith("/"):
- dirs.append(line[1])
- cc_lines.append('\t'.join(line))
-
- fstab_lines = []
- for line in util.load_file(FSTAB_PATH).splitlines():
- try:
- toks = WS.split(line)
- if toks[3].find(comment) != -1:
- continue
- except Exception:
- pass
- fstab_lines.append(line)
-
- fstab_lines.extend(cc_lines)
- contents = "%s\n" % ('\n'.join(fstab_lines))
- util.write_file(FSTAB_PATH, contents)
-
- if needswap:
- try:
- util.subp(("swapon", "-a"))
- except Exception:
- util.logexc(log, "Activating swap via 'swapon -a' failed")
-
- for d in dirs:
- try:
- util.ensure_dir(d)
- except Exception:
- util.logexc(log, "Failed to make '%s' config-mount", d)
-
- try:
- util.subp(("mount", "-a"))
- except Exception:
- util.logexc(log, "Activating mounts via 'mount -a' failed")
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
deleted file mode 100644
index 73b0e30d..00000000
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import time
-
-from cloudinit import log as logging
-from cloudinit import util
-
-REBOOT_FILE = "/var/run/reboot-required"
-REBOOT_CMD = ["/sbin/reboot"]
-
-
-def _multi_cfg_bool_get(cfg, *keys):
- for k in keys:
- if util.get_cfg_option_bool(cfg, k, False):
- return True
- return False
-
-
-def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
- util.subp(REBOOT_CMD)
- start = time.time()
- wait_time = initial_sleep
- for _i in range(0, wait_attempts):
- time.sleep(wait_time)
- wait_time *= backoff
- elapsed = time.time() - start
- log.debug("Rebooted, but still running after %s seconds", int(elapsed))
- # If we got here, not good
- elapsed = time.time() - start
- raise RuntimeError(("Reboot did not happen"
- " after %s seconds!") % (int(elapsed)))
-
-
-def handle(_name, cfg, cloud, log, _args):
- # Handle the old style + new config names
- update = _multi_cfg_bool_get(cfg, 'apt_update', 'package_update')
- upgrade = _multi_cfg_bool_get(cfg, 'package_upgrade', 'apt_upgrade')
- reboot_if_required = _multi_cfg_bool_get(cfg, 'apt_reboot_if_required',
- 'package_reboot_if_required')
- pkglist = util.get_cfg_option_list(cfg, 'packages', [])
-
- errors = []
- if update or len(pkglist) or upgrade:
- try:
- cloud.distro.update_package_sources()
- except Exception as e:
- util.logexc(log, "Package update failed")
- errors.append(e)
-
- if upgrade:
- try:
- cloud.distro.package_command("upgrade")
- except Exception as e:
- util.logexc(log, "Package upgrade failed")
- errors.append(e)
-
- if len(pkglist):
- try:
- cloud.distro.install_packages(pkglist)
- except Exception as e:
- util.logexc(log, "Failed to install packages: %s", pkglist)
- errors.append(e)
-
- # TODO(smoser): handle this less violently
- # kernel and openssl (possibly some other packages)
- # write a file /var/run/reboot-required after upgrading.
- # if that file exists and configured, then just stop right now and reboot
- reboot_fn_exists = os.path.isfile(REBOOT_FILE)
- if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists:
- try:
- log.warn("Rebooting after upgrade or install per %s", REBOOT_FILE)
- # Flush the above warning + anything else out...
- logging.flushLoggers(log)
- _fire_reboot(log)
- except Exception as e:
- util.logexc(log, "Requested reboot did not happen!")
- errors.append(e)
-
- if len(errors):
- log.warn("%s failed with exceptions, re-raising the last one",
- len(errors))
- raise errors[-1]
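-
-# Example cloud-config (the legacy apt_* spellings checked above are
-# accepted as well; the package name is illustrative):
-#
-# package_update: true
-# package_upgrade: true
-# package_reboot_if_required: true
-# packages: [htop]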
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
deleted file mode 100644
index 72176d42..00000000
--- a/cloudinit/config/cc_phone_home.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import templater
-from cloudinit import util
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-POST_LIST_ALL = [
- 'pub_key_dsa',
- 'pub_key_rsa',
- 'pub_key_ecdsa',
- 'instance_id',
- 'hostname',
-    'fqdn'
-]
-
-
-# phone_home:
-# url: http://my.foo.bar/$INSTANCE_ID/
-# post: all
-# tries: 10
-#
-# phone_home:
-# url: http://my.foo.bar/$INSTANCE_ID/
-# post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id, hostname,
-# fqdn ]
-#
-def handle(name, cfg, cloud, log, args):
- if len(args) != 0:
- ph_cfg = util.read_conf(args[0])
- else:
- if 'phone_home' not in cfg:
- log.debug(("Skipping module named %s, "
- "no 'phone_home' configuration found"), name)
- return
- ph_cfg = cfg['phone_home']
-
- if 'url' not in ph_cfg:
- log.warn(("Skipping module named %s, "
- "no 'url' found in 'phone_home' configuration"), name)
- return
-
- url = ph_cfg['url']
- post_list = ph_cfg.get('post', 'all')
- tries = ph_cfg.get('tries')
- try:
- tries = int(tries)
- except Exception:
- tries = 10
- util.logexc(log, "Configuration entry 'tries' is not an integer, "
- "using %s instead", tries)
-
- if post_list == "all":
- post_list = POST_LIST_ALL
-
- all_keys = {}
- all_keys['instance_id'] = cloud.get_instance_id()
- all_keys['hostname'] = cloud.get_hostname()
- all_keys['fqdn'] = cloud.get_hostname(fqdn=True)
-
- pubkeys = {
- 'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub',
- 'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub',
- 'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
- }
-
- for (n, path) in pubkeys.items():
- try:
- all_keys[n] = util.load_file(path)
- except Exception:
- util.logexc(log, "%s: failed to open, can not phone home that "
- "data!", path)
-
- submit_keys = {}
- for k in post_list:
- if k in all_keys:
- submit_keys[k] = all_keys[k]
- else:
- submit_keys[k] = None
- log.warn(("Requested key %s from 'post'"
- " configuration list not available"), k)
-
-    # Get them ready to be posted
- real_submit_keys = {}
- for (k, v) in submit_keys.items():
- if v is None:
- real_submit_keys[k] = 'N/A'
- else:
- real_submit_keys[k] = str(v)
-
-    # In case the url is parameterized
- url_params = {
- 'INSTANCE_ID': all_keys['instance_id'],
- }
- url = templater.render_string(url, url_params)
- try:
- util.read_file_or_url(url, data=real_submit_keys,
- retries=tries, sec_between=3,
- ssl_details=util.fetch_ssl_details(cloud.paths))
- except Exception:
- util.logexc(log, "Failed to post phone home data to %s in %s tries",
- url, tries)
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
deleted file mode 100644
index cc3f7f70..00000000
--- a/cloudinit/config/cc_power_state_change.py
+++ /dev/null
@@ -1,223 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-import errno
-import os
-import re
-import six
-import subprocess
-import time
-
-frequency = PER_INSTANCE
-
-EXIT_FAIL = 254
-
-
-def givecmdline(pid):
- # Returns the cmdline for the given process id. In Linux we can use procfs
- # for this but on BSD there is /usr/bin/procstat.
- try:
- # Example output from procstat -c 1
- # PID COMM ARGS
- # 1 init /bin/init --
- if util.system_info()["platform"].startswith('FreeBSD'):
- (output, _err) = util.subp(['procstat', '-c', str(pid)])
- line = output.splitlines()[1]
-            m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
- return m.group(2)
- else:
- return util.load_file("/proc/%s/cmdline" % pid)
- except IOError:
- return None
-
-
-def check_condition(cond, log=None):
- if isinstance(cond, bool):
- if log:
- log.debug("Static Condition: %s" % cond)
- return cond
-
- pre = "check_condition command (%s): " % cond
- try:
- proc = subprocess.Popen(cond, shell=not isinstance(cond, list))
- proc.communicate()
- ret = proc.returncode
- if ret == 0:
- if log:
- log.debug(pre + "exited 0. condition met.")
- return True
- elif ret == 1:
- if log:
- log.debug(pre + "exited 1. condition not met.")
- return False
- else:
- if log:
- log.warn(pre + "unexpected exit %s. " % ret +
- "do not apply change.")
- return False
- except Exception as e:
- if log:
- log.warn(pre + "Unexpected error: %s" % e)
- return False
-
-
-def handle(_name, cfg, _cloud, log, _args):
-
- try:
- (args, timeout, condition) = load_power_state(cfg)
- if args is None:
- log.debug("no power_state provided. doing nothing")
- return
- except Exception as e:
- log.warn("%s Not performing power state change!" % str(e))
- return
-
- if condition is False:
- log.debug("Condition was false. Will not perform state change.")
- return
-
- mypid = os.getpid()
-
- cmdline = givecmdline(mypid)
- if not cmdline:
- log.warn("power_state: failed to get cmdline of current process")
- return
-
- devnull_fp = open(os.devnull, "w")
-
- log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args)))
-
- util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log,
- condition, execmd, [args, devnull_fp])
-
-
-def load_power_state(cfg):
-    # returns a tuple of (shutdown_command, timeout, condition)
-    # shutdown_command is None if no config found
- pstate = cfg.get('power_state')
-
- if pstate is None:
- return (None, None, None)
-
- if not isinstance(pstate, dict):
- raise TypeError("power_state is not a dict.")
-
- opt_map = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}
-
- mode = pstate.get("mode")
- if mode not in opt_map:
- raise TypeError(
- "power_state[mode] required, must be one of: %s. found: '%s'." %
- (','.join(opt_map.keys()), mode))
-
- delay = pstate.get("delay", "now")
- # convert integer 30 or string '30' to '+30'
- try:
- delay = "+%s" % int(delay)
- except ValueError:
- pass
-
- if delay != "now" and not re.match(r"\+[0-9]+", delay):
- raise TypeError(
- "power_state[delay] must be 'now' or '+m' (minutes)."
- " found '%s'." % delay)
-
- args = ["shutdown", opt_map[mode], delay]
- if pstate.get("message"):
- args.append(pstate.get("message"))
-
- try:
- timeout = float(pstate.get('timeout', 30.0))
- except ValueError:
- raise ValueError("failed to convert timeout '%s' to float." %
- pstate['timeout'])
-
- condition = pstate.get("condition", True)
-    if not isinstance(condition, six.string_types + (list, bool)):
-        raise TypeError("condition type %s invalid. must be list, bool, str"
-                        % type(condition))
- return (args, timeout, condition)
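-
-# For illustration, load_power_state turns:
-#   power_state: {mode: poweroff, delay: 30, message: bye, timeout: 30}
-# into (['shutdown', '-P', '+30', 'bye'], 30.0, True).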
-
-
-def doexit(sysexit):
- os._exit(sysexit)
-
-
-def execmd(exe_args, output=None, data_in=None):
- try:
- proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE,
- stdout=output, stderr=subprocess.STDOUT)
- proc.communicate(data_in)
- ret = proc.returncode
- except Exception:
- doexit(EXIT_FAIL)
- doexit(ret)
-
-
-def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args):
- # wait until pid, with /proc/pid/cmdline contents of pidcmdline
- # is no longer alive. After it is gone, or timeout has passed
- # execute func(args)
- msg = None
- end_time = time.time() + timeout
-
- def fatal(msg):
- if log:
- log.warn(msg)
- doexit(EXIT_FAIL)
-
- known_errnos = (errno.ENOENT, errno.ESRCH)
-
- while True:
- if time.time() > end_time:
- msg = "timeout reached before %s ended" % pid
- break
-
- try:
- cmdline = givecmdline(pid)
- if cmdline != pidcmdline:
- msg = "cmdline changed for %s [now: %s]" % (pid, cmdline)
- break
-
- except IOError as ioerr:
- if ioerr.errno in known_errnos:
- msg = "pidfile gone [%d]" % ioerr.errno
- else:
- fatal("IOError during wait: %s" % ioerr)
- break
-
- except Exception as e:
- fatal("Unexpected Exception: %s" % e)
-
- time.sleep(.25)
-
- if not msg:
- fatal("Unexpected error in run_after_pid_gone")
-
- if log:
- log.debug(msg)
-
- try:
- if not check_condition(condition, log):
- return
- except Exception as e:
- fatal("Unexpected Exception when checking condition: %s" % e)
-
- func(*args)
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
deleted file mode 100644
index 774d3322..00000000
--- a/cloudinit/config/cc_puppet.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from six import StringIO
-
-import os
-import socket
-
-from cloudinit import helpers
-from cloudinit import util
-
-PUPPET_CONF_PATH = '/etc/puppet/puppet.conf'
-PUPPET_SSL_CERT_DIR = '/var/lib/puppet/ssl/certs/'
-PUPPET_SSL_DIR = '/var/lib/puppet/ssl'
-PUPPET_SSL_CERT_PATH = '/var/lib/puppet/ssl/certs/ca.pem'
-
-
-def _autostart_puppet(log):
- # Set puppet to automatically start
- if os.path.exists('/etc/default/puppet'):
- util.subp(['sed', '-i',
- '-e', 's/^START=.*/START=yes/',
- '/etc/default/puppet'], capture=False)
- elif os.path.exists('/bin/systemctl'):
- util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
- capture=False)
- elif os.path.exists('/sbin/chkconfig'):
- util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
- else:
- log.warn(("Sorry we do not know how to enable"
- " puppet services on this system"))
-
-
-def handle(name, cfg, cloud, log, _args):
- # If there isn't a puppet key in the configuration don't do anything
- if 'puppet' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'puppet' configuration found"), name)
- return
-
- puppet_cfg = cfg['puppet']
-
- # Start by installing the puppet package if necessary...
- install = util.get_cfg_option_bool(puppet_cfg, 'install', True)
- version = util.get_cfg_option_str(puppet_cfg, 'version', None)
- if not install and version:
- log.warn(("Puppet install set false but version supplied,"
- " doing nothing."))
- elif install:
- log.debug(("Attempting to install puppet %s,"),
- version if version else 'latest')
- cloud.distro.install_packages(('puppet', version))
-
- # ... and then update the puppet configuration
- if 'conf' in puppet_cfg:
- # Add all sections from the conf object to puppet.conf
- contents = util.load_file(PUPPET_CONF_PATH)
- # Create object for reading puppet.conf values
- puppet_config = helpers.DefaultingConfigParser()
-        # Read puppet.conf values from the original file in order to be
-        # able to merge the rest into it. First clean them up
-        # (TODO(harlowja) is this really needed??)
- cleaned_lines = [i.lstrip() for i in contents.splitlines()]
- cleaned_contents = '\n'.join(cleaned_lines)
- puppet_config.readfp(StringIO(cleaned_contents),
- filename=PUPPET_CONF_PATH)
- for (cfg_name, cfg) in puppet_cfg['conf'].items():
- # Cert configuration is a special case
- # Dump the puppet master ca certificate in the correct place
- if cfg_name == 'ca_cert':
- # Puppet ssl sub-directory isn't created yet
- # Create it with the proper permissions and ownership
- util.ensure_dir(PUPPET_SSL_DIR, 0o771)
- util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root')
- util.ensure_dir(PUPPET_SSL_CERT_DIR)
- util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root')
- util.write_file(PUPPET_SSL_CERT_PATH, cfg)
- util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root')
- else:
-                # Iterate through the config items, we'll use
-                # ConfigParser.set to overwrite or create new items as needed
- for (o, v) in cfg.items():
- if o == 'certname':
- # Expand %f as the fqdn
- # TODO(harlowja) should this use the cloud fqdn??
- v = v.replace("%f", socket.getfqdn())
- # Expand %i as the instance id
- v = v.replace("%i", cloud.get_instance_id())
- # certname needs to be downcased
- v = v.lower()
- puppet_config.set(cfg_name, o, v)
-        # Now that we have all our config as wanted, rename the
-        # previous puppet.conf and create our new one
- util.rename(PUPPET_CONF_PATH, "%s.old" % (PUPPET_CONF_PATH))
- util.write_file(PUPPET_CONF_PATH, puppet_config.stringify())
-
- # Set it up so it autostarts
- _autostart_puppet(log)
-
- # Start puppetd
- util.subp(['service', 'puppet', 'start'], capture=False)
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
deleted file mode 100644
index 2a2a9f59..00000000
--- a/cloudinit/config/cc_resizefs.py
+++ /dev/null
@@ -1,185 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import errno
-import os
-import stat
-
-from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
-
-frequency = PER_ALWAYS
-
-
-def _resize_btrfs(mount_point, devpth):
- return ('btrfs', 'filesystem', 'resize', 'max', mount_point)
-
-
-def _resize_ext(mount_point, devpth):
- return ('resize2fs', devpth)
-
-
-def _resize_xfs(mount_point, devpth):
- return ('xfs_growfs', devpth)
-
-
-def _resize_ufs(mount_point, devpth):
- return ('growfs', devpth)
-
-# Do not use a dictionary as these commands should be able to be used
-# for multiple filesystem types if possible, e.g. one command for
-# ext2, ext3 and ext4.
-RESIZE_FS_PREFIXES_CMDS = [
- ('btrfs', _resize_btrfs),
- ('ext', _resize_ext),
- ('xfs', _resize_xfs),
- ('ufs', _resize_ufs),
-]
-
-NOBLOCK = "noblock"
-
-
-def rootdev_from_cmdline(cmdline):
- found = None
- for tok in cmdline.split():
- if tok.startswith("root="):
- found = tok[5:]
- break
- if found is None:
- return None
-
- if found.startswith("/dev/"):
- return found
- if found.startswith("LABEL="):
- return "/dev/disk/by-label/" + found[len("LABEL="):]
- if found.startswith("UUID="):
- return "/dev/disk/by-uuid/" + found[len("UUID="):]
-
- return "/dev/" + found
-
-
-def handle(name, cfg, _cloud, log, args):
- if len(args) != 0:
- resize_root = args[0]
- else:
- resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
-
- if not util.translate_bool(resize_root, addons=[NOBLOCK]):
- log.debug("Skipping module named %s, resizing disabled", name)
- return
-
- # TODO(harlowja) is the directory ok to be used??
- resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
- util.ensure_dir(resize_root_d)
-
- # TODO(harlowja): allow what is to be resized to be configurable??
- resize_what = "/"
- result = util.get_mount_info(resize_what, log)
- if not result:
- log.warn("Could not determine filesystem type of %s", resize_what)
- return
-
- (devpth, fs_type, mount_point) = result
-
- info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
- log.debug("resize_info: %s" % info)
-
- container = util.is_container()
-
- # Ensure the path is a block device.
- if (devpth == "/dev/root" and not os.path.exists(devpth) and
- not container):
- devpth = rootdev_from_cmdline(util.get_cmdline())
- if devpth is None:
- log.warn("Unable to find device '/dev/root'")
- return
- log.debug("Converted /dev/root to '%s' per kernel cmdline", devpth)
-
- try:
- statret = os.stat(devpth)
- except OSError as exc:
- if container and exc.errno == errno.ENOENT:
- log.debug("Device '%s' did not exist in container. "
- "cannot resize: %s", devpth, info)
- elif exc.errno == errno.ENOENT:
- log.warn("Device '%s' did not exist. cannot resize: %s",
- devpth, info)
- else:
- raise exc
- return
-
- if not os.access(devpth, os.W_OK):
- if container:
- log.debug("'%s' not writable in container. cannot resize: %s",
- devpth, info)
- else:
- log.warn("'%s' not writable. cannot resize: %s", devpth, info)
- return
-
- if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
- if container:
- log.debug("device '%s' not a block device in container."
- " cannot resize: %s" % (devpth, info))
- else:
- log.warn("device '%s' not a block device. cannot resize: %s" %
- (devpth, info))
- return
-
- resizer = None
- fstype_lc = fs_type.lower()
- for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
- if fstype_lc.startswith(pfix):
- resizer = root_cmd
- break
-
- if not resizer:
- log.warn("Not resizing unknown filesystem type %s for %s",
- fs_type, resize_what)
- return
-
- resize_cmd = resizer(resize_what, devpth)
- log.debug("Resizing %s (%s) using %s", resize_what, fs_type,
- ' '.join(resize_cmd))
-
- if resize_root == NOBLOCK:
- # Fork to a child that will run
- # the resize command
- util.fork_cb(
- util.log_time, logfunc=log.debug, msg="backgrounded Resizing",
- func=do_resize, args=(resize_cmd, log))
- else:
- util.log_time(logfunc=log.debug, msg="Resizing",
- func=do_resize, args=(resize_cmd, log))
-
- action = 'Resized'
- if resize_root == NOBLOCK:
- action = 'Resizing (via forking)'
- log.debug("%s root filesystem (type=%s, val=%s)", action, fs_type,
- resize_root)
-
-
-def do_resize(resize_cmd, log):
- try:
- util.subp(resize_cmd)
- except util.ProcessExecutionError:
- util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd)
- raise
- # TODO(harlowja): Should we add a fsck check after this to make
- # sure we didn't corrupt anything?
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
deleted file mode 100644
index 71d9e3a7..00000000
--- a/cloudinit/config/cc_resolv_conf.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2013 Craig Tracey
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Craig Tracey <craigtracey@gmail.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# Note:
-# This module is intended to manage resolv.conf in environments where
-# early configuration of resolv.conf is necessary for further
-# bootstrapping and/or where a configuration management system such as
-# puppet or chef owns DNS configuration. As Debian/Ubuntu will, by
-# default, utilize resolvconf, and similarly RedHat will use sysconfig,
-# this module is likely to be of little use unless those are configured
-# correctly.
-#
-# For RedHat with sysconfig, be sure to set PEERDNS=no for all DHCP
-# enabled NICs. And, in Ubuntu/Debian it is recommended that DNS
-# be configured via the standard /etc/network/interfaces configuration
-# file.
-#
-#
-# Usage Example:
-#
-# #cloud-config
-# manage_resolv_conf: true
-#
-# resolv_conf:
-# nameservers: ['8.8.4.4', '8.8.8.8']
-# searchdomains:
-# - foo.example.com
-# - bar.example.com
-# domain: example.com
-# options:
-# rotate: true
-# timeout: 1
-#
-
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import templater
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-frequency = PER_INSTANCE
-
-distros = ['fedora', 'rhel', 'sles']
-
-
-def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
- flags = []
- false_flags = []
-
- if 'options' in params:
- for key, val in params['options'].items():
- if isinstance(val, bool):
- if val:
- flags.append(key)
- else:
- false_flags.append(key)
-
- for flag in flags + false_flags:
- del params['options'][flag]
-
- if not params.get('options'):
- params['options'] = {}
-
- params['flags'] = flags
- LOG.debug("Writing resolv.conf from template %s" % template_fn)
- templater.render_to_file(template_fn, target_fname, params)
-
-
-def handle(name, cfg, cloud, log, _args):
- """
- Handler for resolv.conf
-
- @param name: The module name "resolv-conf" from cloud.cfg
- @param cfg: A nested dict containing the entire cloud config contents.
- @param cloud: The L{CloudInit} object in use.
- @param log: Pre-initialized Python logger object to use for logging.
- @param args: Any module arguments from cloud.cfg
- """
- if "manage_resolv_conf" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'manage_resolv_conf' key in configuration"), name)
- return
-
- if not util.get_cfg_option_bool(cfg, "manage_resolv_conf", False):
- log.debug(("Skipping module named %s,"
- " 'manage_resolv_conf' present but set to False"), name)
- return
-
- if "resolv_conf" not in cfg:
- log.warn("manage_resolv_conf True but no parameters provided!")
-
- template_fn = cloud.get_template_filename('resolv.conf')
- if not template_fn:
- log.warn("No template found, not rendering /etc/resolv.conf")
- return
-
- generate_resolv_conf(template_fn=template_fn, params=cfg["resolv_conf"])
- return
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
deleted file mode 100644
index 3a113aea..00000000
--- a/cloudinit/config/cc_rh_subscription.py
+++ /dev/null
@@ -1,408 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Red Hat, Inc.
-#
-# Author: Brent Baude <bbaude@redhat.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
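-# Example cloud-config (an illustrative sketch; keys follow valid_rh_keys
-# below, and all values shown are hypothetical):
-#
-# #cloud-config
-# rh_subscription:
-#     username: joe@example.com
-#     password: '1234abcd'
-#     auto-attach: True
-#     service-level: self-support
-#     enable-repo: ['repo1', 'repo2']
-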
-from cloudinit import util
-
-
-def handle(name, cfg, _cloud, log, _args):
- sm = SubscriptionManager(cfg)
- sm.log = log
- if not sm.is_configured():
- log.debug("%s: module not configured.", name)
- return None
-
- if not sm.is_registered():
- try:
- verify, verify_msg = sm._verify_keys()
- if verify is not True:
- raise SubscriptionError(verify_msg)
- cont = sm.rhn_register()
- if not cont:
- raise SubscriptionError("Registration failed or did not "
- "run completely")
-
- # Splitting up the registration, auto-attach, and servicelevel
- # commands because the error codes, messages from subman are not
- # specific enough.
-
- # Attempt to change the service level
- if sm.auto_attach and sm.servicelevel is not None:
- if not sm._set_service_level():
- raise SubscriptionError("Setting of service-level "
- "failed")
- else:
- sm.log.debug("Completed auto-attach with service level")
- elif sm.auto_attach:
- if not sm._set_auto_attach():
- raise SubscriptionError("Setting auto-attach failed")
- else:
- sm.log.debug("Completed auto-attach")
-
- if sm.pools is not None:
- if not isinstance(sm.pools, list):
- pool_fail = "Pools must in the format of a list"
- raise SubscriptionError(pool_fail)
-
- return_stat = sm.addPool(sm.pools)
- if not return_stat:
- raise SubscriptionError("Unable to attach pools {0}"
- .format(sm.pools))
- if (sm.enable_repo is not None) or (sm.disable_repo is not None):
- return_stat = sm.update_repos(sm.enable_repo, sm.disable_repo)
- if not return_stat:
- raise SubscriptionError("Unable to add or remove repos")
- sm.log_success("rh_subscription plugin completed successfully")
- except SubscriptionError as e:
- sm.log_warn(str(e))
- sm.log_warn("rh_subscription plugin did not complete successfully")
- else:
- sm.log_success("System is already registered")
-
-
-class SubscriptionError(Exception):
- pass
-
-
-class SubscriptionManager(object):
- valid_rh_keys = ['org', 'activation-key', 'username', 'password',
- 'disable-repo', 'enable-repo', 'add-pool',
- 'rhsm-baseurl', 'server-hostname',
- 'auto-attach', 'service-level']
-
- def __init__(self, cfg):
- self.cfg = cfg
- self.rhel_cfg = self.cfg.get('rh_subscription', {})
- self.rhsm_baseurl = self.rhel_cfg.get('rhsm-baseurl')
- self.server_hostname = self.rhel_cfg.get('server-hostname')
- self.pools = self.rhel_cfg.get('add-pool')
- self.activation_key = self.rhel_cfg.get('activation-key')
- self.org = self.rhel_cfg.get('org')
- self.userid = self.rhel_cfg.get('username')
- self.password = self.rhel_cfg.get('password')
- self.auto_attach = self.rhel_cfg.get('auto-attach')
- self.enable_repo = self.rhel_cfg.get('enable-repo')
- self.disable_repo = self.rhel_cfg.get('disable-repo')
- self.servicelevel = self.rhel_cfg.get('service-level')
- self.subman = ['subscription-manager']
-
- def log_success(self, msg):
- '''Simple wrapper for logging info messages. Useful for unittests'''
- self.log.info(msg)
-
- def log_warn(self, msg):
- '''Simple wrapper for logging warning messages. Useful for unittests'''
- self.log.warn(msg)
-
- def _verify_keys(self):
- '''
- Checks that the keys in the rh_subscription dict from the user-data
- are what we expect.
- '''
-
- for k in self.rhel_cfg:
- if k not in self.valid_rh_keys:
- bad_key = "{0} is not a valid key for rh_subscription. "\
- "Valid keys are: "\
- "{1}".format(k, ', '.join(self.valid_rh_keys))
- return False, bad_key
-
- # Check for bad auto-attach value
- if (self.auto_attach is not None) and \
- not (util.is_true(self.auto_attach) or
- util.is_false(self.auto_attach)):
- not_bool = "The key auto-attach must be a boolean value "\
- "(True/False "
- return False, not_bool
-
- if (self.servicelevel is not None) and ((not self.auto_attach) or
- (util.is_false(str(self.auto_attach)))):
- no_auto = ("The service-level key must be used in conjunction "
- "with the auto-attach key. Please re-run with "
- "auto-attach: True")
- return False, no_auto
- return True, None
-
- def is_registered(self):
- '''
- Checks if the system is already registered and returns
- True if so, else False
- '''
- cmd = ['identity']
-
- try:
- self._sub_man_cli(cmd)
- except util.ProcessExecutionError:
- return False
-
- return True
-
- def _sub_man_cli(self, cmd, logstring_val=False):
- '''
- Uses the preferred cloud-init subprocess wrapper, util.subp, to run
- subscription-manager. Kept as a separate function to ease mocking
- and unittests.
- '''
- cmd = self.subman + cmd
- return util.subp(cmd, logstring=logstring_val)
-
- def rhn_register(self):
- '''
- Registers the system by userid and password or activation key
- and org.  Returns True when successful, False when not.
- '''
-
- if (self.activation_key is not None) and (self.org is not None):
- # register by activation key
- cmd = ['register', '--activationkey={0}'.
- format(self.activation_key), '--org={0}'.format(self.org)]
-
- # If the baseurl and/or server url are passed in, we register
- # with them.
-
- if self.rhsm_baseurl is not None:
- cmd.append("--baseurl={0}".format(self.rhsm_baseurl))
-
- if self.server_hostname is not None:
- cmd.append("--serverurl={0}".format(self.server_hostname))
-
- try:
- return_out, return_err = self._sub_man_cli(cmd,
- logstring_val=True)
- except util.ProcessExecutionError as e:
- if e.stdout == "":
- self.log_warn("Registration failed due "
- "to: {0}".format(e.stderr))
- return False
-
- elif (self.userid is not None) and (self.password is not None):
- # register by username and password
- cmd = ['register', '--username={0}'.format(self.userid),
- '--password={0}'.format(self.password)]
-
- # If the baseurl and/or server url are passed in, we register
- # with them.
-
- if self.rhsm_baseurl is not None:
- cmd.append("--baseurl={0}".format(self.rhsm_baseurl))
-
- if self.server_hostname is not None:
- cmd.append("--serverurl={0}".format(self.server_hostname))
-
- # Attempting to register the system only
- try:
- return_out, return_err = self._sub_man_cli(cmd,
- logstring_val=True)
- except util.ProcessExecutionError as e:
- if e.stdout == "":
- self.log_warn("Registration failed due "
- "to: {0}".format(e.stderr))
- return False
-
- else:
- self.log_warn("Unable to register system due to incomplete "
- "information.")
- self.log_warn("Use either activationkey and org *or* userid "
- "and password")
- return False
-
- reg_id = return_out.split("ID: ")[1].rstrip()
- self.log.debug("Registered successfully with ID {0}".format(reg_id))
- return True
-
- def _set_service_level(self):
- cmd = ['attach', '--auto', '--servicelevel={0}'
- .format(self.servicelevel)]
-
- try:
- return_out, return_err = self._sub_man_cli(cmd)
- except util.ProcessExecutionError as e:
- if e.stdout.rstrip() != '':
- for line in e.stdout.split("\n"):
- if line != '':
- self.log_warn(line)
- else:
- self.log_warn("Setting the service level failed with: "
- "{0}".format(e.stderr.strip()))
- return False
- for line in return_out.split("\n"):
- if line is not "":
- self.log.debug(line)
- return True
-
- def _set_auto_attach(self):
- cmd = ['attach', '--auto']
- try:
- return_out, return_err = self._sub_man_cli(cmd)
- except util.ProcessExecutionError as e:
- self.log_warn("Auto-attach failed with: "
- "{0}".format(e))
- return False
- for line in return_out.split("\n"):
- if line != "":
- self.log.debug(line)
- return True
-
- def _getPools(self):
- '''
- Gets the pools for the active subscription and returns the
- available and consumed pools, each in list form.
- '''
- available = []
- consumed = []
-
- # Get all available pools
- cmd = ['list', '--available', '--pool-only']
- results, errors = self._sub_man_cli(cmd)
- available = (results.rstrip()).split("\n")
-
- # Get all consumed pools
- cmd = ['list', '--consumed', '--pool-only']
- results, errors = self._sub_man_cli(cmd)
- consumed = (results.rstrip()).split("\n")
-
- return available, consumed
-
- def _getRepos(self):
- '''
- Obtains the current list of active yum repositories and returns
- them in list form.
- '''
-
- cmd = ['repos', '--list-enabled']
- return_out, return_err = self._sub_man_cli(cmd)
- active_repos = []
- for repo in return_out.split("\n"):
- if "Repo ID:" in repo:
- active_repos.append((repo.split(':')[1]).strip())
-
- cmd = ['repos', '--list-disabled']
- return_out, return_err = self._sub_man_cli(cmd)
-
- inactive_repos = []
- for repo in return_out.split("\n"):
- if "Repo ID:" in repo:
- inactive_repos.append((repo.split(':')[1]).strip())
- return active_repos, inactive_repos
-
- def addPool(self, pools):
- '''
- Takes a list of subscription pools and "attaches" them to the
- current subscription
- '''
-
- # An empty list was passed
- if len(pools) == 0:
- self.log.debug("No pools to attach")
- return True
-
- pool_available, pool_consumed = self._getPools()
- pool_list = []
- cmd = ['attach']
- for pool in pools:
- if (pool not in pool_consumed) and (pool in pool_available):
- pool_list.append('--pool={0}'.format(pool))
- else:
- self.log_warn("Pool {0} is not available".format(pool))
- if len(pool_list) > 0:
- cmd.extend(pool_list)
- try:
- self._sub_man_cli(cmd)
- self.log.debug("Attached the following pools to your "
- "system: %s" % (", ".join(pool_list))
- .replace('--pool=', ''))
- return True
- except util.ProcessExecutionError as e:
- self.log_warn("Unable to attach pool {0} "
- "due to {1}".format(pool, e))
- return False
-
- def update_repos(self, erepos, drepos):
- '''
- Takes lists of yum repo ids to enable and disable, verifies
- whether each is already in the desired state, and then executes
- only the enable/disable actions that are actually needed.
- '''
-
- if (erepos is not None) and (not isinstance(erepos, list)):
- self.log_warn("Repo IDs must in the format of a list.")
- return False
-
- if (drepos is not None) and (not isinstance(drepos, list)):
- self.log_warn("Repo IDs must in the format of a list.")
- return False
-
- # Normalize None to empty lists, then bail if both are empty
- erepos = erepos or []
- drepos = drepos or []
- if (len(erepos) == 0) and (len(drepos) == 0):
- self.log.debug("No repo IDs to enable or disable")
- return True
-
- active_repos, inactive_repos = self._getRepos()
- # Creating a list of repoids to be enabled
- enable_list = []
- enable_list_fail = []
- for repoid in erepos:
- if (repoid in inactive_repos):
- enable_list.append("--enable={0}".format(repoid))
- else:
- enable_list_fail.append(repoid)
-
- # Creating a list of repoids to be disabled
- disable_list = []
- disable_list_fail = []
- for repoid in drepos:
- if repoid in active_repos:
- disable_list.append("--disable={0}".format(repoid))
- else:
- disable_list_fail.append(repoid)
-
- # Logging any repos that are already enabled or disabled
- if len(enable_list_fail) > 0:
- for fail in enable_list_fail:
- # Check if the repo exists or not
- if fail in active_repos:
- self.log.debug("Repo {0} is already enabled".format(fail))
- else:
- self.log_warn("Repo {0} does not appear to "
- "exist".format(fail))
- if len(disable_list_fail) > 0:
- for fail in disable_list_fail:
- self.log.debug("Repo {0} not disabled "
- "because it is not enabled".format(fail))
-
- cmd = ['repos']
- if len(enable_list) > 0:
- cmd.extend(enable_list)
- if len(disable_list) > 0:
- cmd.extend(disable_list)
-
- try:
- self._sub_man_cli(cmd)
- except util.ProcessExecutionError as e:
- self.log_warn("Unable to alter repos due to {0}".format(e))
- return False
-
- if len(enable_list) > 0:
- self.log.debug("Enabled the following repos: %s" %
- (", ".join(enable_list)).replace('--enable=', ''))
- if len(disable_list) > 0:
- self.log.debug("Disabled the following repos: %s" %
- (", ".join(disable_list)).replace('--disable=', ''))
- return True
-
- def is_configured(self):
- return bool((self.userid and self.password) or self.activation_key)
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
deleted file mode 100644
index 8118fac4..00000000
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# The purpose of this script is to allow cloud-init to consume
-# RightScale-style userdata. RightScale user data is key-value pairs
-# in a url-query-string-like format.
-#
-# for cloud-init support, there will be a key named
-# 'CLOUD_INIT_REMOTE_HOOK'.
-#
-# This cloud-config module will
-# - read the blob of data from raw user data, and parse it as key/value
-# - for each key that is found, download the content to
-# the local instance/scripts directory and set them executable.
-# - the files in that directory will be run by the user-scripts module.
-# Therefore, this module must run before that one.
-#
-#
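-# An illustrative sketch of such userdata (hypothetical URLs):
-#
-#   CLOUD_INIT_REMOTE_HOOK=http://example.com/a&CLOUD_INIT_REMOTE_HOOK=http://example.com/b
-#
-# parse_qs turns this into {'CLOUD_INIT_REMOTE_HOOK': [url_a, url_b]},
-# and each url is fetched into the instance's scripts directory as
-# rightscale-NN.
-#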
-
-import os
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import url_helper as uhelp
-from cloudinit import util
-
-from six.moves.urllib_parse import parse_qs
-
-frequency = PER_INSTANCE
-
-MY_NAME = "cc_rightscale_userdata"
-MY_HOOKNAME = 'CLOUD_INIT_REMOTE_HOOK'
-
-
-def handle(name, _cfg, cloud, log, _args):
- try:
- ud = cloud.get_userdata_raw()
- except Exception:
- log.debug("Failed to get raw userdata in module %s", name)
- return
-
- try:
- mdict = parse_qs(ud)
- if not mdict or MY_HOOKNAME not in mdict:
- log.debug(("Skipping module %s, "
- "did not find %s in parsed"
- " raw userdata"), name, MY_HOOKNAME)
- return
- except Exception:
- util.logexc(log, "Failed to parse query string %s into a dictionary",
- ud)
- raise
-
- wrote_fns = []
- captured_excps = []
-
- # These will eventually be run by the cc_scripts_user module.
- # TODO(harlowja): maybe this should just be a new user data handler??
- # Instead of a late module that acts like a user data handler?
- scripts_d = cloud.get_ipath_cur('scripts')
- urls = mdict[MY_HOOKNAME]
- for (i, url) in enumerate(urls):
- fname = os.path.join(scripts_d, "rightscale-%02i" % (i))
- try:
- resp = uhelp.readurl(url)
- # Ensure it's a valid http response (and that something was received)
- if resp.ok() and resp.contents:
- util.write_file(fname, resp, mode=0o700)
- wrote_fns.append(fname)
- except Exception as e:
- captured_excps.append(e)
- util.logexc(log, "%s failed to read %s and write %s", MY_NAME, url,
- fname)
-
- if wrote_fns:
- log.debug("Wrote out rightscale userdata to %s files", len(wrote_fns))
-
- if len(wrote_fns) != len(urls):
- skipped = len(urls) - len(wrote_fns)
- log.debug("%s urls were skipped or failed", skipped)
-
- if captured_excps:
- log.warn("%s failed with exceptions, re-raising the last one",
- len(captured_excps))
- raise captured_excps[-1]
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
deleted file mode 100644
index b8642d65..00000000
--- a/cloudinit/config/cc_rsyslog.py
+++ /dev/null
@@ -1,366 +0,0 @@
-# vi: ts=4 expandtab syntax=python
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-rsyslog module allows configuration of syslog logging via rsyslog
-Configuration is done under the cloud-config top level 'rsyslog'.
-
-Under 'rsyslog' you can define:
- - configs: [default=[]]
- this is a list. entries in it are a string or a dictionary.
- each entry has 2 parts:
- * content
- * filename
- if the entry is a string, then it is assigned to 'content'.
- for each entry, content is written to the provided filename.
- if filename is not provided, its default is read from 'config_filename'
-
- Content here can be any valid rsyslog configuration.  No specific
- format is enforced.
-
- For simply logging to an existing remote syslog server, via udp:
- configs: ["*.* @192.168.1.1"]
-
- - remotes: [default={}]
- This is a dictionary of name / value pairs.
- In comparison to 'configs', it is more focused, in that it only
- supports remote syslog configuration. It is not rsyslog specific,
- and could be converted to other syslog implementations.
-
- Each entry in remotes is a 'name' and a 'value'.
- * name: a string identifying the entry.  good practice would indicate
- using a consistent and identifiable string for the producer.
- For example, the MAAS service could use 'maas' as the key.
- * value consists of the following parts:
- * optional filter for log messages
- default if not present: *.*
- * optional leading '@' or '@@', which is rsyslog's notation for
- udp or tcp respectively.
- default if not present: '@' (udp)
- * ipv4 or ipv6 or hostname
- ipv6 addresses must be in [::1] format. (@[fd00::1]:514)
- * optional port
- port defaults to 514
-
- - config_filename: [default=20-cloud-config.conf]
- this is the file name to use if none is provided in a config entry.
-
- - config_dir: [default=/etc/rsyslog.d]
- this directory is used for filenames that are not absolute paths.
-
- - service_reload_command: [default="auto"]
- this command is executed if files have been written and thus the syslog
- daemon needs to be told.
-
-Note, since cloud-init 0.5 a legacy version of rsyslog config has been
-present and is still supported. See below for the mappings between old
-value and new value:
- old value -> new value
- 'rsyslog' -> rsyslog/configs
- 'rsyslog_filename' -> rsyslog/config_filename
- 'rsyslog_dir' -> rsyslog/config_dir
-
-the legacy config does not support 'service_reload_command'.
-
-Example config:
- #cloud-config
- rsyslog:
- configs:
- - "*.* @@192.158.1.1"
- - content: "*.* @@192.0.2.1:10514"
- filename: 01-example.conf
- - content: |
- *.* @@syslogd.example.com
- remotes:
- maas: "192.168.1.1"
- juju: "10.0.4.1"
- config_dir: config_dir
- config_filename: config_filename
- service_reload_command: [your, syslog, restart, command]
-
-Example Legacy config:
- #cloud-config
- rsyslog:
- - "*.* @@192.158.1.1"
- rsyslog_dir: /etc/rsyslog-config.d/
- rsyslog_filename: 99-local.conf
-"""
-
-import os
-import re
-import six
-
-from cloudinit import log as logging
-from cloudinit import util
-
-DEF_FILENAME = "20-cloud-config.conf"
-DEF_DIR = "/etc/rsyslog.d"
-DEF_RELOAD = "auto"
-DEF_REMOTES = {}
-
-KEYNAME_CONFIGS = 'configs'
-KEYNAME_FILENAME = 'config_filename'
-KEYNAME_DIR = 'config_dir'
-KEYNAME_RELOAD = 'service_reload_command'
-KEYNAME_LEGACY_FILENAME = 'rsyslog_filename'
-KEYNAME_LEGACY_DIR = 'rsyslog_dir'
-KEYNAME_REMOTES = 'remotes'
-
-LOG = logging.getLogger(__name__)
-
-COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*')
-HOST_PORT_RE = re.compile(
- r'^(?P<proto>[@]{0,2})'
- r'(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
- r'([:](?P<port>[0-9]+))?$')
-
-
-def reload_syslog(command=DEF_RELOAD, systemd=False):
- service = 'rsyslog'
- if command == DEF_RELOAD:
- if systemd:
- cmd = ['systemctl', 'reload-or-try-restart', service]
- else:
- cmd = ['service', service, 'restart']
- else:
- cmd = command
- util.subp(cmd, capture=True)
-
-
-def load_config(cfg):
- # return an updated config with entries of the correct type
- # support converting the old top level format into new format
- mycfg = cfg.get('rsyslog', {})
-
- if isinstance(cfg.get('rsyslog'), list):
- mycfg = {KEYNAME_CONFIGS: cfg.get('rsyslog')}
- if KEYNAME_LEGACY_FILENAME in cfg:
- mycfg[KEYNAME_FILENAME] = cfg[KEYNAME_LEGACY_FILENAME]
- if KEYNAME_LEGACY_DIR in cfg:
- mycfg[KEYNAME_DIR] = cfg[KEYNAME_LEGACY_DIR]
-
- fillup = (
- (KEYNAME_CONFIGS, [], list),
- (KEYNAME_DIR, DEF_DIR, six.string_types),
- (KEYNAME_FILENAME, DEF_FILENAME, six.string_types),
- (KEYNAME_RELOAD, DEF_RELOAD, six.string_types + (list,)),
- (KEYNAME_REMOTES, DEF_REMOTES, dict))
-
- for key, default, vtypes in fillup:
- if key not in mycfg or not isinstance(mycfg[key], vtypes):
- mycfg[key] = default
-
- return mycfg
-
-
-def apply_rsyslog_changes(configs, def_fname, cfg_dir):
- # apply the changes in 'configs' to the paths in def_fname and cfg_dir
- # return a list of the files changed
- files = []
- for cur_pos, ent in enumerate(configs):
- if isinstance(ent, dict):
- if "content" not in ent:
- LOG.warn("No 'content' entry in config entry %s", cur_pos + 1)
- continue
- content = ent['content']
- filename = ent.get("filename", def_fname)
- else:
- content = ent
- filename = def_fname
-
- filename = filename.strip()
- if not filename:
- LOG.warn("Entry %s has an empty filename", cur_pos + 1)
- continue
-
- filename = os.path.join(cfg_dir, filename)
-
- # Truncate filename first time you see it
- omode = "ab"
- if filename not in files:
- omode = "wb"
- files.append(filename)
-
- try:
- endl = ""
- if not content.endswith("\n"):
- endl = "\n"
- util.write_file(filename, content + endl, omode=omode)
- except Exception:
- util.logexc(LOG, "Failed to write to %s", filename)
-
- return files
-
-
-def parse_remotes_line(line, name=None):
- try:
- data, comment = COMMENT_RE.split(line)
- comment = comment.strip()
- except ValueError:
- data, comment = (line, None)
-
- toks = data.strip().split()
- match = None
- if len(toks) == 1:
- host_port = data
- elif len(toks) == 2:
- match, host_port = toks
- else:
- raise ValueError("line had multiple spaces: %s" % data)
-
- toks = HOST_PORT_RE.match(host_port)
-
- if not toks:
- raise ValueError("Invalid host specification '%s'" % host_port)
-
- proto = toks.group('proto')
- addr = toks.group('addr') or toks.group('bracket_addr')
- port = toks.group('port')
-
- if addr.startswith("[") and not addr.endswith("]"):
- raise ValueError("host spec had invalid brackets: %s" % addr)
-
- if comment and not name:
- name = comment
-
- t = SyslogRemotesLine(name=name, match=match, proto=proto,
- addr=addr, port=port)
- t.validate()
- return t
-
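-# Illustrative sketch (not part of the original module):
-#   parse_remotes_line("*.* @@192.0.2.1:10514", name="example")
-# yields a SyslogRemotesLine whose str() form round-trips to:
-#   *.* @@192.0.2.1:10514 # example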
-
-class SyslogRemotesLine(object):
- def __init__(self, name=None, match=None, proto=None, addr=None,
- port=None):
- if not match:
- match = "*.*"
- self.name = name
- self.match = match
- if not proto:
- proto = "udp"
- if proto == "@":
- proto = "udp"
- elif proto == "@@":
- proto = "tcp"
- self.proto = proto
-
- self.addr = addr
- if port:
- self.port = int(port)
- else:
- self.port = None
-
- def validate(self):
- if self.port:
- try:
- int(self.port)
- except ValueError:
- raise ValueError("port '%s' is not an integer" % self.port)
-
- if not self.addr:
- raise ValueError("address is required")
-
- def __repr__(self):
- return "[name=%s match=%s proto=%s address=%s port=%s]" % (
- self.name, self.match, self.proto, self.addr, self.port
- )
-
- def __str__(self):
- buf = self.match + " "
- if self.proto == "udp":
- buf += "@"
- elif self.proto == "tcp":
- buf += "@@"
-
- if ":" in self.addr:
- buf += "[" + self.addr + "]"
- else:
- buf += self.addr
-
- if self.port:
- buf += ":%s" % self.port
-
- if self.name:
- buf += " # %s" % self.name
- return buf
-
-
-def remotes_to_rsyslog_cfg(remotes, header=None, footer=None):
- if not remotes:
- return None
- lines = []
- if header is not None:
- lines.append(header)
- for name, line in remotes.items():
- if not line:
- continue
- try:
- lines.append(str(parse_remotes_line(line, name=name)))
- except ValueError as e:
- LOG.warn("failed loading remote %s: %s [%s]", name, line, e)
- if footer is not None:
- lines.append(footer)
- return '\n'.join(lines) + "\n"
-
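-# Illustrative sketch (not part of the original module): with
-# remotes={'maas': '192.168.1.1'}, header='# begin remotes' and
-# footer='# end remotes', the rendered config is:
-#   # begin remotes
-#   *.* @192.168.1.1 # maas
-#   # end remotes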
-
-def handle(name, cfg, cloud, log, _args):
- if 'rsyslog' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'rsyslog' key in configuration"), name)
- return
-
- mycfg = load_config(cfg)
- configs = mycfg[KEYNAME_CONFIGS]
-
- if mycfg[KEYNAME_REMOTES]:
- configs.append(
- remotes_to_rsyslog_cfg(
- mycfg[KEYNAME_REMOTES],
- header="# begin remotes",
- footer="# end remotes",
- ))
-
- if not mycfg['configs']:
- log.debug("Empty config rsyslog['configs'], nothing to do")
- return
-
- changes = apply_rsyslog_changes(
- configs=mycfg[KEYNAME_CONFIGS],
- def_fname=mycfg[KEYNAME_FILENAME],
- cfg_dir=mycfg[KEYNAME_DIR])
-
- if not changes:
- log.debug("restart of syslog not necessary, no changes made")
- return
-
- try:
- reload_syslog(
- command=mycfg[KEYNAME_RELOAD],
- systemd=cloud.distro.uses_systemd())
- restarted = True
- except util.ProcessExecutionError as e:
- restarted = False
- log.warn("Failed to reload syslog: %s", e)
-
- if restarted:
- # This only needs to run if we *actually* restarted
- # syslog above.
- cloud.cycle_logging()
- # This should now use rsyslog if
- # the logging was setup to use it...
- log.debug("%s configured %s files", name, changes)
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
deleted file mode 100644
index bc09d38c..00000000
--- a/cloudinit/config/cc_runcmd.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
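-# Example cloud-config (an illustrative sketch; each entry may be a string
-# or an argv-style list, per util.shellify):
-#
-# #cloud-config
-# runcmd:
-#     - [ ls, -l, / ]
-#     - "echo the time is $(date)"
-#
-# The shellified commands are written to <instance>/scripts/runcmd and run
-# later by the scripts-user module.
-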
-import os
-
-from cloudinit import util
-
-
-def handle(name, cfg, cloud, log, _args):
- if "runcmd" not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'runcmd' key in configuration"), name)
- return
-
- out_fn = os.path.join(cloud.get_ipath('scripts'), "runcmd")
- cmd = cfg["runcmd"]
- try:
- content = util.shellify(cmd)
- util.write_file(out_fn, content, 0o700)
- except Exception:
- util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn)
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
deleted file mode 100644
index f5786a31..00000000
--- a/cloudinit/config/cc_salt_minion.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Author: Jeff Bauer <jbauer@rubic.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-# Note: see http://saltstack.org/topics/installation/
-
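-# Example cloud-config (an illustrative sketch; 'conf' is dumped as YAML
-# into <config_dir>/minion, and the master address is hypothetical):
-#
-# #cloud-config
-# salt_minion:
-#     conf:
-#         master: salt.example.com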
-
-def handle(name, cfg, cloud, log, _args):
- # If there isn't a salt key in the configuration don't do anything
- if 'salt_minion' not in cfg:
- log.debug(("Skipping module named %s,"
- " no 'salt_minion' key in configuration"), name)
- return
-
- salt_cfg = cfg['salt_minion']
-
- # Start by installing the salt package ...
- cloud.distro.install_packages(('salt-minion',))
-
- # Ensure we can configure files at the right dir
- config_dir = salt_cfg.get("config_dir", '/etc/salt')
- util.ensure_dir(config_dir)
-
- # ... and then update the salt configuration
- if 'conf' in salt_cfg:
- # Add all sections from the conf object to /etc/salt/minion
- minion_config = os.path.join(config_dir, 'minion')
- minion_data = util.yaml_dumps(salt_cfg.get('conf'))
- util.write_file(minion_config, minion_data)
-
- # ... copy the key pair if specified
- if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
- pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki')
- with util.umask(0o77):
- util.ensure_dir(pki_dir)
- pub_name = os.path.join(pki_dir, 'minion.pub')
- pem_name = os.path.join(pki_dir, 'minion.pem')
- util.write_file(pub_name, salt_cfg['public_key'])
- util.write_file(pem_name, salt_cfg['private_key'])
-
- # Restart salt-minion.  'service restart' will start the daemon even if
- # it was not running; if it was running, the restart picks up the
- # config changes.
- util.subp(['service', 'salt-minion', 'restart'], capture=False)
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
deleted file mode 100644
index ee3b6c9f..00000000
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
-
-SCRIPT_SUBDIR = 'per-boot'
-
-
-def handle(name, _cfg, cloud, log, _args):
- # Comes from the following:
- # https://forums.aws.amazon.com/thread.jspa?threadID=96918
- runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
- try:
- util.runparts(runparts_path)
- except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
- raise
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
deleted file mode 100644
index c0d62b12..00000000
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-SCRIPT_SUBDIR = 'per-instance'
-
-
-def handle(name, _cfg, cloud, log, _args):
- # Comes from the following:
- # https://forums.aws.amazon.com/thread.jspa?threadID=96918
- runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
- try:
- util.runparts(runparts_path)
- except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
- raise
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
deleted file mode 100644
index ecb527f6..00000000
--- a/cloudinit/config/cc_scripts_per_once.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-from cloudinit.settings import PER_ONCE
-
-frequency = PER_ONCE
-
-SCRIPT_SUBDIR = 'per-once'
-
-
-def handle(name, _cfg, cloud, log, _args):
- # Comes from the following:
- # https://forums.aws.amazon.com/thread.jspa?threadID=96918
- runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
- try:
- util.runparts(runparts_path)
- except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
- raise
diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
deleted file mode 100644
index 699857d1..00000000
--- a/cloudinit/config/cc_scripts_user.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-SCRIPT_SUBDIR = 'scripts'
-
-
-def handle(name, _cfg, cloud, log, _args):
- # This is written to by the user data handlers
- # Ie, any custom shell scripts that come down
- # go here...
- runparts_path = os.path.join(cloud.get_ipath_cur(), SCRIPT_SUBDIR)
- try:
- util.runparts(runparts_path)
- except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
- raise
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
deleted file mode 100644
index 80bf10ff..00000000
--- a/cloudinit/config/cc_scripts_vendor.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 Canonical Ltd.
-#
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import util
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-SCRIPT_SUBDIR = 'vendor'
-
-
-def handle(name, cfg, cloud, log, _args):
- # This is written to by the vendor data handlers
- # any vendor data shell scripts get placed in runparts_path
- runparts_path = os.path.join(cloud.get_ipath_cur(), 'scripts',
- SCRIPT_SUBDIR)
-
- prefix = util.get_cfg_by_path(cfg, ('vendor_data', 'prefix'), [])
-
- try:
- util.runparts(runparts_path, exe_prefix=prefix)
- except Exception:
- log.warn("Failed to run module %s (%s in %s)",
- name, SCRIPT_SUBDIR, runparts_path)
- raise
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
deleted file mode 100644
index 5085c23a..00000000
--- a/cloudinit/config/cc_seed_random.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2013 Yahoo! Inc.
-# Copyright (C) 2014 Canonical, Ltd
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-# Author: Dustin Kirkland <kirkland@ubuntu.com>
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import base64
-import os
-
-from six import BytesIO
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-frequency = PER_INSTANCE
-LOG = logging.getLogger(__name__)
-
-
-def _decode(data, encoding=None):
- if not data:
- return b''
- if not encoding or encoding.lower() in ['raw']:
- return util.encode_text(data)
- elif encoding.lower() in ['base64', 'b64']:
- return base64.b64decode(data)
- elif encoding.lower() in ['gzip', 'gz']:
- return util.decomp_gzip(data, quiet=False, decode=None)
- else:
- raise IOError("Unknown random_seed encoding: %s" % (encoding))
-
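-# Illustrative sketch (not part of the original module):
-#   _decode('aGk=', encoding='b64') == b'hi'
-#   _decode('hi', encoding='raw') == b'hi'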
-
-def handle_random_seed_command(command, required, env=None):
- if not command and required:
- raise ValueError("no command found but required=true")
- elif not command:
- LOG.debug("no command provided")
- return
-
- cmd = command[0]
- if not util.which(cmd):
- if required:
- raise ValueError("command '%s' not found but required=true", cmd)
- else:
- LOG.debug("command '%s' not found for seed_command", cmd)
- return
- util.subp(command, env=env, capture=False)
-
-
-def handle(name, cfg, cloud, log, _args):
- mycfg = cfg.get('random_seed', {})
- seed_path = mycfg.get('file', '/dev/urandom')
- seed_data = mycfg.get('data', b'')
-
- seed_buf = BytesIO()
- if seed_data:
- seed_buf.write(_decode(seed_data, encoding=mycfg.get('encoding')))
-
- # 'random_seed' is set up by the Azure datasource, and is already
- # present in openstack meta_data.json
- metadata = cloud.datasource.metadata
- if metadata and 'random_seed' in metadata:
- seed_buf.write(util.encode_text(metadata['random_seed']))
-
- seed_data = seed_buf.getvalue()
- if len(seed_data):
- log.debug("%s: adding %s bytes of random seed entropy to %s", name,
- len(seed_data), seed_path)
- util.append_file(seed_path, seed_data)
-
- command = mycfg.get('command', None)
- req = mycfg.get('command_required', False)
- try:
- env = os.environ.copy()
- env['RANDOM_SEED_FILE'] = seed_path
- handle_random_seed_command(command=command, required=req, env=env)
- except ValueError as e:
- log.warn("handling random command [%s] failed: %s", command, e)
- raise e
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
deleted file mode 100644
index f43d8d5a..00000000
--- a/cloudinit/config/cc_set_hostname.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import util
-
-
-def handle(name, cfg, cloud, log, _args):
- if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
- log.debug(("Configuration option 'preserve_hostname' is set,"
- " not setting the hostname in module %s"), name)
- return
-
- (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- try:
- log.debug("Setting the hostname to %s (%s)", fqdn, hostname)
- cloud.distro.set_hostname(hostname, fqdn)
- except Exception:
- util.logexc(log, "Failed to set the hostname to %s (%s)", fqdn,
- hostname)
- raise
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
deleted file mode 100644
index 5c8c23b8..00000000
--- a/cloudinit/config/cc_set_passwords.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
-from cloudinit import ssh_util
-from cloudinit import util
-
-from string import ascii_letters, digits
-
-# We are removing certain 'painful' letters/numbers
-PW_SET = (''.join([x for x in ascii_letters + digits
- if x not in 'loLOI01']))
-
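-# Example cloud-config (an illustrative sketch; a password of 'R' or
-# 'RANDOM' selects a generated password drawn from PW_SET above):
-#
-# #cloud-config
-# chpasswd:
-#     list: |
-#         root:RANDOM
-#         ubuntu:passw0rd
-#     expire: True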
-
-def handle(_name, cfg, cloud, log, args):
- if len(args) != 0:
- # if run from command line, and given args, wipe the chpasswd['list']
- password = args[0]
- if 'chpasswd' in cfg and 'list' in cfg['chpasswd']:
- del cfg['chpasswd']['list']
- else:
- password = util.get_cfg_option_str(cfg, "password", None)
-
- expire = True
- plist = None
-
- if 'chpasswd' in cfg:
- chfg = cfg['chpasswd']
- plist = util.get_cfg_option_str(chfg, 'list', plist)
- expire = util.get_cfg_option_bool(chfg, 'expire', expire)
-
- if not plist and password:
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
- (user, _user_config) = ds.extract_default(users)
- if user:
- plist = "%s:%s" % (user, password)
- else:
- log.warn("No default or defined user to change password for.")
-
- errors = []
- if plist:
- plist_in = []
- randlist = []
- users = []
- for line in plist.splitlines():
- u, p = line.split(':', 1)
- if p == "R" or p == "RANDOM":
- p = rand_user_password()
- randlist.append("%s:%s" % (u, p))
- plist_in.append("%s:%s" % (u, p))
- users.append(u)
-
- ch_in = '\n'.join(plist_in) + '\n'
- try:
- log.debug("Changing password for %s:", users)
- util.subp(['chpasswd'], ch_in)
- except Exception as e:
- errors.append(e)
- util.logexc(log, "Failed to set passwords with chpasswd for %s",
- users)
-
- if len(randlist):
- blurb = ("Set the following 'random' passwords\n",
- '\n'.join(randlist))
- sys.stderr.write("%s\n%s\n" % blurb)
-
- if expire:
- expired_users = []
- for u in users:
- try:
- util.subp(['passwd', '--expire', u])
- expired_users.append(u)
- except Exception as e:
- errors.append(e)
- util.logexc(log, "Failed to set 'expire' for %s", u)
- if expired_users:
- log.debug("Expired passwords for: %s users", expired_users)
-
- change_pwauth = False
- pw_auth = None
- if 'ssh_pwauth' in cfg:
- if util.is_true(cfg['ssh_pwauth']):
- change_pwauth = True
- pw_auth = 'yes'
- elif util.is_false(cfg['ssh_pwauth']):
- change_pwauth = True
- pw_auth = 'no'
- elif str(cfg['ssh_pwauth']).lower() == 'unchanged':
- log.debug('Leaving auth line unchanged')
- change_pwauth = False
- elif not str(cfg['ssh_pwauth']).strip():
- log.debug('Leaving auth line unchanged')
- change_pwauth = False
- elif not cfg['ssh_pwauth']:
- log.debug('Leaving auth line unchanged')
- change_pwauth = False
- else:
- msg = 'Unrecognized value %s for ssh_pwauth' % cfg['ssh_pwauth']
- util.logexc(log, msg)
-
- if change_pwauth:
- replaced_auth = False
-
- # See: man sshd_config
- old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG)
- new_lines = []
- i = 0
- for (i, line) in enumerate(old_lines):
- # Keywords are case-insensitive and arguments are case-sensitive
- if line.key == 'passwordauthentication':
- log.debug("Replacing auth line %s with %s", i + 1, pw_auth)
- replaced_auth = True
- line.value = pw_auth
- new_lines.append(line)
-
- if not replaced_auth:
- log.debug("Adding new auth line %s", i + 1)
- replaced_auth = True
- new_lines.append(ssh_util.SshdConfigLine('',
- 'PasswordAuthentication',
- pw_auth))
-
- lines = [str(l) for l in new_lines]
- util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines))
-
- try:
- cmd = cloud.distro.init_cmd # Default service
- cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh'))
- cmd.append('restart')
- if 'systemctl' in cmd: # Switch action ordering
- cmd[1], cmd[2] = cmd[2], cmd[1]
- cmd = list(filter(None, cmd))  # Remove empty arguments
- util.subp(cmd)
- log.debug("Restarted the ssh daemon")
- except Exception:
- util.logexc(log, "Restarting of the ssh daemon failed")
-
- if len(errors):
- log.debug("%s errors occured, re-raising the last one", len(errors))
- raise errors[-1]
-
-
-def rand_user_password(pwlen=9):
- return util.rand_str(pwlen, select_from=PW_SET)
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
deleted file mode 100644
index 1a485ee6..00000000
--- a/cloudinit/config/cc_snappy.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# vi: ts=4 expandtab
-#
-"""
-The snappy module allows configuration of snappy.
-Example config:
- #cloud-config
- snappy:
- system_snappy: auto
- ssh_enabled: auto
- packages: [etcd, pkg2.smoser]
- config:
- pkgname:
- key2: value2
- pkg2:
- key1: value1
- packages_dir: '/writable/user-data/cloud-init/snaps'
-
- - ssh_enabled:
- This controls the system's ssh service. The default value is 'auto'.
- True: enable ssh service
- False: disable ssh service
- auto: enable ssh service if either ssh keys have been provided
- or user has requested password authentication (ssh_pwauth).
-
- - snap installation and config
- The above would install 'etcd', and then install 'pkg2.smoser' with a
- '<config-file>' argument, where the config file holds pkg2's entry
- from 'config' above. If 'pkgname' is installed already, then
- 'snappy config pkgname <file>' will be called, where 'file' holds
- pkgname's config entry as its content.
-
- Entries in 'config' can be namespaced or non-namespaced for a package.
- In either case, the config provided to snappy command is non-namespaced.
- The package name is provided as it appears.
-
- If 'packages_dir' has files in it that end in '.snap', then they are
- installed. Given 3 files:
- <packages_dir>/foo.snap
- <packages_dir>/foo.config
- <packages_dir>/bar.snap
- cloud-init will invoke:
- snappy install <packages_dir>/foo.snap <packages_dir>/foo.config
- snappy install <packages_dir>/bar.snap
-
- Note that if a 'config' entry is provided for 'ubuntu-core', then
- cloud-init will invoke: snappy config ubuntu-core <config>,
- allowing you to configure ubuntu-core in this way.
-"""
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-import glob
-import os
-import tempfile
-
-LOG = logging.getLogger(__name__)
-
-frequency = PER_INSTANCE
-SNAPPY_CMD = "snappy"
-NAMESPACE_DELIM = '.'
-
-BUILTIN_CFG = {
- 'packages': [],
- 'packages_dir': '/writable/user-data/cloud-init/snaps',
- 'ssh_enabled': "auto",
- 'system_snappy': "auto",
- 'config': {},
-}
-
-
-def parse_filename(fname):
- fname = os.path.basename(fname)
- fname_noext = fname.rpartition(".")[0]
- name = fname_noext.partition("_")[0]
- shortname = name.partition(".")[0]
- return(name, shortname, fname_noext)
-
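-# Illustrative sketch (not part of the original module):
-#   parse_filename("/tmp/pkg2.smoser_1.0_all.snap")
-# returns ('pkg2.smoser', 'pkg2', 'pkg2.smoser_1.0_all').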
-
-def get_fs_package_ops(fspath):
- if not fspath:
- return []
- ops = []
- for snapfile in sorted(glob.glob(os.path.sep.join([fspath, '*.snap']))):
- (name, shortname, fname_noext) = parse_filename(snapfile)
- cfg = None
- for cand in (fname_noext, name, shortname):
- fpcand = os.path.sep.join([fspath, cand]) + ".config"
- if os.path.isfile(fpcand):
- cfg = fpcand
- break
- ops.append(makeop('install', name, config=None,
- path=snapfile, cfgfile=cfg))
- return ops
-
-
-def makeop(op, name, config=None, path=None, cfgfile=None):
- return({'op': op, 'name': name, 'config': config, 'path': path,
- 'cfgfile': cfgfile})
-
-
-def get_package_config(configs, name):
- # load the package's config from the configs dict.
- # prefer full-name entry (config-example.canonical)
- # over short name entry (config-example)
- if name in configs:
- return configs[name]
- return configs.get(name.partition(NAMESPACE_DELIM)[0])
-
-
-def get_package_ops(packages, configs, installed=None, fspath=None):
- # get the install and config operations that should be done
- if installed is None:
- installed = read_installed_packages()
- short_installed = [p.partition(NAMESPACE_DELIM)[0] for p in installed]
-
- if not packages:
- packages = []
- if not configs:
- configs = {}
-
- ops = []
- ops += get_fs_package_ops(fspath)
-
- for name in packages:
- ops.append(makeop('install', name, get_package_config(configs, name)))
-
- to_install = [f['name'] for f in ops]
- short_to_install = [f['name'].partition(NAMESPACE_DELIM)[0] for f in ops]
-
- for name in configs:
- if name in to_install:
- continue
- shortname = name.partition(NAMESPACE_DELIM)[0]
- if shortname in short_to_install:
- continue
- if name in installed or shortname in short_installed:
- ops.append(makeop('config', name,
- config=get_package_config(configs, name)))
-
- # prefer config entries to filepath entries
- for op in ops:
- if op['op'] != 'install' or not op['cfgfile']:
- continue
- name = op['name']
- fromcfg = get_package_config(configs, op['name'])
- if fromcfg:
- LOG.debug("preferring configs[%(name)s] over '%(cfgfile)s'", op)
- op['cfgfile'] = None
- op['config'] = fromcfg
-
- return ops
-
-
-def render_snap_op(op, name, path=None, cfgfile=None, config=None):
- if op not in ('install', 'config'):
- raise ValueError("cannot render op '%s'" % op)
-
- shortname = name.partition(NAMESPACE_DELIM)[0]
- try:
- cfg_tmpf = None
- if config is not None:
- # input to 'snappy config packagename' must have nested data. odd.
- # config:
- # packagename:
- # config
- # Note, however, we do not touch config files on disk.
- nested_cfg = {'config': {shortname: config}}
- (fd, cfg_tmpf) = tempfile.mkstemp()
- os.write(fd, util.yaml_dumps(nested_cfg).encode())
- os.close(fd)
- cfgfile = cfg_tmpf
-
- cmd = [SNAPPY_CMD, op]
- if op == 'install':
- if path:
- cmd.append("--allow-unauthenticated")
- cmd.append(path)
- else:
- cmd.append(name)
- if cfgfile:
- cmd.append(cfgfile)
- elif op == 'config':
- cmd += [name, cfgfile]
-
- util.subp(cmd)
-
- finally:
- if cfg_tmpf:
- os.unlink(cfg_tmpf)
-
-
-def read_installed_packages():
- ret = []
- for (name, date, version, dev) in read_pkg_data():
- if dev:
- ret.append(NAMESPACE_DELIM.join([name, dev]))
- else:
- ret.append(name)
- return ret
-
-
-def read_pkg_data():
- out, err = util.subp([SNAPPY_CMD, "list"])
- pkg_data = []
- for line in out.splitlines()[1:]:
- toks = line.split(sep=None, maxsplit=3)
- if len(toks) == 3:
- (name, date, version) = toks
- dev = None
- else:
- (name, date, version, dev) = toks
- pkg_data.append((name, date, version, dev,))
- return pkg_data
-
-
-def disable_enable_ssh(enabled):
- LOG.debug("setting enablement of ssh to: %s", enabled)
- # do something here that would enable or disable
- not_to_be_run = "/etc/ssh/sshd_not_to_be_run"
- if enabled:
- util.del_file(not_to_be_run)
- # this is an idempotent operation
- util.subp(["systemctl", "start", "ssh"])
- else:
- # this is an idempotent operation
- util.subp(["systemctl", "stop", "ssh"])
- util.write_file(not_to_be_run, "cloud-init\n")
-
-
-def system_is_snappy():
- # channel.ini is configparser loadable.
- # snappy will move to using /etc/system-image/config.d/*.ini
- # this is certainly not a perfect test, but good enough for now.
- content = util.load_file("/etc/system-image/channel.ini", quiet=True)
- if 'ubuntu-core' in content.lower():
- return True
- if os.path.isdir("/etc/system-image/config.d/"):
- return True
- return False
-
-
-def set_snappy_command():
- global SNAPPY_CMD
- if util.which("snappy-go"):
- SNAPPY_CMD = "snappy-go"
- else:
- SNAPPY_CMD = "snappy"
- LOG.debug("snappy command is '%s'", SNAPPY_CMD)
-
-
-def handle(name, cfg, cloud, log, args):
- cfgin = cfg.get('snappy')
- if not cfgin:
- cfgin = {}
- mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
-
- sys_snappy = str(mycfg.get("system_snappy", "auto"))
- if util.is_false(sys_snappy):
- LOG.debug("%s: System is not snappy. disabling", name)
- return
-
- if sys_snappy.lower() == "auto" and not(system_is_snappy()):
- LOG.debug("%s: 'auto' mode, and system not snappy", name)
- return
-
- set_snappy_command()
-
- pkg_ops = get_package_ops(packages=mycfg['packages'],
- configs=mycfg['config'],
- fspath=mycfg['packages_dir'])
-
- fails = []
- for pkg_op in pkg_ops:
- try:
- render_snap_op(**pkg_op)
- except Exception as e:
- fails.append((pkg_op, e,))
- LOG.warn("'%s' failed for '%s': %s",
- pkg_op['op'], pkg_op['name'], e)
-
- # Default to disabling SSH
- ssh_enabled = mycfg.get('ssh_enabled', "auto")
-
- # If the user has not explicitly enabled or disabled SSH, then enable it
- # when password SSH authentication is requested or there are SSH keys
- if ssh_enabled == "auto":
- user_ssh_keys = cloud.get_public_ssh_keys() or None
- password_auth_enabled = cfg.get('ssh_pwauth', False)
- if user_ssh_keys:
- LOG.debug("Enabling SSH, ssh keys found in datasource")
- ssh_enabled = True
- elif cfg.get('ssh_authorized_keys'):
- LOG.debug("Enabling SSH, ssh keys found in config")
- ssh_enabled = True
- elif password_auth_enabled:
- LOG.debug("Enabling SSH, password authentication requested")
- ssh_enabled = True
- elif ssh_enabled not in (True, False):
- LOG.warn("Unknown value '%s' in ssh_enabled", ssh_enabled)
-
- disable_enable_ssh(ssh_enabled)
-
- if fails:
- raise Exception("failed to install/configure snaps")
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
deleted file mode 100644
index cb9b70aa..00000000
--- a/cloudinit/config/cc_ssh.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import glob
-import os
-import sys
-
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
-from cloudinit import ssh_util
-from cloudinit import util
-
-DISABLE_ROOT_OPTS = (
- "no-port-forwarding,no-agent-forwarding,"
- "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\""
- " rather than the user \\\"root\\\".\';echo;sleep 10\"")
-
-GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
-KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
-
-CONFIG_KEY_TO_FILE = {}
-PRIV_TO_PUB = {}
-for k in GENERATE_KEY_NAMES:
- CONFIG_KEY_TO_FILE.update({"%s_private" % k: (KEY_FILE_TPL % k, 0o600)})
- CONFIG_KEY_TO_FILE.update(
- {"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o600)})
- PRIV_TO_PUB["%s_private" % k] = "%s_public" % k
-
-KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
-
-
-def handle(_name, cfg, cloud, log, _args):
-
- # remove the static keys from the pristine image
- if cfg.get("ssh_deletekeys", True):
- key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*")
- for f in glob.glob(key_pth):
- try:
- util.del_file(f)
- except Exception:
- util.logexc(log, "Failed deleting key file %s", f)
-
- if "ssh_keys" in cfg:
- # if there are keys in cloud-config, use them
- for (key, val) in cfg["ssh_keys"].items():
- if key in CONFIG_KEY_TO_FILE:
- tgt_fn = CONFIG_KEY_TO_FILE[key][0]
- tgt_perms = CONFIG_KEY_TO_FILE[key][1]
- util.write_file(tgt_fn, val, tgt_perms)
-
- for (priv, pub) in PRIV_TO_PUB.items():
- if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
- continue
- pair = (CONFIG_KEY_TO_FILE[priv][0], CONFIG_KEY_TO_FILE[pub][0])
- cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
- try:
- # TODO(harlowja): Is this guard needed?
- with util.SeLinuxGuard("/etc/ssh", recursive=True):
- util.subp(cmd, capture=False)
- log.debug("Generated a key for %s from %s", pair[0], pair[1])
- except Exception:
- util.logexc(log, "Failed generated a key for %s from %s",
- pair[0], pair[1])
- else:
- # if not, generate them
- genkeys = util.get_cfg_option_list(cfg,
- 'ssh_genkeytypes',
- GENERATE_KEY_NAMES)
- lang_c = os.environ.copy()
- lang_c['LANG'] = 'C'
- for keytype in genkeys:
- keyfile = KEY_FILE_TPL % (keytype)
- if os.path.exists(keyfile):
- continue
- util.ensure_dir(os.path.dirname(keyfile))
- cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
-
- # TODO(harlowja): Is this guard needed?
- with util.SeLinuxGuard("/etc/ssh", recursive=True):
- try:
- out, err = util.subp(cmd, capture=True, env=lang_c)
- sys.stdout.write(util.decode_binary(out))
- except util.ProcessExecutionError as e:
- err = util.decode_binary(e.stderr).lower()
- if (e.exit_code == 1 and
- err.startswith("unknown key")):
- log.debug("ssh-keygen: unknown key type '%s'", keytype)
- else:
- util.logexc(log, "Failed generating key type %s to "
- "file %s", keytype, keyfile)
-
- try:
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
- (user, _user_config) = ds.extract_default(users)
- disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
- disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
- DISABLE_ROOT_OPTS)
-
- keys = cloud.get_public_ssh_keys() or []
- if "ssh_authorized_keys" in cfg:
- cfgkeys = cfg["ssh_authorized_keys"]
- keys.extend(cfgkeys)
-
- apply_credentials(keys, user, disable_root, disable_root_opts)
- except Exception:
- util.logexc(log, "Applying ssh credentials failed!")
-
-
-def apply_credentials(keys, user, disable_root, disable_root_opts):
-
- keys = set(keys)
- if user:
- ssh_util.setup_user_keys(keys, user)
-
- if disable_root:
- if not user:
- user = "NONE"
- key_prefix = disable_root_opts.replace('$USER', user)
- else:
- key_prefix = ''
-
- ssh_util.setup_user_keys(keys, 'root', options=key_prefix)
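
For reference, the ssh_keys-to-file mapping the removed module builds at import time reduces to this standalone sketch:

    GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
    KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'

    CONFIG_KEY_TO_FILE = {}
    for k in GENERATE_KEY_NAMES:
        # private and public key material are both written with mode 0600
        CONFIG_KEY_TO_FILE["%s_private" % k] = (KEY_FILE_TPL % k, 0o600)
        CONFIG_KEY_TO_FILE["%s_public" % k] = (KEY_FILE_TPL % k + ".pub", 0o600)

    # a cloud-config 'ssh_keys: {rsa_private: ...}' entry lands here:
    assert CONFIG_KEY_TO_FILE['rsa_private'] == ('/etc/ssh/ssh_host_rsa_key', 0o600)
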
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
deleted file mode 100644
index 6ce831bc..00000000
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import base64
-import hashlib
-
-from prettytable import PrettyTable
-
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
-from cloudinit import ssh_util
-from cloudinit import util
-
-
-def _split_hash(bin_hash):
- split_up = []
- for i in range(0, len(bin_hash), 2):
- split_up.append(bin_hash[i:i + 2])
- return split_up
-
-
-def _gen_fingerprint(b64_text, hash_meth='md5'):
- if not b64_text:
- return ''
- # TBD(harlowja): Maybe we should feed this into 'ssh-keygen -lf'?
- try:
- hasher = hashlib.new(hash_meth)
- hasher.update(base64.b64decode(b64_text))
- return ":".join(_split_hash(hasher.hexdigest()))
- except (TypeError, ValueError):
- # Raised when b64 not really b64...
- # or when the hash type is not really
- # a known/supported hash type...
- return '?'
-
-
-def _is_printable_key(entry):
- if any([entry.keytype, entry.base64, entry.comment, entry.options]):
- if (entry.keytype and
- entry.keytype.lower().strip() in ['ssh-dss', 'ssh-rsa']):
- return True
- return False
-
-
-def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
- prefix='ci-info: '):
- if not key_entries:
- message = ("%sno authorized ssh keys fingerprints found for user %s.\n"
- % (prefix, user))
- util.multi_log(message)
- return
- tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options',
- 'Comment']
- tbl = PrettyTable(tbl_fields)
- for entry in key_entries:
- if _is_printable_key(entry):
- row = []
- row.append(entry.keytype or '-')
- row.append(_gen_fingerprint(entry.base64, hash_meth) or '-')
- row.append(entry.options or '-')
- row.append(entry.comment or '-')
- tbl.add_row(row)
- authtbl_s = tbl.get_string()
- authtbl_lines = authtbl_s.splitlines()
- max_len = len(max(authtbl_lines, key=len))
- lines = [
- util.center("Authorized keys from %s for user %s" %
- (key_fn, user), "+", max_len),
- ]
- lines.extend(authtbl_lines)
- for line in lines:
- util.multi_log(text="%s%s\n" % (prefix, line),
- stderr=False, console=True)
-
-
-def handle(name, cfg, cloud, log, _args):
- if util.is_true(cfg.get('no_ssh_fingerprints', False)):
- log.debug(("Skipping module named %s, "
- "logging of ssh fingerprints disabled"), name)
- return
-
- hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
- for (user_name, _cfg) in users.items():
- (key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
- _pprint_key_entries(user_name, key_fn,
- key_entries, hash_meth)
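
The fingerprint rendering above distills to a small standalone helper; a sketch (the key material below is fake, only the output shape matters):

    import base64
    import hashlib

    def gen_fingerprint(b64_text, hash_meth='md5'):
        # colon-separated hex digest of the base64-decoded key material
        hasher = hashlib.new(hash_meth)
        hasher.update(base64.b64decode(b64_text))
        hexd = hasher.hexdigest()
        return ":".join(hexd[i:i + 2] for i in range(0, len(hexd), 2))

    print(gen_fingerprint(base64.b64encode(b"not a real key").decode()))
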
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
deleted file mode 100644
index 28c4585b..00000000
--- a/cloudinit/config/cc_ssh_import_id.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
-from cloudinit import util
-import pwd
-
-# https://launchpad.net/ssh-import-id
-distros = ['ubuntu', 'debian']
-
-
-def handle(_name, cfg, cloud, log, args):
-
- # import for "user: XXXXX"
- if len(args) != 0:
- user = args[0]
- ids = []
- if len(args) > 1:
- ids = args[1:]
-
- import_ssh_ids(ids, user, log)
- return
-
- # import for cloudinit created users
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
- elist = []
- for (user, user_cfg) in users.items():
- import_ids = []
- if user_cfg['default']:
- import_ids = util.get_cfg_option_list(cfg, "ssh_import_id", [])
- else:
- try:
- import_ids = user_cfg['ssh_import_id']
- except Exception:
- log.debug("User %s is not configured for ssh_import_id", user)
- continue
-
- try:
- import_ids = util.uniq_merge(import_ids)
- import_ids = [str(i) for i in import_ids]
- except Exception:
- log.debug("User %s is not correctly configured for ssh_import_id",
- user)
- continue
-
- if not len(import_ids):
- continue
-
- try:
- import_ssh_ids(import_ids, user, log)
- except Exception as exc:
- util.logexc(log, "ssh-import-id failed for: %s %s", user,
- import_ids)
- elist.append(exc)
-
- if len(elist):
- raise elist[0]
-
-
-def import_ssh_ids(ids, user, log):
-
- if not (user and ids):
- log.debug("empty user(%s) or ids(%s). not importing", user, ids)
- return
-
- # raises KeyError if the user does not exist
- pwd.getpwnam(user)
-
- cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids
- log.debug("Importing ssh ids for user %s.", user)
-
- try:
- util.subp(cmd, capture=False)
- except util.ProcessExecutionError as exc:
- util.logexc(log, "Failed to run command to import %s ssh ids", user)
- raise exc
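
The removed module ultimately shells out to ssh-import-id via sudo; a sketch of the command it constructs (the user and id values are hypothetical):

    user = "ubuntu"
    ids = ["smoser"]
    cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids
    print(" ".join(cmd))   # -> sudo -Hu ubuntu ssh-import-id smoser
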
diff --git a/cloudinit/config/cc_timezone.py b/cloudinit/config/cc_timezone.py
deleted file mode 100644
index b9eb85b2..00000000
--- a/cloudinit/config/cc_timezone.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import util
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-
-def handle(name, cfg, cloud, log, args):
- if len(args) != 0:
- timezone = args[0]
- else:
- timezone = util.get_cfg_option_str(cfg, "timezone", False)
-
- if not timezone:
- log.debug("Skipping module named %s, no 'timezone' specified", name)
- return
-
- # Let the distro handle setting its timezone
- cloud.distro.set_timezone(timezone)
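
A minimal cloud-config that exercises the removed module (the zone name is only an example; any file under the distro's zoneinfo directory is valid):

    #cloud-config
    timezone: Europe/London
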
diff --git a/cloudinit/config/cc_ubuntu_init_switch.py b/cloudinit/config/cc_ubuntu_init_switch.py
deleted file mode 100644
index 884d79f1..00000000
--- a/cloudinit/config/cc_ubuntu_init_switch.py
+++ /dev/null
@@ -1,162 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-**Summary:** reboot system into another init.
-
-**Description:** This module provides a way for the user to boot with systemd
-even if the image is set to boot with upstart. It should be run as one of the
-first ``cloud_init_modules``, and will switch the init system and then issue a
-reboot. The next boot will come up in the target init system and no action will
-be taken.
-
-This should be inert on non-ubuntu systems, and also exit quickly.
-
-It can be configured with the following option structure::
-
- init_switch:
- target: systemd (can be 'systemd' or 'upstart')
- reboot: true (reboot if a change was made, or false to not reboot)
-
-.. note::
-
- Best effort is made, but it's possible that the system will break, and
- it probably won't interact well with any other mechanism you've used to
- switch the init system.
-"""
-
-from cloudinit.distros import ubuntu
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-import os
-import time
-
-frequency = PER_INSTANCE
-REBOOT_CMD = ["/sbin/reboot", "--force"]
-
-DEFAULT_CONFIG = {
- 'init_switch': {'target': None, 'reboot': True}
-}
-
-SWITCH_INIT = """
-#!/bin/sh
-# switch_init: [upstart | systemd]
-
-is_systemd() {
- [ "$(dpkg-divert --listpackage /sbin/init)" = "systemd-sysv" ]
-}
-debug() { echo "$@" 1>&2; }
-fail() { echo "$@" 1>&2; exit 1; }
-
-if [ "$1" = "systemd" ]; then
- if is_systemd; then
- debug "already systemd, nothing to do"
- else
- [ -f /lib/systemd/systemd ] || fail "no systemd available";
- dpkg-divert --package systemd-sysv --divert /sbin/init.diverted \\
- --rename /sbin/init
- fi
- [ -f /sbin/init ] || ln /lib/systemd/systemd /sbin/init
-elif [ "$1" = "upstart" ]; then
- if is_systemd; then
- rm -f /sbin/init
- dpkg-divert --package systemd-sysv --rename --remove /sbin/init
- else
- debug "already upstart, nothing to do."
- fi
-else
- fail "Error. expect 'upstart' or 'systemd'"
-fi
-"""
-
-
-def handle(name, cfg, cloud, log, args):
- """Handler method activated by cloud-init."""
-
- if not isinstance(cloud.distro, ubuntu.Distro):
- log.debug("%s: distro is '%s', not ubuntu. returning",
- name, cloud.distro.__class__)
- return
-
- cfg = util.mergemanydict([cfg, DEFAULT_CONFIG])
- target = cfg['init_switch']['target']
- reboot = cfg['init_switch']['reboot']
-
- if len(args) != 0:
- target = args[0]
- if len(args) > 1:
- reboot = util.is_true(args[1])
-
- if not target:
- log.debug("%s: target=%s. nothing to do", name, target)
- return
-
- if not util.which('dpkg'):
- log.warn("%s: 'dpkg' not available. Assuming not ubuntu", name)
- return
-
- supported = ('upstart', 'systemd')
- if target not in supported:
- log.warn("%s: target set to %s, expected one of: %s",
- name, target, str(supported))
-
- if os.path.exists("/run/systemd/system"):
- current = "systemd"
- else:
- current = "upstart"
-
- if current == target:
- log.debug("%s: current = target = %s. nothing to do", name, target)
- return
-
- try:
- util.subp(['sh', '-s', target], data=SWITCH_INIT)
- except util.ProcessExecutionError as e:
- log.warn("%s: Failed to switch to init '%s'. %s", name, target, e)
- return
-
- if util.is_false(reboot):
- log.info("%s: switched '%s' to '%s'. reboot=false, not rebooting.",
- name, current, target)
- return
-
- try:
- log.warn("%s: switched '%s' to '%s'. rebooting.",
- name, current, target)
- logging.flushLoggers(log)
- _fire_reboot(log, wait_attempts=4, initial_sleep=4)
- except Exception:
- util.logexc(log, "Requested reboot did not happen!")
- raise
-
-
-def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
- util.subp(REBOOT_CMD)
- start = time.time()
- wait_time = initial_sleep
- for _i in range(0, wait_attempts):
- time.sleep(wait_time)
- wait_time *= backoff
- elapsed = time.time() - start
- log.debug("Rebooted, but still running after %s seconds", int(elapsed))
- # If we got here, not good
- elapsed = time.time() - start
- raise RuntimeError(("Reboot did not happen"
- " after %s seconds!") % (int(elapsed)))
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
deleted file mode 100644
index 15703efe..00000000
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import templater
-from cloudinit import util
-
-from cloudinit.settings import PER_ALWAYS
-
-frequency = PER_ALWAYS
-
-
-def handle(name, cfg, cloud, log, _args):
- manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
- if util.translate_bool(manage_hosts, addons=['template']):
- (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- if not hostname:
- log.warn(("Option 'manage_etc_hosts' was set,"
- " but no hostname was found"))
- return
-
- # Render from a template file
- tpl_fn_name = cloud.get_template_filename("hosts.%s" %
- (cloud.distro.osfamily))
- if not tpl_fn_name:
- raise RuntimeError(("No hosts template could be"
- " found for distro %s") %
- (cloud.distro.osfamily))
-
- templater.render_to_file(tpl_fn_name, '/etc/hosts',
- {'hostname': hostname, 'fqdn': fqdn})
-
- elif manage_hosts == "localhost":
- (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- if not hostname:
- log.warn(("Option 'manage_etc_hosts' was set,"
- " but no hostname was found"))
- return
-
- log.debug("Managing localhost in /etc/hosts")
- cloud.distro.update_etc_hosts(hostname, fqdn)
- else:
- log.debug(("Configuration option 'manage_etc_hosts' is not set,"
- " not managing /etc/hosts in module %s"), name)
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
deleted file mode 100644
index 5b78afe1..00000000
--- a/cloudinit/config/cc_update_hostname.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit.settings import PER_ALWAYS
-from cloudinit import util
-
-frequency = PER_ALWAYS
-
-
-def handle(name, cfg, cloud, log, _args):
- if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
- log.debug(("Configuration option 'preserve_hostname' is set,"
- " not updating the hostname in module %s"), name)
- return
-
- (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- try:
- prev_fn = os.path.join(cloud.get_cpath('data'), "previous-hostname")
- log.debug("Updating hostname to %s (%s)", fqdn, hostname)
- cloud.distro.update_hostname(hostname, fqdn, prev_fn)
- except Exception:
- util.logexc(log, "Failed to update the hostname to %s (%s)", fqdn,
- hostname)
- raise
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
deleted file mode 100644
index bf5b4581..00000000
--- a/cloudinit/config/cc_users_groups.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-#
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
-from cloudinit.settings import PER_INSTANCE
-
-frequency = PER_INSTANCE
-
-
-def handle(name, cfg, cloud, _log, _args):
- (users, groups) = ds.normalize_users_groups(cfg, cloud.distro)
- for (name, members) in groups.items():
- cloud.distro.create_group(name, members)
- for (user, config) in users.items():
- cloud.distro.create_user(user, **config)
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
deleted file mode 100644
index b1096b9b..00000000
--- a/cloudinit/config/cc_write_files.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import base64
-import os
-import six
-
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-frequency = PER_INSTANCE
-
-DEFAULT_OWNER = "root:root"
-DEFAULT_PERMS = 0o644
-UNKNOWN_ENC = 'text/plain'
-
-
-def handle(name, cfg, _cloud, log, _args):
- files = cfg.get('write_files')
- if not files:
- log.debug(("Skipping module named %s,"
- " no/empty 'write_files' key in configuration"), name)
- return
- write_files(name, files, log)
-
-
-def canonicalize_extraction(encoding_type, log):
- if not encoding_type:
- encoding_type = ''
- encoding_type = encoding_type.lower().strip()
- if encoding_type in ['gz', 'gzip']:
- return ['application/x-gzip']
- if encoding_type in ['gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64']:
- return ['application/base64', 'application/x-gzip']
- # Yaml already encodes binary data as base64 if it is given to the
- # yaml file as binary, so those will be automatically decoded for you.
- # But the above b64 is just for people that are more 'comfortable'
- # specifying it manually (which might be a possibility)
- if encoding_type in ['b64', 'base64']:
- return ['application/base64']
- if encoding_type:
- log.warn("Unknown encoding type %s, assuming %s",
- encoding_type, UNKNOWN_ENC)
- return [UNKNOWN_ENC]
-
-
-def write_files(name, files, log):
- if not files:
- return
-
- for (i, f_info) in enumerate(files):
- path = f_info.get('path')
- if not path:
- log.warn("No path provided to write for entry %s in module %s",
- i + 1, name)
- continue
- path = os.path.abspath(path)
- extractions = canonicalize_extraction(f_info.get('encoding'), log)
- contents = extract_contents(f_info.get('content', ''), extractions)
- (u, g) = util.extract_usergroup(f_info.get('owner', DEFAULT_OWNER))
- perms = decode_perms(f_info.get('permissions'), DEFAULT_PERMS, log)
- util.write_file(path, contents, mode=perms)
- util.chownbyname(path, u, g)
-
-
-def decode_perms(perm, default, log):
- if perm is None:
- return default
- try:
- if isinstance(perm, six.integer_types + (float,)):
- # Just 'downcast' it (if a float)
- return int(perm)
- else:
- # Force to string and try octal conversion
- return int(str(perm), 8)
- except (TypeError, ValueError):
- log.warn("Undecodable permissions %s, assuming %s", perm, default)
- return default
-
-
-def extract_contents(contents, extraction_types):
- result = contents
- for t in extraction_types:
- if t == 'application/x-gzip':
- result = util.decomp_gzip(result, quiet=False, decode=False)
- elif t == 'application/base64':
- result = base64.b64decode(result)
- elif t == UNKNOWN_ENC:
- pass
- return result
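
Permission handling above reduces to this standalone sketch (the log parameter is dropped for brevity):

    def decode_perms(perm, default=0o644):
        if perm is None:
            return default
        try:
            if isinstance(perm, (int, float)):
                return int(perm)      # just 'downcast' numeric values
            return int(str(perm), 8)  # otherwise parse as an octal string
        except (TypeError, ValueError):
            return default

    assert decode_perms("0600") == 0o600
    assert decode_perms(None) == 0o644
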
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
deleted file mode 100644
index 64fba869..00000000
--- a/cloudinit/config/cc_yum_add_repo.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-import configobj
-import six
-
-from cloudinit import util
-
-
-def _canonicalize_id(repo_id):
- repo_id = repo_id.lower().replace("-", "_")
- repo_id = repo_id.replace(" ", "_")
- return repo_id
-
-
-def _format_repo_value(val):
- if isinstance(val, bool):
- # Seems like yum prefers 1/0
- return str(int(val))
- if isinstance(val, (list, tuple)):
- # Can handle 'lists' in certain cases
- # See: http://bit.ly/Qqrf1t
- return "\n ".join([_format_repo_value(v) for v in val])
- if not isinstance(val, six.string_types):
- return str(val)
- return val
-
-
-# TODO(harlowja): move to distro?
-# See man yum.conf
-def _format_repository_config(repo_id, repo_config):
- to_be = configobj.ConfigObj()
- to_be[repo_id] = {}
- # Do basic translation of the items -> values
- for (k, v) in repo_config.items():
- # For now assume that people using this know
- # the format of yum and don't verify keys/values further
- to_be[repo_id][k] = _format_repo_value(v)
- lines = to_be.write()
- lines.insert(0, "# Created by cloud-init on %s" % (util.time_rfc2822()))
- return "\n".join(lines)
-
-
-def handle(name, cfg, _cloud, log, _args):
- repos = cfg.get('yum_repos')
- if not repos:
- log.debug(("Skipping module named %s,"
- " no 'yum_repos' configuration found"), name)
- return
- repo_base_path = util.get_cfg_option_str(cfg, 'yum_repo_dir',
- '/etc/yum.repos.d/')
- repo_locations = {}
- repo_configs = {}
- for (repo_id, repo_config) in repos.items():
- canon_repo_id = _canonicalize_id(repo_id)
- repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id))
- if os.path.exists(repo_fn_pth):
- log.info("Skipping repo %s, file %s already exists!",
- repo_id, repo_fn_pth)
- continue
- elif canon_repo_id in repo_locations:
- log.info("Skipping repo %s, file %s already pending!",
- repo_id, repo_fn_pth)
- continue
- if not repo_config:
- repo_config = {}
- # Do some basic sanity checks/cleaning
- n_repo_config = {}
- for (k, v) in repo_config.items():
- k = k.lower().strip().replace("-", "_")
- if k:
- n_repo_config[k] = v
- repo_config = n_repo_config
- missing_required = 0
- for req_field in ['baseurl']:
- if req_field not in repo_config:
- log.warn(("Repository %s does not contain a %s"
- " configuration 'required' entry"),
- repo_id, req_field)
- missing_required += 1
- if not missing_required:
- repo_configs[canon_repo_id] = repo_config
- repo_locations[canon_repo_id] = repo_fn_pth
- else:
- log.warn("Repository %s is missing %s required fields, skipping!",
- repo_id, missing_required)
- for (c_repo_id, path) in repo_locations.items():
- repo_blob = _format_repository_config(c_repo_id,
- repo_configs.get(c_repo_id))
- util.write_file(path, repo_blob)
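
Repo ids are canonicalized before becoming filenames; a standalone sketch (the sample id is hypothetical):

    def canonicalize_id(repo_id):
        # lowercase; '-' and ' ' both become '_'
        return repo_id.lower().replace("-", "_").replace(" ", "_")

    assert canonicalize_id("EPEL Testing-Repo") == "epel_testing_repo"
    # -> would be written as /etc/yum.repos.d/epel_testing_repo.repo
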
diff --git a/cloudinit/cs_utils.py b/cloudinit/cs_utils.py
deleted file mode 100644
index 412431f2..00000000
--- a/cloudinit/cs_utils.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 CloudSigma
-#
-# Author: Kiril Vladimiroff <kiril.vladimiroff@cloudsigma.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-"""
-cepko implements easy-to-use communication with CloudSigma's VMs through
-a virtual serial port, without requiring you to format the messages
-properly or to parse the output with the specific and sometimes
-confusing shell tools made for that purpose.
-
-Having the server definition accessible by the VM can be useful in various
-ways. For example, it is possible to easily determine from within the VM
-which network interfaces are connected to public and which to private network.
-Another use is to pass some data to initial VM setup scripts, like setting the
-hostname to the VM name or passing ssh public keys through server meta.
-
-For more information take a look at the Server Context section of CloudSigma
-API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
-"""
-import json
-import platform
-
-from cloudinit import serial
-
-
-# these high timeouts are necessary as read may read a lot of data.
-READ_TIMEOUT = 60
-WRITE_TIMEOUT = 10
-
-SERIAL_PORT = '/dev/ttyS1'
-if platform.system() == 'Windows':
- SERIAL_PORT = 'COM2'
-
-
-class Cepko(object):
- """
- One instance of this object can be used for one or more
- queries to the serial port.
- """
- request_pattern = "<\n{}\n>"
-
- def get(self, key="", request_pattern=None):
- if request_pattern is None:
- request_pattern = self.request_pattern
- return CepkoResult(request_pattern.format(key))
-
- def all(self):
- return self.get()
-
- def meta(self, key=""):
- request_pattern = self.request_pattern.format("/meta/{}")
- return self.get(key, request_pattern)
-
- def global_context(self, key=""):
- request_pattern = self.request_pattern.format("/global_context/{}")
- return self.get(key, request_pattern)
-
-
-class CepkoResult(object):
- """
- CepkoResult executes the request to the virtual serial port as soon
- as the instance is initialized and stores the result in both raw and
- marshalled format.
- """
- def __init__(self, request):
- self.request = request
- self.raw_result = self._execute()
- self.result = self._marshal(self.raw_result)
-
- def _execute(self):
- connection = serial.Serial(port=SERIAL_PORT,
- timeout=READ_TIMEOUT,
- writeTimeout=WRITE_TIMEOUT)
- connection.write(self.request.encode('ascii'))
- return connection.readline().strip(b'\x04\n').decode('ascii')
-
- def _marshal(self, raw_result):
- try:
- return json.loads(raw_result)
- except ValueError:
- return raw_result
-
- def __len__(self):
- return self.result.__len__()
-
- def __getitem__(self, key):
- return self.result.__getitem__(key)
-
- def __contains__(self, item):
- return self.result.__contains__(item)
-
- def __iter__(self):
- return self.result.__iter__()
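
The two-stage request formatting in Cepko is easy to miss: the path template is filled first, then the key. A runnable sketch (the key name is hypothetical):

    request_pattern = "<\n{}\n>"

    # Cepko.meta() fills in the path, then get() fills in the key:
    meta_pattern = request_pattern.format("/meta/{}")
    assert meta_pattern.format("ssh_public_key") == "<\n/meta/ssh_public_key\n>"
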
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
deleted file mode 100644
index 40af8802..00000000
--- a/cloudinit/distros/__init__.py
+++ /dev/null
@@ -1,980 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-from six import StringIO
-
-import abc
-import os
-import re
-import stat
-
-from cloudinit import importer
-from cloudinit import log as logging
-from cloudinit import net
-from cloudinit.net import eni
-from cloudinit.net import network_state
-from cloudinit import ssh_util
-from cloudinit import type_utils
-from cloudinit import util
-
-from cloudinit.distros.parsers import hosts
-
-
-OSFAMILIES = {
- 'debian': ['debian', 'ubuntu'],
- 'redhat': ['fedora', 'rhel'],
- 'gentoo': ['gentoo'],
- 'freebsd': ['freebsd'],
- 'suse': ['sles'],
- 'arch': ['arch'],
-}
-
-LOG = logging.getLogger(__name__)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class Distro(object):
-
- usr_lib_exec = "/usr/lib"
- hosts_fn = "/etc/hosts"
- ci_sudoers_fn = "/etc/sudoers.d/90-cloud-init-users"
- hostname_conf_fn = "/etc/hostname"
- tz_zone_dir = "/usr/share/zoneinfo"
- init_cmd = ['service'] # systemctl, service etc
-
- def __init__(self, name, cfg, paths):
- self._paths = paths
- self._cfg = cfg
- self.name = name
-
- @abc.abstractmethod
- def install_packages(self, pkglist):
- raise NotImplementedError()
-
- @abc.abstractmethod
- def _write_network(self, settings):
- # In the future use the http://fedorahosted.org/netcf/
- # to write this blob out in a distro format
- raise NotImplementedError()
-
- def _write_network_config(self, settings):
- raise NotImplementedError()
-
- def _find_tz_file(self, tz):
- tz_file = os.path.join(self.tz_zone_dir, str(tz))
- if not os.path.isfile(tz_file):
- raise IOError(("Invalid timezone %s,"
- " no file found at %s") % (tz, tz_file))
- return tz_file
-
- def get_option(self, opt_name, default=None):
- return self._cfg.get(opt_name, default)
-
- def set_hostname(self, hostname, fqdn=None):
- writeable_hostname = self._select_hostname(hostname, fqdn)
- self._write_hostname(writeable_hostname, self.hostname_conf_fn)
- self._apply_hostname(writeable_hostname)
-
- def uses_systemd(self):
- try:
- res = os.lstat('/run/systemd/system')
- return stat.S_ISDIR(res.st_mode)
- except Exception:
- return False
-
- @abc.abstractmethod
- def package_command(self, cmd, args=None, pkgs=None):
- raise NotImplementedError()
-
- @abc.abstractmethod
- def update_package_sources(self):
- raise NotImplementedError()
-
- def get_primary_arch(self):
- arch = os.uname()[4]
- if arch in ("i386", "i486", "i586", "i686"):
- return "i386"
- return arch
-
- def _get_arch_package_mirror_info(self, arch=None):
- mirror_info = self.get_option("package_mirrors", [])
- if not arch:
- arch = self.get_primary_arch()
- return _get_arch_package_mirror_info(mirror_info, arch)
-
- def get_package_mirror_info(self, arch=None, data_source=None):
- # This resolves the package_mirrors config option
- # down to a single dict of {mirror_name: mirror_url}
- arch_info = self._get_arch_package_mirror_info(arch)
- return _get_package_mirror_info(data_source=data_source,
- mirror_info=arch_info)
-
- def apply_network(self, settings, bring_up=True):
- # this applies network where 'settings' is interfaces(5) style
- # it is obsolete compared to apply_network_config
- # Write it out
- dev_names = self._write_network(settings)
- # Now try to bring them up
- if bring_up:
- return self._bring_up_interfaces(dev_names)
- return False
-
- def _apply_network_from_network_config(self, netconfig, bring_up=True):
- distro = self.__class__
- LOG.warn("apply_network_config is not currently implemented "
- "for distribution '%s'. Attempting to use apply_network",
- distro)
- header = '\n'.join([
- "# Converted from network_config for distro %s" % distro,
- "# Implmentation of _write_network_config is needed."
- ])
- ns = network_state.parse_net_config_data(netconfig)
- contents = eni.network_state_to_eni(
- ns, header=header, render_hwaddress=True)
- return self.apply_network(contents, bring_up=bring_up)
-
- def apply_network_config(self, netconfig, bring_up=False):
- # apply network config netconfig
- # This method is preferred to apply_network which only takes
- # a much less complete network config format (interfaces(5)).
- try:
- dev_names = self._write_network_config(netconfig)
- except NotImplementedError:
- # backwards compat until all distros have apply_network_config
- return self._apply_network_from_network_config(
- netconfig, bring_up=bring_up)
-
- # Now try to bring them up
- if bring_up:
- return self._bring_up_interfaces(dev_names)
- return False
-
- def apply_network_config_names(self, netconfig):
- net.apply_network_config_names(netconfig)
-
- @abc.abstractmethod
- def apply_locale(self, locale, out_fn=None):
- raise NotImplementedError()
-
- @abc.abstractmethod
- def set_timezone(self, tz):
- raise NotImplementedError()
-
- def _get_localhost_ip(self):
- return "127.0.0.1"
-
- @abc.abstractmethod
- def _read_hostname(self, filename, default=None):
- raise NotImplementedError()
-
- @abc.abstractmethod
- def _write_hostname(self, hostname, filename):
- raise NotImplementedError()
-
- @abc.abstractmethod
- def _read_system_hostname(self):
- raise NotImplementedError()
-
- def _apply_hostname(self, hostname):
- # This really only sets the hostname
- # temporarily (until reboot), so it should
- # not be depended on. Use the write
- # hostname functions for 'permanent' adjustments.
- LOG.debug("Non-persistently setting the system hostname to %s",
- hostname)
- try:
- util.subp(['hostname', hostname])
- except util.ProcessExecutionError:
- util.logexc(LOG, "Failed to non-persistently adjust the system "
- "hostname to %s", hostname)
-
- def _select_hostname(self, hostname, fqdn):
- # Prefer the short hostname over the long
- # fully qualified domain name
- if not hostname:
- return fqdn
- return hostname
-
- @staticmethod
- def expand_osfamily(family_list):
- distros = []
- for family in family_list:
- if family not in OSFAMILIES:
- raise ValueError("No distibutions found for osfamily %s"
- % (family))
- distros.extend(OSFAMILIES[family])
- return distros
-
- def update_hostname(self, hostname, fqdn, prev_hostname_fn):
- applying_hostname = hostname
-
- # Determine what the actual written hostname should be
- hostname = self._select_hostname(hostname, fqdn)
-
- # If the previous hostname file exists lets see if we
- # can get a hostname from it
- if prev_hostname_fn and os.path.exists(prev_hostname_fn):
- prev_hostname = self._read_hostname(prev_hostname_fn)
- else:
- prev_hostname = None
-
- # Let's get where we should write the system hostname
- # and what the system hostname is
- (sys_fn, sys_hostname) = self._read_system_hostname()
- update_files = []
-
- # If there is no previous hostname or it differs
- # from what we want, let's update it or create the
- # file in the first place
- if not prev_hostname or prev_hostname != hostname:
- update_files.append(prev_hostname_fn)
-
- # If the system hostname is different from the previous
- # one or the desired one, let's update it as well
- if ((not sys_hostname) or (sys_hostname == prev_hostname and
- sys_hostname != hostname)):
- update_files.append(sys_fn)
-
- # If something else has changed the hostname after we set it
- # initially, we should not overwrite those changes (we should
- # only be setting the hostname once per instance)
- if (sys_hostname and prev_hostname and
- sys_hostname != prev_hostname):
- LOG.info("%s differs from %s, assuming user maintained hostname.",
- prev_hostname_fn, sys_fn)
- return
-
- # Remove duplicates (in case the previous config filename
- # is the same as the system config filename); don't bother
- # doing it twice
- update_files = set([f for f in update_files if f])
- LOG.debug("Attempting to update hostname to %s in %s files",
- hostname, len(update_files))
-
- for fn in update_files:
- try:
- self._write_hostname(hostname, fn)
- except IOError:
- util.logexc(LOG, "Failed to write hostname %s to %s", hostname,
- fn)
-
- # If the system hostname file name was provided set the
- # non-fqdn as the transient hostname.
- if sys_fn in update_files:
- self._apply_hostname(applying_hostname)
-
- def update_etc_hosts(self, hostname, fqdn):
- header = ''
- if os.path.exists(self.hosts_fn):
- eh = hosts.HostsConf(util.load_file(self.hosts_fn))
- else:
- eh = hosts.HostsConf('')
- header = util.make_header(base="added")
- local_ip = self._get_localhost_ip()
- prev_info = eh.get_entry(local_ip)
- need_change = False
- if not prev_info:
- eh.add_entry(local_ip, fqdn, hostname)
- need_change = True
- else:
- need_change = True
- for entry in prev_info:
- entry_fqdn = None
- entry_aliases = []
- if len(entry) >= 1:
- entry_fqdn = entry[0]
- if len(entry) >= 2:
- entry_aliases = entry[1:]
- if entry_fqdn is not None and entry_fqdn == fqdn:
- if hostname in entry_aliases:
- # Exists already, leave it be
- need_change = False
- if need_change:
- # Doesn't exist, add that entry in...
- new_entries = list(prev_info)
- new_entries.append([fqdn, hostname])
- eh.del_entries(local_ip)
- for entry in new_entries:
- if len(entry) == 1:
- eh.add_entry(local_ip, entry[0])
- elif len(entry) >= 2:
- eh.add_entry(local_ip, *entry)
- if need_change:
- contents = StringIO()
- if header:
- contents.write("%s\n" % (header))
- contents.write("%s\n" % (eh))
- util.write_file(self.hosts_fn, contents.getvalue(), mode=0o644)
-
- def _bring_up_interface(self, device_name):
- cmd = ['ifup', device_name]
- LOG.debug("Attempting to run bring up interface %s using command %s",
- device_name, cmd)
- try:
- (_out, err) = util.subp(cmd)
- if len(err):
- LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
- return True
- except util.ProcessExecutionError:
- util.logexc(LOG, "Running interface command %s failed", cmd)
- return False
-
- def _bring_up_interfaces(self, device_names):
- am_failed = 0
- for d in device_names:
- if not self._bring_up_interface(d):
- am_failed += 1
- if am_failed == 0:
- return True
- return False
-
- def get_default_user(self):
- return self.get_option('default_user')
-
- def add_user(self, name, **kwargs):
- """
- Add a user to the system using standard GNU tools
- """
- if util.is_user(name):
- LOG.info("User %s already exists, skipping." % name)
- return
-
- if 'create_groups' in kwargs:
- create_groups = kwargs.pop('create_groups')
- else:
- create_groups = True
-
- adduser_cmd = ['useradd', name]
- log_adduser_cmd = ['useradd', name]
-
- # Since we are creating users, we want to carefully validate the
- # inputs. If something goes wrong, we can end up with a system
- # that nobody can login to.
- adduser_opts = {
- "gecos": '--comment',
- "homedir": '--home',
- "primary_group": '--gid',
- "uid": '--uid',
- "groups": '--groups',
- "passwd": '--password',
- "shell": '--shell',
- "expiredate": '--expiredate',
- "inactive": '--inactive',
- "selinux_user": '--selinux-user',
- }
-
- adduser_flags = {
- "no_user_group": '--no-user-group',
- "system": '--system',
- "no_log_init": '--no-log-init',
- }
-
- redact_opts = ['passwd']
-
- # support kwargs having groups=[list] or groups="g1,g2"
- groups = kwargs.get('groups')
- if groups:
- if isinstance(groups, (list, tuple)):
- # kwargs.items loop below wants a comma delimited string
- # that can go right through to the command.
- kwargs['groups'] = ",".join(groups)
- else:
- groups = groups.split(",")
-
- primary_group = kwargs.get('primary_group')
- if primary_group:
- groups.append(primary_group)
-
- if create_groups and groups:
- for group in groups:
- if not util.is_group(group):
- self.create_group(group)
- LOG.debug("created group %s for user %s", name, group)
-
- # Check the values and create the command
- for key, val in kwargs.items():
-
- if key in adduser_opts and val and isinstance(val, str):
- adduser_cmd.extend([adduser_opts[key], val])
-
- # Redact certain fields from the logs
- if key in redact_opts:
- log_adduser_cmd.extend([adduser_opts[key], 'REDACTED'])
- else:
- log_adduser_cmd.extend([adduser_opts[key], val])
-
- elif key in adduser_flags and val:
- adduser_cmd.append(adduser_flags[key])
- log_adduser_cmd.append(adduser_flags[key])
-
- # Don't create the home directory if directed so or if the user is a
- # system user
- if 'no_create_home' in kwargs or 'system' in kwargs:
- adduser_cmd.append('-M')
- log_adduser_cmd.append('-M')
- else:
- adduser_cmd.append('-m')
- log_adduser_cmd.append('-m')
-
- # Run the command
- LOG.debug("Adding user %s", name)
- try:
- util.subp(adduser_cmd, logstring=log_adduser_cmd)
- except Exception as e:
- util.logexc(LOG, "Failed to create user %s", name)
- raise e
-
- def create_user(self, name, **kwargs):
- """
- Creates users for the system using the GNU passwd tools. This
- will work on a GNU system. This should be overridden on
- distros where useradd is not desirable or not available.
- """
-
- # Add the user
- self.add_user(name, **kwargs)
-
- # Set password if plain-text password provided and non-empty
- if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
- self.set_passwd(name, kwargs['plain_text_passwd'])
-
- # Set password if hashed password is provided and non-empty
- if 'hashed_passwd' in kwargs and kwargs['hashed_passwd']:
- self.set_passwd(name, kwargs['hashed_passwd'], hashed=True)
-
- # Default to locking down the account. 'lock_passwd' defaults to True;
- # lock the account unless lock_passwd is False.
- if kwargs.get('lock_passwd', True):
- self.lock_passwd(name)
-
- # Configure sudo access
- if 'sudo' in kwargs:
- self.write_sudo_rules(name, kwargs['sudo'])
-
- # Import SSH keys
- if 'ssh_authorized_keys' in kwargs:
- # Try to handle this in a smart manner.
- keys = kwargs['ssh_authorized_keys']
- if isinstance(keys, six.string_types):
- keys = [keys]
- elif isinstance(keys, dict):
- keys = list(keys.values())
- if keys is not None:
- if not isinstance(keys, (tuple, list, set)):
- LOG.warn("Invalid type '%s' detected for"
- " 'ssh_authorized_keys', expected list,"
- " string, dict, or set.", type(keys))
- else:
- keys = set(keys) or []
- ssh_util.setup_user_keys(keys, name, options=None)
-
- return True
-
- def lock_passwd(self, name):
- """
- Lock the password of a user, i.e., disable password logins
- """
- try:
- # Need to use the short option name '-l' instead of '--lock'
- # (which would be more descriptive) since SLES 11 doesn't know
- # about long names.
- util.subp(['passwd', '-l', name])
- except Exception as e:
- util.logexc(LOG, 'Failed to disable password for user %s', name)
- raise e
-
- def set_passwd(self, user, passwd, hashed=False):
- pass_string = '%s:%s' % (user, passwd)
- cmd = ['chpasswd']
-
- if hashed:
- # Need to use the short option name '-e' instead of '--encrypted'
- # (which would be more descriptive) since SLES 11 doesn't know
- # about long names.
- cmd.append('-e')
-
- try:
- util.subp(cmd, pass_string, logstring="chpasswd for %s" % user)
- except Exception as e:
- util.logexc(LOG, "Failed to set password for %s", user)
- raise e
-
- return True
-
- def ensure_sudo_dir(self, path, sudo_base='/etc/sudoers'):
- # Ensure the dir is included and that
- # it actually exists as a directory
- sudoers_contents = ''
- base_exists = False
- if os.path.exists(sudo_base):
- sudoers_contents = util.load_file(sudo_base)
- base_exists = True
- found_include = False
- for line in sudoers_contents.splitlines():
- line = line.strip()
- include_match = re.search(r"^#includedir\s+(.*)$", line)
- if not include_match:
- continue
- included_dir = include_match.group(1).strip()
- if not included_dir:
- continue
- included_dir = os.path.abspath(included_dir)
- if included_dir == path:
- found_include = True
- break
- if not found_include:
- try:
- if not base_exists:
- lines = [('# See sudoers(5) for more information'
- ' on "#include" directives:'), '',
- util.make_header(base="added"),
- "#includedir %s" % (path), '']
- sudoers_contents = "\n".join(lines)
- util.write_file(sudo_base, sudoers_contents, 0o440)
- else:
- lines = ['', util.make_header(base="added"),
- "#includedir %s" % (path), '']
- sudoers_contents = "\n".join(lines)
- util.append_file(sudo_base, sudoers_contents)
- LOG.debug("Added '#includedir %s' to %s" % (path, sudo_base))
- except IOError as e:
- util.logexc(LOG, "Failed to write %s", sudo_base)
- raise e
- util.ensure_dir(path, 0o750)
-
- def write_sudo_rules(self, user, rules, sudo_file=None):
- if not sudo_file:
- sudo_file = self.ci_sudoers_fn
-
- lines = [
- '',
- "# User rules for %s" % user,
- ]
- if isinstance(rules, (list, tuple)):
- for rule in rules:
- lines.append("%s %s" % (user, rule))
- elif isinstance(rules, six.string_types):
- lines.append("%s %s" % (user, rules))
- else:
- msg = "Can not create sudoers rule addition with type %r"
- raise TypeError(msg % (type_utils.obj_name(rules)))
- content = "\n".join(lines)
- content += "\n" # trailing newline
-
- self.ensure_sudo_dir(os.path.dirname(sudo_file))
- if not os.path.exists(sudo_file):
- contents = [
- util.make_header(),
- content,
- ]
- try:
- util.write_file(sudo_file, "\n".join(contents), 0o440)
- except IOError as e:
- util.logexc(LOG, "Failed to write sudoers file %s", sudo_file)
- raise e
- else:
- try:
- util.append_file(sudo_file, content)
- except IOError as e:
- util.logexc(LOG, "Failed to append sudoers file %s", sudo_file)
- raise e
-
- def create_group(self, name, members=None):
- group_add_cmd = ['groupadd', name]
- if not members:
- members = []
-
- # Check if the group exists, and add it if it doesn't
- if util.is_group(name):
- LOG.warn("Skipping creation of existing group '%s'" % name)
- else:
- try:
- util.subp(group_add_cmd)
- LOG.info("Created new group %s" % name)
- except Exception:
- util.logexc(LOG, "Failed to create group %s", name)
-
- # Add members to the group, if so defined
- if len(members) > 0:
- for member in members:
- if not util.is_user(member):
- LOG.warn("Unable to add group member '%s' to group '%s'"
- "; user does not exist.", member, name)
- continue
-
- util.subp(['usermod', '-a', '-G', name, member])
- LOG.info("Added user '%s' to group '%s'" % (member, name))
-
-
-def _get_package_mirror_info(mirror_info, data_source=None,
- mirror_filter=util.search_for_mirror):
- # given an arch-specific 'mirror_info' entry (from package_mirrors),
- # search through the 'search' entries, and fall back appropriately;
- # return a dict with only {name: mirror} entries.
- if not mirror_info:
- mirror_info = {}
-
- # EC2 availability zones are named <region><letter>, e.g. us-east-1b,
- # where the region is us-east-1; so region = az[0:-1]
- directions_re = '|'.join([
- 'central', 'east', 'north', 'northeast', 'northwest',
- 'south', 'southeast', 'southwest', 'west'])
- ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % directions_re)
-
- subst = {}
- if data_source and data_source.availability_zone:
- subst['availability_zone'] = data_source.availability_zone
-
- if re.match(ec2_az_re, data_source.availability_zone):
- subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1]
-
- if data_source and data_source.region:
- subst['region'] = data_source.region
-
- results = {}
- for (name, mirror) in mirror_info.get('failsafe', {}).items():
- results[name] = mirror
-
- for (name, searchlist) in mirror_info.get('search', {}).items():
- mirrors = []
- for tmpl in searchlist:
- try:
- mirrors.append(tmpl % subst)
- except KeyError:
- pass
-
- found = mirror_filter(mirrors)
- if found:
- results[name] = found
-
- LOG.debug("filtered distro mirror info: %s" % results)
-
- return results
-
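For illustration, a minimal sketch of how the search templates get expanded;
the mirror layout and availability zone below are hypothetical:

    mirror_info = {
        'failsafe': {'primary': 'http://archive.ubuntu.com/ubuntu'},
        'search': {'primary': [
            'http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/',
            'http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/']},
    }
    subst = {'availability_zone': 'us-east-1b', 'ec2_region': 'us-east-1'}
    candidates = [tmpl % subst for tmpl in mirror_info['search']['primary']]
    # candidates[0] == 'http://us-east-1.ec2.archive.ubuntu.com/ubuntu/'
    # mirror_filter then picks the first reachable candidate, which
    # overrides the 'failsafe' entry for 'primary'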
-
-def _get_arch_package_mirror_info(package_mirrors, arch):
- # pull out the specific arch from a 'package_mirrors' config option
- default = None
- for item in package_mirrors:
- arches = item.get("arches")
- if arch in arches:
- return item
- if "default" in arches:
- default = item
- return default
-
-
-# Normalizes an input group configuration,
-# which can be a comma-separated list of
-# group names, or a list of group names,
-# or a python dictionary of group names
-# to a list of members of that group.
-#
-# The output is a dictionary of group
-# names => members of that group, which
-# is the standard form used in the rest
-# of cloud-init.
-def _normalize_groups(grp_cfg):
- if isinstance(grp_cfg, six.string_types):
- grp_cfg = grp_cfg.strip().split(",")
- if isinstance(grp_cfg, list):
- c_grp_cfg = {}
- for i in grp_cfg:
- if isinstance(i, dict):
- for k, v in i.items():
- if k not in c_grp_cfg:
- if isinstance(v, list):
- c_grp_cfg[k] = list(v)
- elif isinstance(v, six.string_types):
- c_grp_cfg[k] = [v]
- else:
- raise TypeError("Bad group member type %s" %
- type_utils.obj_name(v))
- else:
- if isinstance(v, list):
- c_grp_cfg[k].extend(v)
- elif isinstance(v, six.string_types):
- c_grp_cfg[k].append(v)
- else:
- raise TypeError("Bad group member type %s" %
- type_utils.obj_name(v))
- elif isinstance(i, six.string_types):
- if i not in c_grp_cfg:
- c_grp_cfg[i] = []
- else:
- raise TypeError("Unknown group name type %s" %
- type_utils.obj_name(i))
- grp_cfg = c_grp_cfg
- groups = {}
- if isinstance(grp_cfg, dict):
- for (grp_name, grp_members) in grp_cfg.items():
- groups[grp_name] = util.uniq_merge_sorted(grp_members)
- else:
- raise TypeError(("Group config must be list, dict "
- " or string types only and not %s") %
- type_utils.obj_name(grp_cfg))
- return groups
-
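For illustration, the three accepted shapes and the results they normalize to
(assuming util.uniq_merge_sorted splits comma-separated member strings and
sorts the merged result, as it does elsewhere in cloud-init):

    _normalize_groups("sudo,docker")
    # -> {'sudo': [], 'docker': []}
    _normalize_groups(["sudo", {"adm": ["bob", "alice"]}])
    # -> {'sudo': [], 'adm': ['alice', 'bob']}
    _normalize_groups({"adm": "bob,alice"})
    # -> {'adm': ['alice', 'bob']}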
-
-# Normalizes an input user configuration,
-# which can be a comma-separated list of
-# user names, or a list of string user names,
-# or a list of dictionaries with components
-# that define the user config + 'name' (if
-# a 'name' field does not exist then the
-# default user is assumed to 'own' that
-# configuration).
-#
-# The output is a dictionary of user
-# names => user config, which is the standard
-# form used in the rest of cloud-init. Note
-# the default user will have a special config
-# entry 'default' which will be marked as true;
-# all other users will be marked as false.
-def _normalize_users(u_cfg, def_user_cfg=None):
- if isinstance(u_cfg, dict):
- ad_ucfg = []
- for (k, v) in u_cfg.items():
- if isinstance(v, (bool, int, float) + six.string_types):
- if util.is_true(v):
- ad_ucfg.append(str(k))
- elif isinstance(v, dict):
- v['name'] = k
- ad_ucfg.append(v)
- else:
- raise TypeError(("Unmappable user value type %s"
- " for key %s") % (type_utils.obj_name(v), k))
- u_cfg = ad_ucfg
- elif isinstance(u_cfg, six.string_types):
- u_cfg = util.uniq_merge_sorted(u_cfg)
-
- users = {}
- for user_config in u_cfg:
- if isinstance(user_config, (list,) + six.string_types):
- for u in util.uniq_merge(user_config):
- if u and u not in users:
- users[u] = {}
- elif isinstance(user_config, dict):
- if 'name' in user_config:
- n = user_config.pop('name')
- prev_config = users.get(n) or {}
- users[n] = util.mergemanydict([prev_config,
- user_config])
- else:
- # Assume the default user then
- prev_config = users.get('default') or {}
- users['default'] = util.mergemanydict([prev_config,
- user_config])
- else:
- raise TypeError(("User config must be dictionary/list "
- " or string types only and not %s") %
- type_utils.obj_name(user_config))
-
- # Ensure user options are in the right python friendly format
- if users:
- c_users = {}
- for (uname, uconfig) in users.items():
- c_uconfig = {}
- for (k, v) in uconfig.items():
- k = k.replace('-', '_').strip()
- if k:
- c_uconfig[k] = v
- c_users[uname] = c_uconfig
- users = c_users
-
- # Fix up the default user into the real
- # default user name and replace it...
- def_user = None
- if users and 'default' in users:
- def_config = users.pop('default')
- if def_user_cfg:
- # Pick up what the default 'real name' is
- # and any groups that are provided by the
- # default config
- def_user_cfg = def_user_cfg.copy()
- def_user = def_user_cfg.pop('name')
- def_groups = def_user_cfg.pop('groups', [])
- # Pick up any config + groups for that user name
- # that we may have previously extracted
- parsed_config = users.pop(def_user, {})
- parsed_groups = parsed_config.get('groups', [])
- # Now merge our extracted groups with
- # anything the default config provided
- users_groups = util.uniq_merge_sorted(parsed_groups, def_groups)
- parsed_config['groups'] = ",".join(users_groups)
- # The real config for the default user is the
- # combination of the default user config provided
- # by the distro, the default user config provided
- # by the above merging for the user 'default' and
- # then the parsed config from the user's 'real name'
- # which does not have to be 'default' (but could be)
- users[def_user] = util.mergemanydict([def_user_cfg,
- def_config,
- parsed_config])
-
- # Ensure that only the default user that we
- # found (if any) is actually marked as being
- # the default user
- if users:
- for (uname, uconfig) in users.items():
- if def_user and uname == def_user:
- uconfig['default'] = True
- else:
- uconfig['default'] = False
-
- return users
-
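For illustration, a hypothetical mixed-form input and its normalized output
(assuming util.mergemanydict merges the given dicts with earlier entries
taking precedence):

    u_cfg = ["bob",
             {"name": "alice", "groups": "adm"},
             {"shell": "/bin/zsh"}]   # no 'name': owned by the default user
    _normalize_users(u_cfg, def_user_cfg={"name": "ubuntu",
                                          "groups": ["sudo"]})
    # -> {'bob':    {'default': False},
    #     'alice':  {'groups': 'adm', 'default': False},
    #     'ubuntu': {'shell': '/bin/zsh', 'groups': 'sudo', 'default': True}}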
-
-# Normalizes a set of user/users and group
-# dictionary configuration into a usable
-# format that the rest of cloud-init can
-# understand, using the default user
-# provided by the input distribution (if any)
-# to allow for mapping of the 'default' user.
-#
-# Output is a dictionary of group names -> [member] (list)
-# and a dictionary of user names -> user configuration (dict)
-#
-# If 'user' exists it will override
-# the 'users'[0] entry (if a list) otherwise it will
-# just become an entry in the returned dictionary (no override)
-def normalize_users_groups(cfg, distro):
- if not cfg:
- cfg = {}
-
- users = {}
- groups = {}
- if 'groups' in cfg:
- groups = _normalize_groups(cfg['groups'])
-
- # Handle the previous style of doing this where the first user
- # overrides the concept of the default user if provided in the user: XYZ
- # format.
- old_user = {}
- if 'user' in cfg and cfg['user']:
- old_user = cfg['user']
- # Translate it into the format that is more useful
- # going forward
- if isinstance(old_user, six.string_types):
- old_user = {
- 'name': old_user,
- }
- if not isinstance(old_user, dict):
- LOG.warn(("Format for 'user' key must be a string or "
- "dictionary and not %s"), type_utils.obj_name(old_user))
- old_user = {}
-
- # If no old user format, then assume the distro
- # provides what the 'default' user maps to, but notice
- # that if this is provided, we won't automatically inject
- # a 'default' user into the users list, while if an old user
- # format is provided we will.
- distro_user_config = {}
- try:
- distro_user_config = distro.get_default_user()
- except NotImplementedError:
- LOG.warn(("Distro has not implemented default user "
- "access. No distribution provided default user"
- " will be normalized."))
-
- # Merge the old user (which may just be an empty dict when not
- # present) with the distro-provided default user configuration so
- # that the old user style picks up all the distribution-specific
- # attributes (if any)
- default_user_config = util.mergemanydict([old_user, distro_user_config])
-
- base_users = cfg.get('users', [])
- if not isinstance(base_users, (list, dict) + six.string_types):
- LOG.warn(("Format for 'users' key must be a comma separated string"
- " or a dictionary or a list and not %s"),
- type_utils.obj_name(base_users))
- base_users = []
-
- if old_user:
- # Ensure that when 'user:' is provided, this user
- # always gets added (as the default user)
- if isinstance(base_users, list):
- # Just add it on at the end...
- base_users.append({'name': 'default'})
- elif isinstance(base_users, dict):
- base_users['default'] = dict(base_users).get('default', True)
- elif isinstance(base_users, six.string_types):
- # Just append it on to be re-parsed later
- base_users += ",default"
-
- users = _normalize_users(base_users, default_user_config)
- return (users, groups)
-
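For illustration, a hypothetical cloud-config fragment and what it normalizes
to; 'distro' stands for any distros.Distro instance whose default user is
'ubuntu':

    cfg = {'user': 'admin',     # old style; becomes the default user's name
           'users': ['bob'],
           'groups': 'docker'}
    users, groups = normalize_users_groups(cfg, distro)
    # groups -> {'docker': []}
    # users  -> {'bob':   {'default': False},
    #            'admin': {...distro default-user settings...,
    #                      'default': True}}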
-
-# Given a user dictionary config it will
-# extract the default user name and user config
-# from that list and return that tuple or
-# return (None, None) if no default user is
-# found in the given input
-def extract_default(users, default_name=None, default_config=None):
- if not users:
- users = {}
-
- def safe_find(entry):
- config = entry[1]
- if not config or 'default' not in config:
- return False
- else:
- return config['default']
-
- tmp_users = users.items()
- tmp_users = dict(filter(safe_find, tmp_users))
- if not tmp_users:
- return (default_name, default_config)
- else:
- name = list(tmp_users)[0]
- config = tmp_users[name]
- config.pop('default', None)
- return (name, config)
-
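For illustration (note that the 'default' marker is popped from the matched
config in place):

    users = {'ubuntu': {'groups': 'sudo', 'default': True},
             'bob': {'default': False}}
    extract_default(users)
    # -> ('ubuntu', {'groups': 'sudo'})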
-
-def fetch(name):
- locs, looked_locs = importer.find_module(name, ['', __name__], ['Distro'])
- if not locs:
- raise ImportError("No distribution found for distro %s (searched %s)"
- % (name, looked_locs))
- mod = importer.import_module(locs[0])
- cls = getattr(mod, 'Distro')
- return cls
-
-
-def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone",
- tz_local="/etc/localtime"):
- util.write_file(tz_conf, str(tz).rstrip() + "\n")
- # This ensures that the correct tz will be used for the system
- if tz_local and tz_file:
- # use a symlink if there exists a symlink or tz_local is not present
- islink = os.path.islink(tz_local)
- if islink or not os.path.exists(tz_local):
- if islink:
- util.del_file(tz_local)
- os.symlink(tz_file, tz_local)
- else:
- util.copy(tz_file, tz_local)
- return
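A hypothetical call; the zoneinfo path is only illustrative:

    set_etc_timezone('Europe/Berlin',
                     tz_file='/usr/share/zoneinfo/Europe/Berlin')
    # writes 'Europe/Berlin\n' to /etc/timezone and points /etc/localtime
    # at the tz_file (symlinked when /etc/localtime is absent or already a
    # symlink, copied otherwise)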
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
deleted file mode 100644
index 66209f22..00000000
--- a/cloudinit/distros/arch.py
+++ /dev/null
@@ -1,201 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 Rackspace, US Inc.
-#
-# Author: Nate House <nathan.house@rackspace.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import log as logging
-from cloudinit import util
-
-from cloudinit.distros import net_util
-from cloudinit.distros.parsers.hostname import HostnameConf
-
-from cloudinit.settings import PER_INSTANCE
-
-LOG = logging.getLogger(__name__)
-
-
-class Distro(distros.Distro):
- locale_conf_fn = "/etc/locale.gen"
- network_conf_dir = "/etc/netctl"
- resolve_conf_fn = "/etc/resolv.conf"
- init_cmd = ['systemctl'] # init scripts
-
- def __init__(self, name, cfg, paths):
- distros.Distro.__init__(self, name, cfg, paths)
- # This will be used to restrict certain
- # calls from repeatedly happening (when they
- # should only happen say once per instance...)
- self._runner = helpers.Runners(paths)
- self.osfamily = 'arch'
- cfg['ssh_svcname'] = 'sshd'
-
- def apply_locale(self, locale, out_fn=None):
- if not out_fn:
- out_fn = self.locale_conf_fn
- util.subp(['locale-gen', '-G', locale], capture=False)
- # "" provides trailing newline during join
- lines = [
- util.make_header(),
- 'LANG="%s"' % (locale),
- "",
- ]
- util.write_file(out_fn, "\n".join(lines))
-
- def install_packages(self, pkglist):
- self.update_package_sources()
- self.package_command('', pkgs=pkglist)
-
- def _write_network(self, settings):
- entries = net_util.translate_network(settings)
- LOG.debug("Translated ubuntu style network settings %s into %s",
- settings, entries)
- dev_names = entries.keys()
- # Format for netctl
- for (dev, info) in entries.items():
- nameservers = []
- net_fn = self.network_conf_dir + '/' + dev
- net_cfg = {
- 'Connection': 'ethernet',
- 'Interface': dev,
- 'IP': info.get('bootproto'),
- 'Address': "('%s/%s')" % (info.get('address'),
- info.get('netmask')),
- 'Gateway': info.get('gateway'),
- 'DNS': str(tuple(info.get('dns-nameservers', []))).replace(',', '')
- }
- util.write_file(net_fn, convert_netctl(net_cfg))
- if info.get('auto'):
- self._enable_interface(dev)
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
-
- if nameservers:
- util.write_file(self.resolve_conf_fn,
- convert_resolv_conf(nameservers))
-
- return dev_names
-
- def _enable_interface(self, device_name):
- cmd = ['netctl', 'reenable', device_name]
- try:
- (_out, err) = util.subp(cmd)
- if len(err):
- LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
- except util.ProcessExecutionError:
- util.logexc(LOG, "Running interface command %s failed", cmd)
-
- def _bring_up_interface(self, device_name):
- cmd = ['netctl', 'restart', device_name]
- LOG.debug("Attempting to run bring up interface %s using command %s",
- device_name, cmd)
- try:
- (_out, err) = util.subp(cmd)
- if len(err):
- LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
- return True
- except util.ProcessExecutionError:
- util.logexc(LOG, "Running interface command %s failed", cmd)
- return False
-
- def _bring_up_interfaces(self, device_names):
- for d in device_names:
- if not self._bring_up_interface(d):
- return False
- return True
-
- def _write_hostname(self, your_hostname, out_fn):
- conf = None
- try:
- # Try to update the previous one
- # so let's see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
- except IOError:
- pass
- if not conf:
- conf = HostnameConf('')
- conf.set_hostname(your_hostname)
- util.write_file(out_fn, conf, 0o644)
-
- def _read_system_hostname(self):
- sys_hostname = self._read_hostname(self.hostname_conf_fn)
- return (self.hostname_conf_fn, sys_hostname)
-
- def _read_hostname_conf(self, filename):
- conf = HostnameConf(util.load_file(filename))
- conf.parse()
- return conf
-
- def _read_hostname(self, filename, default=None):
- hostname = None
- try:
- conf = self._read_hostname_conf(filename)
- hostname = conf.hostname
- except IOError:
- pass
- if not hostname:
- return default
- return hostname
-
- def set_timezone(self, tz):
- distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
-
- def package_command(self, command, args=None, pkgs=None):
- if pkgs is None:
- pkgs = []
-
- cmd = ['pacman']
- # Sync package databases and keep the output quiet
- cmd.append("-Sy")
- cmd.append("--quiet")
- cmd.append("--noconfirm")
-
- if args and isinstance(args, str):
- cmd.append(args)
- elif args and isinstance(args, list):
- cmd.extend(args)
-
- if command:
- cmd.append(command)
-
- pkglist = util.expand_package_list('%s-%s', pkgs)
- cmd.extend(pkglist)
-
- # Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
-
- def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["-y"], freq=PER_INSTANCE)
-
-
-def convert_netctl(settings):
- """Returns a settings string formatted for netctl."""
- result = ''
- if isinstance(settings, dict):
- for k, v in settings.items():
- result = result + '%s=%s\n' % (k, v)
- return result
-
-
-def convert_resolv_conf(settings):
- """Returns a settings string formatted for resolv.conf."""
- result = ''
- if isinstance(settings, list):
- for ns in settings:
- result = result + 'nameserver %s\n' % ns
- return result
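For illustration, what the two converters emit:

    convert_netctl({'Interface': 'eth0', 'Connection': 'ethernet'})
    # -> 'Interface=eth0\nConnection=ethernet\n' (order follows the dict)
    convert_resolv_conf(['10.0.0.1', '10.0.0.2'])
    # -> 'nameserver 10.0.0.1\nnameserver 10.0.0.2\n'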
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
deleted file mode 100644
index f9b3b92e..00000000
--- a/cloudinit/distros/debian.py
+++ /dev/null
@@ -1,236 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import log as logging
-from cloudinit.net import eni
-from cloudinit.net.network_state import parse_net_config_data
-from cloudinit import util
-
-from cloudinit.distros.parsers.hostname import HostnameConf
-
-from cloudinit.settings import PER_INSTANCE
-
-LOG = logging.getLogger(__name__)
-
-APT_GET_COMMAND = ('apt-get', '--option=Dpkg::Options::=--force-confold',
- '--option=Dpkg::options::=--force-unsafe-io',
- '--assume-yes', '--quiet')
-APT_GET_WRAPPER = {
- 'command': 'eatmydata',
- 'enabled': 'auto',
-}
-
-ENI_HEADER = """# This file is generated from information provided by
-# the datasource. Changes to it will not persist across an instance reboot.
-# To disable cloud-init's network configuration capabilities, write a file
-# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following:
-# network: {config: disabled}
-"""
-
-
-class Distro(distros.Distro):
- hostname_conf_fn = "/etc/hostname"
- locale_conf_fn = "/etc/default/locale"
- network_conf_fn = "/etc/network/interfaces.d/50-cloud-init.cfg"
-
- def __init__(self, name, cfg, paths):
- distros.Distro.__init__(self, name, cfg, paths)
- # This will be used to restrict certain
- # calls from repeatedly happening (when they
- # should only happen say once per instance...)
- self._runner = helpers.Runners(paths)
- self.osfamily = 'debian'
- self._net_renderer = eni.Renderer({
- 'eni_path': self.network_conf_fn,
- 'eni_header': ENI_HEADER,
- 'links_path_prefix': None,
- 'netrules_path': None,
- })
-
- def apply_locale(self, locale, out_fn=None):
- if not out_fn:
- out_fn = self.locale_conf_fn
- util.subp(['locale-gen', locale], capture=False)
- util.subp(['update-locale', locale], capture=False)
- # "" provides trailing newline during join
- lines = [
- util.make_header(),
- 'LANG="%s"' % (locale),
- "",
- ]
- util.write_file(out_fn, "\n".join(lines))
-
- def install_packages(self, pkglist):
- self.update_package_sources()
- self.package_command('install', pkgs=pkglist)
-
- def _write_network(self, settings):
- util.write_file(self.network_conf_fn, settings)
- return ['all']
-
- def _write_network_config(self, netconfig):
- ns = parse_net_config_data(netconfig)
- self._net_renderer.render_network_state("/", ns)
- _maybe_remove_legacy_eth0()
- return []
-
- def _bring_up_interfaces(self, device_names):
- use_all = False
- for d in device_names:
- if d == 'all':
- use_all = True
- if use_all:
- return distros.Distro._bring_up_interface(self, '--all')
- else:
- return distros.Distro._bring_up_interfaces(self, device_names)
-
- def _write_hostname(self, your_hostname, out_fn):
- conf = None
- try:
- # Try to update the previous one
- # so let's see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
- except IOError:
- pass
- if not conf:
- conf = HostnameConf('')
- conf.set_hostname(your_hostname)
- util.write_file(out_fn, str(conf), 0o644)
-
- def _read_system_hostname(self):
- sys_hostname = self._read_hostname(self.hostname_conf_fn)
- return (self.hostname_conf_fn, sys_hostname)
-
- def _read_hostname_conf(self, filename):
- conf = HostnameConf(util.load_file(filename))
- conf.parse()
- return conf
-
- def _read_hostname(self, filename, default=None):
- hostname = None
- try:
- conf = self._read_hostname_conf(filename)
- hostname = conf.hostname
- except IOError:
- pass
- if not hostname:
- return default
- return hostname
-
- def _get_localhost_ip(self):
- # Note: http://www.leonardoborda.com/blog/127-0-1-1-ubuntu-debian/
- return "127.0.1.1"
-
- def set_timezone(self, tz):
- distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
-
- def package_command(self, command, args=None, pkgs=None):
- if pkgs is None:
- pkgs = []
-
- e = os.environ.copy()
- # See: http://tiny.cc/kg91fw
- # Or: http://tiny.cc/mh91fw
- e['DEBIAN_FRONTEND'] = 'noninteractive'
-
- wcfg = self.get_option("apt_get_wrapper", APT_GET_WRAPPER)
- cmd = _get_wrapper_prefix(
- wcfg.get('command', APT_GET_WRAPPER['command']),
- wcfg.get('enabled', APT_GET_WRAPPER['enabled']))
-
- cmd.extend(list(self.get_option("apt_get_command", APT_GET_COMMAND)))
-
- if args and isinstance(args, str):
- cmd.append(args)
- elif args and isinstance(args, list):
- cmd.extend(args)
-
- subcmd = command
- if command == "upgrade":
- subcmd = self.get_option("apt_get_upgrade_subcommand",
- "dist-upgrade")
-
- cmd.append(subcmd)
-
- pkglist = util.expand_package_list('%s=%s', pkgs)
- cmd.extend(pkglist)
-
- # Allow the output of this to flow outwards (ie not be captured)
- util.log_time(logfunc=LOG.debug,
- msg="apt-%s [%s]" % (command, ' '.join(cmd)),
- func=util.subp,
- args=(cmd,), kwargs={'env': e, 'capture': False})
-
- def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["update"], freq=PER_INSTANCE)
-
- def get_primary_arch(self):
- (arch, _err) = util.subp(['dpkg', '--print-architecture'])
- return str(arch).strip()
-
-
-def _get_wrapper_prefix(cmd, mode):
- if isinstance(cmd, str):
- cmd = [str(cmd)]
-
- if (util.is_true(mode) or
- (str(mode).lower() == "auto" and cmd[0] and
- util.which(cmd[0]))):
- return cmd
- else:
- return []
-
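For illustration, assuming util.is_true and util.which behave as their names
suggest:

    _get_wrapper_prefix('eatmydata', 'auto')  # ['eatmydata'] if on PATH,
                                              # else []
    _get_wrapper_prefix('eatmydata', True)    # ['eatmydata'] unconditionally
    _get_wrapper_prefix('eatmydata', False)   # []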
-
-def _maybe_remove_legacy_eth0(path="/etc/network/interfaces.d/eth0.cfg"):
- """Ubuntu cloud images previously included a 'eth0.cfg' that had
- hard coded content. That file would interfere with the rendered
- configuration if it was present.
-
- If the file does not exist, do nothing.
- If the file exists:
- - with known content, remove it and warn
- - with unknown content, leave it and warn
- """
-
- if not os.path.exists(path):
- return
-
- bmsg = "Dynamic networking config may not apply."
- try:
- contents = util.load_file(path)
- known_contents = ["auto eth0", "iface eth0 inet dhcp"]
- lines = [f.strip() for f in contents.splitlines()
- if not f.startswith("#")]
- if lines == known_contents:
- util.del_file(path)
- msg = "removed %s with known contents" % path
- else:
- msg = (bmsg + " '%s' exists with user configured content." % path)
- except Exception:
- msg = bmsg + " %s exists, but could not be read." % path
-
- LOG.warn(msg)
diff --git a/cloudinit/distros/fedora.py b/cloudinit/distros/fedora.py
deleted file mode 100644
index c777845d..00000000
--- a/cloudinit/distros/fedora.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.distros import rhel
-
-from cloudinit import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-class Distro(rhel.Distro):
- pass
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
deleted file mode 100644
index 91bf4a4e..00000000
--- a/cloudinit/distros/freebsd.py
+++ /dev/null
@@ -1,417 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 Harm Weites
-#
-# Author: Harm Weites <harm@weites.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import six
-from six import StringIO
-
-import re
-
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import log as logging
-from cloudinit import ssh_util
-from cloudinit import util
-
-from cloudinit.distros import net_util
-from cloudinit.distros.parsers.resolv_conf import ResolvConf
-
-from cloudinit.settings import PER_INSTANCE
-
-LOG = logging.getLogger(__name__)
-
-
-class Distro(distros.Distro):
- rc_conf_fn = "/etc/rc.conf"
- login_conf_fn = '/etc/login.conf'
- login_conf_fn_bak = '/etc/login.conf.orig'
- resolv_conf_fn = '/etc/resolv.conf'
- ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users'
-
- def __init__(self, name, cfg, paths):
- distros.Distro.__init__(self, name, cfg, paths)
- # This will be used to restrict certain
- # calls from repeatedly happening (when they
- # should only happen say once per instance...)
- self._runner = helpers.Runners(paths)
- self.osfamily = 'freebsd'
-
- # Updates a key in /etc/rc.conf.
- def updatercconf(self, key, value):
- LOG.debug("Checking %s for: %s = %s", self.rc_conf_fn, key, value)
- conf = self.loadrcconf()
- config_changed = False
- if key not in conf:
- LOG.debug("Adding key in %s: %s = %s", self.rc_conf_fn, key,
- value)
- conf[key] = value
- config_changed = True
- else:
- for item in conf.keys():
- if item == key and conf[item] != value:
- conf[item] = value
- LOG.debug("Changing key in %s: %s = %s", self.rc_conf_fn,
- key, value)
- config_changed = True
-
- if config_changed:
- LOG.info("Writing %s", self.rc_conf_fn)
- buf = StringIO()
- for keyval in conf.items():
- buf.write('%s="%s"\n' % keyval)
- util.write_file(self.rc_conf_fn, buf.getvalue())
-
- # Load the contents of /etc/rc.conf and store all keys in a dict. Make sure
- # quotes are ignored:
- # hostname="bla"
- def loadrcconf(self):
- RE_MATCH = re.compile(r'^(\w+)\s*=\s*(.*)\s*')
- conf = {}
- lines = util.load_file(self.rc_conf_fn).splitlines()
- for line in lines:
- m = RE_MATCH.match(line)
- if not m:
- LOG.debug("Skipping line from /etc/rc.conf: %s", line)
- continue
- key = m.group(1).rstrip()
- val = m.group(2).rstrip()
- # Strip the surrounding quotes (not completely correct, i.e. it
- # won't handle escaped or embedded quotes, but should be ok ...)
- if val[0] in ('"', "'"):
- val = val[1:]
- if val[-1] in ('"', "'"):
- val = val[0:-1]
- if len(val) == 0:
- LOG.debug("Skipping empty value from /etc/rc.conf: %s", line)
- continue
- conf[key] = val
- return conf
-
- def readrcconf(self, key):
- conf = self.loadrcconf()
- try:
- val = conf[key]
- except KeyError:
- val = None
- return val
-
- # Nova will inject something like eth0; rewrite that to use the FreeBSD
- # adapter name. Since the name depends on the driver in use, we need to
- # figure out which interfaces are available. On KVM platforms this is
- # vtnet0, whereas Xen would use xn0.
- def getnetifname(self, dev):
- LOG.debug("Translating network interface %s", dev)
- if dev.startswith('lo'):
- return dev
-
- n = re.search(r'\d+$', dev)
- index = n.group(0)
-
- (out, err) = util.subp(['ifconfig', '-a'])
- ifconfigoutput = [x for x in (out.strip()).splitlines()
- if len(x.split()) > 0]
- for line in ifconfigoutput:
- m = re.match(r'^\w+', line)
- if m:
- if m.group(0).startswith('lo'):
- continue
- # Just settle for the first non-lo adapter we find, since it's
- # rather unlikely there will be multiple nicdrivers involved.
- bsddev = m.group(0)
- break
-
- # Replace the index with the one we're after.
- bsddev = re.sub(r'\d+$', index, bsddev)
- LOG.debug("Using network interface %s", bsddev)
- return bsddev
-
- def _read_system_hostname(self):
- sys_hostname = self._read_hostname(filename=None)
- return ('rc.conf', sys_hostname)
-
- def _read_hostname(self, filename, default=None):
- hostname = None
- try:
- hostname = self.readrcconf('hostname')
- except IOError:
- pass
- if not hostname:
- return default
- return hostname
-
- def _write_hostname(self, hostname, filename):
- self.updatercconf('hostname', hostname)
-
- def create_group(self, name, members):
- group_add_cmd = ['pw', 'groupadd', '-n', name]
- if util.is_group(name):
- LOG.warn("Skipping creation of existing group '%s'", name)
- else:
- try:
- util.subp(group_add_cmd)
- LOG.info("Created new group %s", name)
- except Exception as e:
- util.logexc(LOG, "Failed to create group %s", name)
- raise e
-
- if len(members) > 0:
- for member in members:
- if not util.is_user(member):
- LOG.warn("Unable to add group member '%s' to group '%s'"
- "; user does not exist.", member, name)
- continue
- try:
- util.subp(['pw', 'usermod', '-n', member, '-G', name])
- LOG.info("Added user '%s' to group '%s'", member, name)
- except Exception:
- util.logexc(LOG, "Failed to add user '%s' to group '%s'",
- member, name)
-
- def add_user(self, name, **kwargs):
- if util.is_user(name):
- LOG.info("User %s already exists, skipping.", name)
- return False
-
- adduser_cmd = ['pw', 'useradd', '-n', name]
- log_adduser_cmd = ['pw', 'useradd', '-n', name]
-
- adduser_opts = {
- "homedir": '-d',
- "gecos": '-c',
- "primary_group": '-g',
- "groups": '-G',
- "passwd": '-h',
- "shell": '-s',
- "inactive": '-E',
- }
- adduser_flags = {
- "no_user_group": '--no-user-group',
- "system": '--system',
- "no_log_init": '--no-log-init',
- }
-
- redact_opts = ['passwd']
-
- for key, val in kwargs.items():
- if (key in adduser_opts and val and
- isinstance(val, six.string_types)):
- adduser_cmd.extend([adduser_opts[key], val])
-
- # Redact certain fields from the logs
- if key in redact_opts:
- log_adduser_cmd.extend([adduser_opts[key], 'REDACTED'])
- else:
- log_adduser_cmd.extend([adduser_opts[key], val])
-
- elif key in adduser_flags and val:
- adduser_cmd.append(adduser_flags[key])
- log_adduser_cmd.append(adduser_flags[key])
-
- if 'no_create_home' in kwargs or 'system' in kwargs:
- adduser_cmd.append('-d/nonexistent')
- log_adduser_cmd.append('-d/nonexistent')
- else:
- adduser_cmd.append('-d/usr/home/%s' % name)
- adduser_cmd.append('-m')
- log_adduser_cmd.append('-d/usr/home/%s' % name)
- log_adduser_cmd.append('-m')
-
- # Run the command
- LOG.info("Adding user %s", name)
- try:
- util.subp(adduser_cmd, logstring=log_adduser_cmd)
- except Exception as e:
- util.logexc(LOG, "Failed to create user %s", name)
- raise e
-
- def set_passwd(self, user, passwd, hashed=False):
- cmd = ['pw', 'usermod', user]
-
- if hashed:
- cmd.append('-H')
- else:
- cmd.append('-h')
-
- cmd.append('0')
-
- try:
- util.subp(cmd, passwd, logstring="chpasswd for %s" % user)
- except Exception as e:
- util.logexc(LOG, "Failed to set password for %s", user)
- raise e
-
- def lock_passwd(self, name):
- try:
- util.subp(['pw', 'usermod', name, '-h', '-'])
- except Exception as e:
- util.logexc(LOG, "Failed to lock user %s", name)
- raise e
-
- def create_user(self, name, **kwargs):
- self.add_user(name, **kwargs)
-
- # Set password if plain-text password provided and non-empty
- if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
- self.set_passwd(name, kwargs['plain_text_passwd'])
-
- # Lock down the account by default; 'lock_passwd' defaults to True,
- # so lock the account unless lock_passwd is False.
- if kwargs.get('lock_passwd', True):
- self.lock_passwd(name)
-
- # Configure sudo access
- if 'sudo' in kwargs:
- self.write_sudo_rules(name, kwargs['sudo'])
-
- # Import SSH keys
- if 'ssh_authorized_keys' in kwargs:
- keys = set(kwargs['ssh_authorized_keys']) or []
- ssh_util.setup_user_keys(keys, name, options=None)
-
- def _write_network(self, settings):
- entries = net_util.translate_network(settings)
- nameservers = []
- searchdomains = []
- dev_names = entries.keys()
- for (device, info) in entries.items():
- # Skip the loopback interface.
- if device.startswith('lo'):
- continue
-
- dev = self.getnetifname(device)
-
- LOG.info('Configuring interface %s', dev)
-
- if info.get('bootproto') == 'static':
- LOG.debug('Configuring dev %s with %s / %s', dev,
- info.get('address'), info.get('netmask'))
- # Configure an ipv4 address.
- ifconfig = (info.get('address') + ' netmask ' +
- info.get('netmask'))
-
- # Configure the gateway.
- self.updatercconf('defaultrouter', info.get('gateway'))
-
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
- if 'dns-search' in info:
- searchdomains.extend(info['dns-search'])
- else:
- ifconfig = 'DHCP'
-
- self.updatercconf('ifconfig_' + dev, ifconfig)
-
- # Try to read the /etc/resolv.conf or just start from scratch if that
- # fails.
- try:
- resolvconf = ResolvConf(util.load_file(self.resolv_conf_fn))
- resolvconf.parse()
- except IOError:
- util.logexc(LOG, "Failed to parse %s, use new empty file",
- self.resolv_conf_fn)
- resolvconf = ResolvConf('')
- resolvconf.parse()
-
- # Add some nameservers
- for server in nameservers:
- try:
- resolvconf.add_nameserver(server)
- except ValueError:
- util.logexc(LOG, "Failed to add nameserver %s", server)
-
- # And add any searchdomains.
- for domain in searchdomains:
- try:
- resolvconf.add_search_domain(domain)
- except ValueError:
- util.logexc(LOG, "Failed to add search domain %s", domain)
- util.write_file(self.resolv_conf_fn, str(resolvconf), 0o644)
-
- return dev_names
-
- def apply_locale(self, locale, out_fn=None):
- # Adjust the locale value in login.conf to the new value
- newconf = StringIO()
- for line in util.load_file(self.login_conf_fn).splitlines():
- newconf.write(re.sub(r'^default:',
- r'default:lang=%s:' % locale, line))
- newconf.write("\n")
-
- # Make a backup of login.conf.
- util.copy(self.login_conf_fn, self.login_conf_fn_bak)
-
- # And write the new login.conf.
- util.write_file(self.login_conf_fn, newconf.getvalue())
-
- try:
- LOG.debug("Running cap_mkdb for %s", locale)
- util.subp(['cap_mkdb', self.login_conf_fn])
- except util.ProcessExecutionError:
- # cap_mkdb failed, so restore the backup.
- util.logexc(LOG, "Failed to apply locale %s", locale)
- try:
- util.copy(self.login_conf_fn_bak, self.login_conf_fn)
- except IOError:
- util.logexc(LOG, "Failed to restore %s backup",
- self.login_conf_fn)
-
- def _bring_up_interface(self, device_name):
- if device_name.startswith('lo'):
- return
- dev = self.getnetifname(device_name)
- cmd = ['/etc/rc.d/netif', 'start', dev]
- LOG.debug("Attempting to bring up interface %s using command %s",
- dev, cmd)
- # This could return 1 when the interface has already been put UP by the
- # OS. This is just fine.
- (_out, err) = util.subp(cmd, rcs=[0, 1])
- if len(err):
- LOG.warn("Error running %s: %s", cmd, err)
-
- def install_packages(self, pkglist):
- self.update_package_sources()
- self.package_command('install', pkgs=pkglist)
-
- def package_command(self, command, args=None, pkgs=None):
- if pkgs is None:
- pkgs = []
-
- e = os.environ.copy()
- e['ASSUME_ALWAYS_YES'] = 'YES'
-
- cmd = ['pkg']
- if args and isinstance(args, str):
- cmd.append(args)
- elif args and isinstance(args, list):
- cmd.extend(args)
-
- if command:
- cmd.append(command)
-
- pkglist = util.expand_package_list('%s-%s', pkgs)
- cmd.extend(pkglist)
-
- # Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, env=e, capture=False)
-
- def set_timezone(self, tz):
- distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
-
- def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["update"], freq=PER_INSTANCE)
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
deleted file mode 100644
index 6267dd6e..00000000
--- a/cloudinit/distros/gentoo.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 Rackspace, US Inc.
-#
-# Author: Nate House <nathan.house@rackspace.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import glob
-
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import log as logging
-from cloudinit import util
-
-from cloudinit.distros.parsers.hostname import HostnameConf
-
-from cloudinit.settings import PER_INSTANCE
-
-LOG = logging.getLogger(__name__)
-
-
-class Distro(distros.Distro):
- locale_conf_fn = "/etc/locale.gen"
- network_conf_fn = "/etc/conf.d/net"
- init_cmd = [''] # init scripts
-
- def __init__(self, name, cfg, paths):
- distros.Distro.__init__(self, name, cfg, paths)
- # This will be used to restrict certain
- # calls from repeatedly happening (when they
- # should only happen say once per instance...)
- self._runner = helpers.Runners(paths)
- self.osfamily = 'gentoo'
- # Fix sshd restarts
- cfg['ssh_svcname'] = '/etc/init.d/sshd'
-
- def apply_locale(self, locale, out_fn=None):
- if not out_fn:
- out_fn = self.locale_conf_fn
- util.subp(['locale-gen', '-G', locale], capture=False)
- # "" provides trailing newline during join
- lines = [
- util.make_header(),
- 'LANG="%s"' % (locale),
- "",
- ]
- util.write_file(out_fn, "\n".join(lines))
-
- def install_packages(self, pkglist):
- self.update_package_sources()
- self.package_command('', pkgs=pkglist)
-
- def _write_network(self, settings):
- util.write_file(self.network_conf_fn, settings)
- return ['all']
-
- def _bring_up_interface(self, device_name):
- cmd = ['/etc/init.d/net.%s' % device_name, 'restart']
- LOG.debug("Attempting to run bring up interface %s using command %s",
- device_name, cmd)
- try:
- (_out, err) = util.subp(cmd)
- if len(err):
- LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
- return True
- except util.ProcessExecutionError:
- util.logexc(LOG, "Running interface command %s failed", cmd)
- return False
-
- def _bring_up_interfaces(self, device_names):
- use_all = False
- for d in device_names:
- if d == 'all':
- use_all = True
- if use_all:
- # Grab device names from the net.* init scripts; shell globs
- # are not expanded by subp, so expand the pattern in-process
- devices = [x.split('.')[2] for x in
- glob.glob('/etc/init.d/net.*')]
- return distros.Distro._bring_up_interfaces(self, devices)
- else:
- return distros.Distro._bring_up_interfaces(self, device_names)
-
- def _write_hostname(self, your_hostname, out_fn):
- conf = None
- try:
- # Try to update the previous one
- # so let's see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
- except IOError:
- pass
- if not conf:
- conf = HostnameConf('')
- conf.set_hostname(your_hostname)
- util.write_file(out_fn, conf, 0o644)
-
- def _read_system_hostname(self):
- sys_hostname = self._read_hostname(self.hostname_conf_fn)
- return (self.hostname_conf_fn, sys_hostname)
-
- def _read_hostname_conf(self, filename):
- conf = HostnameConf(util.load_file(filename))
- conf.parse()
- return conf
-
- def _read_hostname(self, filename, default=None):
- hostname = None
- try:
- conf = self._read_hostname_conf(filename)
- hostname = conf.hostname
- except IOError:
- pass
- if not hostname:
- return default
- return hostname
-
- def set_timezone(self, tz):
- distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
-
- def package_command(self, command, args=None, pkgs=None):
- if pkgs is None:
- pkgs = []
-
- cmd = ['emerge']
- # Keep the output quiet
- cmd.append("--quiet")
-
- if args and isinstance(args, str):
- cmd.append(args)
- elif args and isinstance(args, list):
- cmd.extend(args)
-
- if command:
- cmd.append(command)
-
- pkglist = util.expand_package_list('%s-%s', pkgs)
- cmd.extend(pkglist)
-
- # Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
-
- def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["-u", "world"], freq=PER_INSTANCE)
diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py
deleted file mode 100644
index cadfa6b6..00000000
--- a/cloudinit/distros/net_util.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-# This is a util function to translate debian-based distro interface blobs as
-# given in /etc/network/interfaces to a *somewhat* agnostic format for
-# distributions that use other formats.
-#
-# TODO(harlowja) remove when we have python-netcf active...
-#
-# The format is the following:
-# {
-# <device-name>: {
-# # All optional (if not existent in original format)
-# "netmask": <ip>,
-# "broadcast": <ip>,
-# "gateway": <ip>,
-# "address": <ip>,
-# "bootproto": "static"|"dhcp",
-# "dns-search": <hostname>,
-# "hwaddress": <mac-address>,
-# "auto": True (or non-existent),
-# "dns-nameservers": [<ip/hostname>, ...],
-# }
-# }
-#
-# Things to note: comments are removed, and if a ubuntu/debian interface is
-# marked as auto then only the first segment is retained, ie
-# 'auto eth0 eth0:1' just marks eth0 as auto (not eth0:1).
-#
-# Example input:
-#
-# auto lo
-# iface lo inet loopback
-#
-# auto eth0
-# iface eth0 inet static
-# address 10.0.0.1
-# netmask 255.255.252.0
-# broadcast 10.0.0.255
-# gateway 10.0.0.2
-# dns-nameservers 98.0.0.1 98.0.0.2
-#
-# Example output:
-# {
-# "lo": {
-# "auto": true
-# },
-# "eth0": {
-# "auto": true,
-# "dns-nameservers": [
-# "98.0.0.1",
-# "98.0.0.2"
-# ],
-# "broadcast": "10.0.0.255",
-# "netmask": "255.255.252.0",
-# "bootproto": "static",
-# "address": "10.0.0.1",
-# "gateway": "10.0.0.2"
-# }
-# }
-
-def translate_network(settings):
- # Get the standard cmd, args from the ubuntu format
- entries = []
- for line in settings.splitlines():
- line = line.strip()
- if not line or line.startswith("#"):
- continue
- split_up = line.split(None, 1)
- if len(split_up) <= 1:
- continue
- entries.append(split_up)
- # Figure out where each iface section is
- ifaces = []
- consume = {}
- for (cmd, args) in entries:
- if cmd == 'iface':
- if consume:
- ifaces.append(consume)
- consume = {}
- consume[cmd] = args
- else:
- consume[cmd] = args
- # Check if there is anything left over to consume
- absorb = False
- for (cmd, args) in consume.items():
- if cmd == 'iface':
- absorb = True
- if absorb:
- ifaces.append(consume)
- # Now translate
- real_ifaces = {}
- for info in ifaces:
- if 'iface' not in info:
- continue
- iface_details = info['iface'].split(None)
- # Check if current device *may* have an ipv6 IP
- use_ipv6 = False
- if 'inet6' in iface_details:
- use_ipv6 = True
- dev_name = None
- if len(iface_details) >= 1:
- dev = iface_details[0].strip().lower()
- if dev:
- dev_name = dev
- if not dev_name:
- continue
- iface_info = {}
- iface_info['ipv6'] = {}
- if len(iface_details) >= 3:
- proto_type = iface_details[2].strip().lower()
- # Seems like this can be 'loopback' which we don't
- # really care about
- if proto_type in ['dhcp', 'static']:
- iface_info['bootproto'] = proto_type
- # These can just be copied over
- if use_ipv6:
- for k in ['address', 'gateway']:
- if k in info:
- val = info[k].strip().lower()
- if val:
- iface_info['ipv6'][k] = val
- else:
- for k in ['netmask', 'address', 'gateway', 'broadcast']:
- if k in info:
- val = info[k].strip().lower()
- if val:
- iface_info[k] = val
- # Name server info provided??
- if 'dns-nameservers' in info:
- iface_info['dns-nameservers'] = info['dns-nameservers'].split()
- # Name server search info provided??
- if 'dns-search' in info:
- iface_info['dns-search'] = info['dns-search'].split()
- # Is any mac address spoofing going on??
- if 'hwaddress' in info:
- hw_info = info['hwaddress'].lower().strip()
- hw_split = hw_info.split(None, 1)
- if len(hw_split) == 2 and hw_split[0].startswith('ether'):
- hw_addr = hw_split[1]
- if hw_addr:
- iface_info['hwaddress'] = hw_addr
- # If ipv6 is enabled, the device will have multiple IPs, so we need to
- # update the dictionary instead of overwriting it...
- if dev_name in real_ifaces:
- real_ifaces[dev_name].update(iface_info)
- else:
- real_ifaces[dev_name] = iface_info
- # Check for those that should be started on boot via 'auto'
- for (cmd, args) in entries:
- args = args.split(None)
- if not args:
- continue
- dev_name = args[0].strip().lower()
- if cmd == 'auto':
- # Seems like auto can be like 'auto eth0 eth0:1' so just get the
- # first part out as the device name
- if dev_name in real_ifaces:
- real_ifaces[dev_name]['auto'] = True
- if cmd == 'iface' and 'inet6' in args:
- real_ifaces[dev_name]['inet6'] = True
- return real_ifaces
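For illustration, running the translator over a fragment like the example in
the comment above (note the always-present 'ipv6' sub-dict, which the example
output in that comment predates):

    settings = '\n'.join([
        'auto eth0',
        'iface eth0 inet static',
        '    address 10.0.0.1',
        '    netmask 255.255.252.0',
    ])
    translate_network(settings)
    # -> {'eth0': {'auto': True, 'bootproto': 'static', 'ipv6': {},
    #              'address': '10.0.0.1', 'netmask': '255.255.252.0'}}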
diff --git a/cloudinit/distros/parsers/__init__.py b/cloudinit/distros/parsers/__init__.py
deleted file mode 100644
index 1c413eaa..00000000
--- a/cloudinit/distros/parsers/__init__.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-def chop_comment(text, comment_chars):
- comment_locations = [text.find(c) for c in comment_chars]
- comment_locations = [c for c in comment_locations if c != -1]
- if not comment_locations:
- return (text, '')
- min_comment = min(comment_locations)
- before_comment = text[0:min_comment]
- comment = text[min_comment:]
- return (before_comment, comment)
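For illustration:

    chop_comment("nameserver 10.0.0.1  # primary", '#')
    # -> ('nameserver 10.0.0.1  ', '# primary')
    chop_comment("no comment here", '#;')
    # -> ('no comment here', '')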
diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py
deleted file mode 100644
index efb185d4..00000000
--- a/cloudinit/distros/parsers/hostname.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from six import StringIO
-
-from cloudinit.distros.parsers import chop_comment
-
-
-# Parser that knows how to work with /etc/hostname format
-class HostnameConf(object):
- def __init__(self, text):
- self._text = text
- self._contents = None
-
- def parse(self):
- if self._contents is None:
- self._contents = self._parse(self._text)
-
- def __str__(self):
- self.parse()
- contents = StringIO()
- for (line_type, components) in self._contents:
- if line_type == 'blank':
- contents.write("%s\n" % (components[0]))
- elif line_type == 'all_comment':
- contents.write("%s\n" % (components[0]))
- elif line_type == 'hostname':
- (hostname, tail) = components
- contents.write("%s%s\n" % (hostname, tail))
- # Ensure trailing newline
- contents = contents.getvalue()
- if not contents.endswith("\n"):
- contents += "\n"
- return contents
-
- @property
- def hostname(self):
- self.parse()
- for (line_type, components) in self._contents:
- if line_type == 'hostname':
- return components[0]
- return None
-
- def set_hostname(self, your_hostname):
- your_hostname = your_hostname.strip()
- if not your_hostname:
- return
- self.parse()
- replaced = False
- for (line_type, components) in self._contents:
- if line_type == 'hostname':
- components[0] = str(your_hostname)
- replaced = True
- if not replaced:
- self._contents.append(('hostname', [str(your_hostname), '']))
-
- def _parse(self, contents):
- entries = []
- hostnames_found = set()
- for line in contents.splitlines():
- if not len(line.strip()):
- entries.append(('blank', [line]))
- continue
- (head, tail) = chop_comment(line.strip(), '#')
- if not len(head):
- entries.append(('all_comment', [line]))
- continue
- entries.append(('hostname', [head, tail]))
- hostnames_found.add(head)
- if len(hostnames_found) > 1:
- raise IOError("Multiple hostnames (%s) found!"
- % (hostnames_found))
- return entries
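A short usage sketch:

    conf = HostnameConf("# set at install time\nold-host\n")
    conf.hostname                  # -> 'old-host' (parse() is implied)
    conf.set_hostname("new-host")
    str(conf)                      # -> '# set at install time\nnew-host\n'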
diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py
deleted file mode 100644
index 3c5498ee..00000000
--- a/cloudinit/distros/parsers/hosts.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from six import StringIO
-
-from cloudinit.distros.parsers import chop_comment
-
-
-# See: man hosts
-# or http://unixhelp.ed.ac.uk/CGI/man-cgi?hosts
-# or http://tinyurl.com/6lmox3
-class HostsConf(object):
- def __init__(self, text):
- self._text = text
- self._contents = None
-
- def parse(self):
- if self._contents is None:
- self._contents = self._parse(self._text)
-
- def get_entry(self, ip):
- self.parse()
- options = []
- for (line_type, components) in self._contents:
- if line_type == 'option':
- (pieces, _tail) = components
- if len(pieces) and pieces[0] == ip:
- options.append(pieces[1:])
- return options
-
- def del_entries(self, ip):
- self.parse()
- n_entries = []
- for (line_type, components) in self._contents:
- if line_type != 'option':
- n_entries.append((line_type, components))
- continue
- else:
- (pieces, _tail) = components
- if len(pieces) and pieces[0] == ip:
- pass
- elif len(pieces):
- n_entries.append((line_type, list(components)))
- self._contents = n_entries
-
- def add_entry(self, ip, canonical_hostname, *aliases):
- self.parse()
- self._contents.append(('option',
- ([ip, canonical_hostname] + list(aliases), '')))
-
- def _parse(self, contents):
- entries = []
- for line in contents.splitlines():
- if not len(line.strip()):
- entries.append(('blank', [line]))
- continue
- (head, tail) = chop_comment(line.strip(), '#')
- if not len(head):
- entries.append(('all_comment', [line]))
- continue
- entries.append(('option', [head.split(None), tail]))
- return entries
-
- def __str__(self):
- self.parse()
- contents = StringIO()
- for (line_type, components) in self._contents:
- if line_type == 'blank':
- contents.write("%s\n" % (components[0]))
- elif line_type == 'all_comment':
- contents.write("%s\n" % (components[0]))
- elif line_type == 'option':
- (pieces, tail) = components
- pieces = [str(p) for p in pieces]
- pieces = "\t".join(pieces)
- contents.write("%s%s\n" % (pieces, tail))
- return contents.getvalue()
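A short usage sketch:

    hosts = HostsConf("127.0.0.1\tlocalhost\n")
    hosts.add_entry('10.0.0.5', 'db1.example.com', 'db1')
    hosts.get_entry('10.0.0.5')    # -> [['db1.example.com', 'db1']]
    hosts.del_entries('10.0.0.5')
    hosts.get_entry('10.0.0.5')    # -> []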
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
deleted file mode 100644
index 2ed13d9c..00000000
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ /dev/null
@@ -1,169 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from six import StringIO
-
-from cloudinit import util
-
-from cloudinit.distros.parsers import chop_comment
-
-
-# See: man resolv.conf
-class ResolvConf(object):
- def __init__(self, text):
- self._text = text
- self._contents = None
-
- def parse(self):
- if self._contents is None:
- self._contents = self._parse(self._text)
-
- @property
- def nameservers(self):
- self.parse()
- return self._retr_option('nameserver')
-
- @property
- def local_domain(self):
- self.parse()
- dm = self._retr_option('domain')
- if dm:
- return dm[0]
- return None
-
- @property
- def search_domains(self):
- self.parse()
- current_sds = self._retr_option('search')
- flat_sds = []
- for sdlist in current_sds:
- for sd in sdlist.split(None):
- if sd:
- flat_sds.append(sd)
- return flat_sds
-
- def __str__(self):
- self.parse()
- contents = StringIO()
- for (line_type, components) in self._contents:
- if line_type == 'blank':
- contents.write("\n")
- elif line_type == 'all_comment':
- contents.write("%s\n" % (components[0]))
- elif line_type == 'option':
- (cfg_opt, cfg_value, comment_tail) = components
- line = "%s %s" % (cfg_opt, cfg_value)
- if len(comment_tail):
- line += comment_tail
- contents.write("%s\n" % (line))
- return contents.getvalue()
-
- def _retr_option(self, opt_name):
- found = []
- for (line_type, components) in self._contents:
- if line_type == 'option':
- (cfg_opt, cfg_value, _comment_tail) = components
- if cfg_opt == opt_name:
- found.append(cfg_value)
- return found
-
- def add_nameserver(self, ns):
- self.parse()
- current_ns = self._retr_option('nameserver')
- new_ns = list(current_ns)
- new_ns.append(str(ns))
- new_ns = util.uniq_list(new_ns)
- if len(new_ns) == len(current_ns):
- return current_ns
- if len(current_ns) >= 3:
- # Hard restriction on only 3 name servers
- raise ValueError(("Adding %r would go beyond the "
- "'3' maximum name servers") % (ns))
- self._remove_option('nameserver')
- for n in new_ns:
- self._contents.append(('option', ['nameserver', n, '']))
- return new_ns
-
- def _remove_option(self, opt_name):
-
- def remove_opt(item):
- line_type, components = item
- if line_type != 'option':
- return False
- (cfg_opt, _cfg_value, _comment_tail) = components
- if cfg_opt != opt_name:
- return False
- return True
-
- new_contents = []
- for c in self._contents:
- if not remove_opt(c):
- new_contents.append(c)
- self._contents = new_contents
-
- def add_search_domain(self, search_domain):
- flat_sds = self.search_domains
- new_sds = list(flat_sds)
- new_sds.append(str(search_domain))
- new_sds = util.uniq_list(new_sds)
- if len(flat_sds) == len(new_sds):
- return new_sds
- if len(flat_sds) >= 6:
- # Hard restriction on only 6 search domains
- raise ValueError(("Adding %r would go beyond the "
- "'6' maximum search domains") % (search_domain))
- s_list = " ".join(new_sds)
- if len(s_list) > 256:
- # Some hard limit on 256 chars total
- raise ValueError(("Adding %r would go beyond the "
- "256 maximum search list character limit")
- % (search_domain))
- self._remove_option('search')
- self._contents.append(('option', ['search', s_list, '']))
- return flat_sds
-
- @local_domain.setter
- def local_domain(self, domain):
- self.parse()
- self._remove_option('domain')
- self._contents.append(('option', ['domain', str(domain), '']))
- return domain
-
- def _parse(self, contents):
- entries = []
- for (i, line) in enumerate(contents.splitlines()):
- sline = line.strip()
- if not sline:
- entries.append(('blank', [line]))
- continue
- (head, tail) = chop_comment(line, ';#')
- if not len(head.strip()):
- entries.append(('all_comment', [line]))
- continue
- if not tail:
- tail = ''
- try:
- (cfg_opt, cfg_values) = head.split(None, 1)
- except (IndexError, ValueError):
- raise IOError("Incorrectly formatted resolv.conf line %s"
- % (i + 1))
- if cfg_opt not in ['nameserver', 'domain',
- 'search', 'sortlist', 'options']:
- raise IOError("Unexpected resolv.conf option %s" % (cfg_opt))
- entries.append(("option", [cfg_opt, cfg_values, tail]))
- return entries
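
A brief sketch (assumed usage, not from the tree) showing the hard caps this parser enforces; addresses are illustrative:

    from cloudinit.distros.parsers.resolv_conf import ResolvConf

    r = ResolvConf("nameserver 10.0.0.1\nsearch example.com\n")
    r.add_nameserver('10.0.0.2')               # deduplicated append
    r.add_search_domain('internal.example.com')
    try:
        r.add_nameserver('10.0.0.3')           # third server still fits
        r.add_nameserver('10.0.0.4')           # fourth exceeds the cap of 3
    except ValueError as e:
        print(e)
    print(str(r))                              # options serialized back out
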
diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py
deleted file mode 100644
index 6157cf32..00000000
--- a/cloudinit/distros/parsers/sys_conf.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-from six import StringIO
-
-import pipes
-import re
-
-# This library is used to parse/write
-# out the various sysconfig files edited (as a best-effort attempt)
-#
-# It has to be slightly modified though
-# to ensure that all values are quoted/unquoted correctly
-# since these configs are usually sourced into
-# bash scripts...
-import configobj
-
-# See: http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html
-# or look at the 'param_expand()' function in the subst.c file in the bash
-# source tarball...
-SHELL_VAR_RULE = r'[a-zA-Z_]+[a-zA-Z0-9_]*'
-SHELL_VAR_REGEXES = [
- # Basic variables
- re.compile(r"\$" + SHELL_VAR_RULE),
- # Things like $?, $0, $-, $@
- re.compile(r"\$[0-9#\?\-@\*]"),
- # Things like ${blah:1} - but this one
- # gets very complex so just try the
- # simple path
- re.compile(r"\$\{.+\}"),
-]
-
-
-def _contains_shell_variable(text):
- for r in SHELL_VAR_REGEXES:
- if r.search(text):
- return True
- return False
-
-
-class SysConf(configobj.ConfigObj):
- def __init__(self, contents):
- configobj.ConfigObj.__init__(self, contents,
- interpolation=False,
- write_empty_values=True)
-
- def __str__(self):
- contents = self.write()
- out_contents = StringIO()
- if isinstance(contents, (list, tuple)):
- out_contents.write("\n".join(contents))
- else:
- out_contents.write(str(contents))
- return out_contents.getvalue()
-
- def _quote(self, value, multiline=False):
- if not isinstance(value, six.string_types):
- raise ValueError('Value "%s" is not a string' % (value))
- if len(value) == 0:
- return ''
- quot_func = None
- if value[0] in ['"', "'"] and value[-1] in ['"', "'"]:
- if len(value) == 1:
- quot_func = (lambda x: self._get_single_quote(x) % x)
- else:
- # Quote whitespace if it isn't the start + end of a shell command
- if value.strip().startswith("$(") and value.strip().endswith(")"):
- pass
- else:
- if re.search(r"[\t\r\n ]", value):
- if _contains_shell_variable(value):
- # If it contains shell variables then we likely want to
- # leave it alone since the pipes.quote function likes
- # to use single quotes which won't get expanded...
- if re.search(r"[\n\"']", value):
- quot_func = (lambda x:
- self._get_triple_quote(x) % x)
- else:
- quot_func = (lambda x:
- self._get_single_quote(x) % x)
- else:
- quot_func = pipes.quote
- if not quot_func:
- return value
- return quot_func(value)
-
- def _write_line(self, indent_string, entry, this_entry, comment):
- # Ensure it is formatted fine for
- # how these sysconfig scripts are used
- val = self._decode_element(self._quote(this_entry))
- key = self._decode_element(self._quote(entry))
- cmnt = self._decode_element(comment)
- return '%s%s%s%s%s' % (indent_string,
- key,
- self._a_to_u('='),
- val,
- cmnt)
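
A small sketch (assumed usage) of the quoting behavior implemented above; keys and values are illustrative:

    from cloudinit.distros.parsers.sys_conf import SysConf

    sc = SysConf(['NETWORKING=yes'])
    sc['GATEWAY'] = '10.0.0.1'        # no whitespace: written as-is
    sc['RC_CMD'] = '$(hostname -f)'   # $( ... ) passthrough: left unquoted
    sc['OPTS'] = '-v --name foo'      # plain whitespace: quoted via pipes.quote
    print(str(sc))
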
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
deleted file mode 100644
index 1aa42d75..00000000
--- a/cloudinit/distros/rhel.py
+++ /dev/null
@@ -1,230 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import log as logging
-from cloudinit.net.network_state import parse_net_config_data
-from cloudinit.net import sysconfig
-from cloudinit import util
-
-from cloudinit.distros import net_util
-from cloudinit.distros import rhel_util
-from cloudinit.settings import PER_INSTANCE
-
-LOG = logging.getLogger(__name__)
-
-
-def _make_sysconfig_bool(val):
- if val:
- return 'yes'
- else:
- return 'no'
-
-
-class Distro(distros.Distro):
- # See: http://tiny.cc/6r99fw
- clock_conf_fn = "/etc/sysconfig/clock"
- locale_conf_fn = '/etc/sysconfig/i18n'
- systemd_locale_conf_fn = '/etc/locale.conf'
- network_conf_fn = "/etc/sysconfig/network"
- hostname_conf_fn = "/etc/sysconfig/network"
- systemd_hostname_conf_fn = "/etc/hostname"
- network_script_tpl = '/etc/sysconfig/network-scripts/ifcfg-%s'
- resolve_conf_fn = "/etc/resolv.conf"
- tz_local_fn = "/etc/localtime"
- usr_lib_exec = "/usr/libexec"
-
- def __init__(self, name, cfg, paths):
- distros.Distro.__init__(self, name, cfg, paths)
- # This will be used to restrict certain
- # calls from repeatedly happening (when they
- # should only happen, say, once per instance...)
- self._runner = helpers.Runners(paths)
- self.osfamily = 'redhat'
- self._net_renderer = sysconfig.Renderer()
-
- def install_packages(self, pkglist):
- self.package_command('install', pkgs=pkglist)
-
- def _write_network_config(self, netconfig):
- ns = parse_net_config_data(netconfig)
- self._net_renderer.render_network_state("/", ns)
- return []
-
- def _write_network(self, settings):
- # TODO(harlowja) fix this... since this is the ubuntu format
- entries = net_util.translate_network(settings)
- LOG.debug("Translated ubuntu style network settings %s into %s",
- settings, entries)
- # Make the intermediate format as the rhel format...
- nameservers = []
- searchservers = []
- dev_names = entries.keys()
- use_ipv6 = False
- for (dev, info) in entries.items():
- net_fn = self.network_script_tpl % (dev)
- net_cfg = {
- 'DEVICE': dev,
- 'NETMASK': info.get('netmask'),
- 'IPADDR': info.get('address'),
- 'BOOTPROTO': info.get('bootproto'),
- 'GATEWAY': info.get('gateway'),
- 'BROADCAST': info.get('broadcast'),
- 'MACADDR': info.get('hwaddress'),
- 'ONBOOT': _make_sysconfig_bool(info.get('auto')),
- }
- if info.get('inet6'):
- use_ipv6 = True
- net_cfg.update({
- 'IPV6INIT': _make_sysconfig_bool(True),
- 'IPV6ADDR': info.get('ipv6').get('address'),
- 'IPV6_DEFAULTGW': info.get('ipv6').get('gateway'),
- })
- rhel_util.update_sysconfig_file(net_fn, net_cfg)
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
- if 'dns-search' in info:
- searchservers.extend(info['dns-search'])
- if nameservers or searchservers:
- rhel_util.update_resolve_conf_file(self.resolve_conf_fn,
- nameservers, searchservers)
- if dev_names:
- net_cfg = {
- 'NETWORKING': _make_sysconfig_bool(True),
- }
- # If IPv6 interface present, enable ipv6 networking
- if use_ipv6:
- net_cfg['NETWORKING_IPV6'] = _make_sysconfig_bool(True)
- net_cfg['IPV6_AUTOCONF'] = _make_sysconfig_bool(False)
- rhel_util.update_sysconfig_file(self.network_conf_fn, net_cfg)
- return dev_names
-
- def apply_locale(self, locale, out_fn=None):
- if self.uses_systemd():
- if not out_fn:
- out_fn = self.systemd_locale_conf_fn
- else:
- if not out_fn:
- out_fn = self.locale_conf_fn
- locale_cfg = {
- 'LANG': locale,
- }
- rhel_util.update_sysconfig_file(out_fn, locale_cfg)
-
- def _write_hostname(self, hostname, out_fn):
- # systemd will never update previous-hostname for us, so
- # we need to do it ourselves
- if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
- util.write_file(out_fn, hostname)
- elif self.uses_systemd():
- util.subp(['hostnamectl', 'set-hostname', str(hostname)])
- else:
- host_cfg = {
- 'HOSTNAME': hostname,
- }
- rhel_util.update_sysconfig_file(out_fn, host_cfg)
-
- def _select_hostname(self, hostname, fqdn):
- # See: http://bit.ly/TwitgL
- # Should be fqdn if we can use it
- if fqdn:
- return fqdn
- return hostname
-
- def _read_system_hostname(self):
- if self.uses_systemd():
- host_fn = self.systemd_hostname_conf_fn
- else:
- host_fn = self.hostname_conf_fn
- return (host_fn, self._read_hostname(host_fn))
-
- def _read_hostname(self, filename, default=None):
- if self.uses_systemd() and filename.endswith('/previous-hostname'):
- return util.load_file(filename).strip()
- elif self.uses_systemd():
- (out, _err) = util.subp(['hostname'])
- if len(out):
- return out
- else:
- return default
- else:
- (_exists, contents) = rhel_util.read_sysconfig_file(filename)
- if 'HOSTNAME' in contents:
- return contents['HOSTNAME']
- else:
- return default
-
- def _bring_up_interfaces(self, device_names):
- if device_names and 'all' in device_names:
- raise RuntimeError(('Distro %s can not translate '
- 'the device name "all"') % (self.name))
- return distros.Distro._bring_up_interfaces(self, device_names)
-
- def set_timezone(self, tz):
- tz_file = self._find_tz_file(tz)
- if self.uses_systemd():
- # Currently, timedatectl complains if invoked during startup
- # so for compatibility, create the link manually.
- util.del_file(self.tz_local_fn)
- util.sym_link(tz_file, self.tz_local_fn)
- else:
- # Adjust the sysconfig clock zone setting
- clock_cfg = {
- 'ZONE': str(tz),
- }
- rhel_util.update_sysconfig_file(self.clock_conf_fn, clock_cfg)
- # This ensures that the correct tz will be used for the system
- util.copy(tz_file, self.tz_local_fn)
-
- def package_command(self, command, args=None, pkgs=None):
- if pkgs is None:
- pkgs = []
-
- cmd = ['yum']
- # If enabled, then yum will be tolerant of errors on the command line
- # with regard to packages.
- # For example: if you request to install foo, bar and baz and baz is
- # installed; yum won't error out complaining that baz is already
- # installed.
- cmd.append("-t")
- # Determines whether or not yum prompts for confirmation
- # of critical actions. We don't want to prompt...
- cmd.append("-y")
-
- if args and isinstance(args, str):
- cmd.append(args)
- elif args and isinstance(args, list):
- cmd.extend(args)
-
- cmd.append(command)
-
- pkglist = util.expand_package_list('%s-%s', pkgs)
- cmd.extend(pkglist)
-
- # Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
-
- def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ["makecache"], freq=PER_INSTANCE)
diff --git a/cloudinit/distros/rhel_util.py b/cloudinit/distros/rhel_util.py
deleted file mode 100644
index 903d7793..00000000
--- a/cloudinit/distros/rhel_util.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-
-from cloudinit.distros.parsers.resolv_conf import ResolvConf
-from cloudinit.distros.parsers.sys_conf import SysConf
-
-from cloudinit import log as logging
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-# Helper function to update a RHEL/SUSE /etc/sysconfig/* file
-def update_sysconfig_file(fn, adjustments, allow_empty=False):
- if not adjustments:
- return
- (exists, contents) = read_sysconfig_file(fn)
- updated_am = 0
- for (k, v) in adjustments.items():
- if v is None:
- continue
- v = str(v)
- if len(v) == 0 and not allow_empty:
- continue
- contents[k] = v
- updated_am += 1
- if updated_am:
- lines = [
- str(contents),
- ]
- if not exists:
- lines.insert(0, util.make_header())
- util.write_file(fn, "\n".join(lines) + "\n", 0o644)
-
-
-# Helper function to read a RHEL/SUSE /etc/sysconfig/* file
-def read_sysconfig_file(fn):
- exists = False
- try:
- contents = util.load_file(fn).splitlines()
- exists = True
- except IOError:
- contents = []
- return (exists, SysConf(contents))
-
-
-# Helper function to update RHEL/SUSE /etc/resolv.conf
-def update_resolve_conf_file(fn, dns_servers, search_servers):
- try:
- r_conf = ResolvConf(util.load_file(fn))
- r_conf.parse()
- except IOError:
- util.logexc(LOG, "Failed at parsing %s reverting to an empty "
- "instance", fn)
- r_conf = ResolvConf('')
- r_conf.parse()
- if dns_servers:
- for s in dns_servers:
- try:
- r_conf.add_nameserver(s)
- except ValueError:
- util.logexc(LOG, "Failed at adding nameserver %s", s)
- if search_servers:
- for s in search_servers:
- try:
- r_conf.add_search_domain(s)
- except ValueError:
- util.logexc(LOG, "Failed at adding search domain %s", s)
- util.write_file(fn, str(r_conf), 0o644)
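
A minimal sketch (assumed usage) of the sysconfig helper above; note that None and empty values are skipped unless allow_empty is passed:

    from cloudinit.distros import rhel_util

    rhel_util.update_sysconfig_file('/etc/sysconfig/network', {
        'NETWORKING': 'yes',
        'GATEWAY': None,   # skipped entirely
        'HOSTNAME': '',    # skipped unless allow_empty=True
    })  # rewrites the file (adding a header if it was absent) only on change
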
diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py
deleted file mode 100644
index 620c974c..00000000
--- a/cloudinit/distros/sles.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# Leaning very heavily on the RHEL and Debian implementation
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import distros
-
-from cloudinit.distros.parsers.hostname import HostnameConf
-
-from cloudinit import helpers
-from cloudinit import log as logging
-from cloudinit import util
-
-from cloudinit.distros import net_util
-from cloudinit.distros import rhel_util
-from cloudinit.settings import PER_INSTANCE
-
-LOG = logging.getLogger(__name__)
-
-
-class Distro(distros.Distro):
- clock_conf_fn = '/etc/sysconfig/clock'
- locale_conf_fn = '/etc/sysconfig/language'
- network_conf_fn = '/etc/sysconfig/network'
- hostname_conf_fn = '/etc/HOSTNAME'
- network_script_tpl = '/etc/sysconfig/network/ifcfg-%s'
- resolve_conf_fn = '/etc/resolv.conf'
- tz_local_fn = '/etc/localtime'
-
- def __init__(self, name, cfg, paths):
- distros.Distro.__init__(self, name, cfg, paths)
- # This will be used to restrict certain
- # calls from repeatedly happening (when they
- # should only happen, say, once per instance...)
- self._runner = helpers.Runners(paths)
- self.osfamily = 'suse'
-
- def install_packages(self, pkglist):
- self.package_command('install', args='-l', pkgs=pkglist)
-
- def _write_network(self, settings):
- # Convert debian settings to ifcfg format
- entries = net_util.translate_network(settings)
- LOG.debug("Translated ubuntu style network settings %s into %s",
- settings, entries)
- # Make the intermediate format as the suse format...
- nameservers = []
- searchservers = []
- dev_names = entries.keys()
- for (dev, info) in entries.items():
- net_fn = self.network_script_tpl % (dev)
- mode = info.get('auto')
- if mode and mode.lower() == 'true':
- mode = 'auto'
- else:
- mode = 'manual'
- net_cfg = {
- 'BOOTPROTO': info.get('bootproto'),
- 'BROADCAST': info.get('broadcast'),
- 'GATEWAY': info.get('gateway'),
- 'IPADDR': info.get('address'),
- 'LLADDR': info.get('hwaddress'),
- 'NETMASK': info.get('netmask'),
- 'STARTMODE': mode,
- 'USERCONTROL': 'no'
- }
- if dev != 'lo':
- net_cfg['ETHERDEVICE'] = dev
- net_cfg['ETHTOOL_OPTIONS'] = ''
- else:
- net_cfg['FIREWALL'] = 'no'
- rhel_util.update_sysconfig_file(net_fn, net_cfg, True)
- if 'dns-nameservers' in info:
- nameservers.extend(info['dns-nameservers'])
- if 'dns-search' in info:
- searchservers.extend(info['dns-search'])
- if nameservers or searchservers:
- rhel_util.update_resolve_conf_file(self.resolve_conf_fn,
- nameservers, searchservers)
- return dev_names
-
- def apply_locale(self, locale, out_fn=None):
- if not out_fn:
- out_fn = self.locale_conf_fn
- locale_cfg = {
- 'RC_LANG': locale,
- }
- rhel_util.update_sysconfig_file(out_fn, locale_cfg)
-
- def _write_hostname(self, hostname, out_fn):
- conf = None
- try:
- # Try to update the previous one
- # so let's see if we can read it first.
- conf = self._read_hostname_conf(out_fn)
- except IOError:
- pass
- if not conf:
- conf = HostnameConf('')
- conf.set_hostname(hostname)
- util.write_file(out_fn, str(conf), 0o644)
-
- def _read_system_hostname(self):
- host_fn = self.hostname_conf_fn
- return (host_fn, self._read_hostname(host_fn))
-
- def _read_hostname_conf(self, filename):
- conf = HostnameConf(util.load_file(filename))
- conf.parse()
- return conf
-
- def _read_hostname(self, filename, default=None):
- hostname = None
- try:
- conf = self._read_hostname_conf(filename)
- hostname = conf.hostname
- except IOError:
- pass
- if not hostname:
- return default
- return hostname
-
- def _bring_up_interfaces(self, device_names):
- if device_names and 'all' in device_names:
- raise RuntimeError(('Distro %s can not translate '
- 'the device name "all"') % (self.name))
- return distros.Distro._bring_up_interfaces(self, device_names)
-
- def set_timezone(self, tz):
- tz_file = self._find_tz_file(tz)
- # Adjust the sysconfig clock zone setting
- clock_cfg = {
- 'TIMEZONE': str(tz),
- }
- rhel_util.update_sysconfig_file(self.clock_conf_fn, clock_cfg)
- # This ensures that the correct tz will be used for the system
- util.copy(tz_file, self.tz_local_fn)
-
- def package_command(self, command, args=None, pkgs=None):
- if pkgs is None:
- pkgs = []
-
- cmd = ['zypper']
- # No user interaction possible, enable non-interactive mode
- cmd.append('--non-interactive')
-
- # Command is the operation, such as install
- cmd.append(command)
-
- # args are the arguments to the command, not global options
- if args and isinstance(args, str):
- cmd.append(args)
- elif args and isinstance(args, list):
- cmd.extend(args)
-
- pkglist = util.expand_package_list('%s-%s', pkgs)
- cmd.extend(pkglist)
-
- # Allow the output of this to flow outwards (ie not be captured)
- util.subp(cmd, capture=False)
-
- def update_package_sources(self):
- self._runner.run("update-sources", self.package_command,
- ['refresh'], freq=PER_INSTANCE)
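
For comparison with the RHEL case, the zypper invocation composed above looks like this (package name hypothetical), given a Distro instance d:

    d.package_command('install', args='-l', pkgs=['foo'])
    # -> util.subp(['zypper', '--non-interactive', 'install', '-l', 'foo'],
    #              capture=False)
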
diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py
deleted file mode 100644
index c527f248..00000000
--- a/cloudinit/distros/ubuntu.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.distros import debian
-from cloudinit import log as logging
-
-LOG = logging.getLogger(__name__)
-
-
-class Distro(debian.Distro):
- pass
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
deleted file mode 100644
index 76dda042..00000000
--- a/cloudinit/ec2_utils.py
+++ /dev/null
@@ -1,201 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import functools
-import json
-
-from cloudinit import log as logging
-from cloudinit import url_helper
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-SKIP_USERDATA_CODES = frozenset([url_helper.NOT_FOUND])
-
-
-class MetadataLeafDecoder(object):
- """Decodes a leaf blob into something meaningful."""
-
- def _maybe_json_object(self, text):
- if not text:
- return False
- text = text.strip()
- if text.startswith("{") and text.endswith("}"):
- return True
- return False
-
- def __call__(self, field, blob):
- if not blob:
- return blob
- try:
- blob = util.decode_binary(blob)
- except UnicodeDecodeError:
- return blob
- if self._maybe_json_object(blob):
- try:
- # Assume it's json, unless it fails parsing...
- return json.loads(blob)
- except (ValueError, TypeError) as e:
- LOG.warn("Field %s looked like a json object, but it was"
- " not: %s", field, e)
- if blob.find("\n") != -1:
- return blob.splitlines()
- return blob
-
-
-# See: http://bit.ly/TyoUQs
-#
-class MetadataMaterializer(object):
- def __init__(self, blob, base_url, caller, leaf_decoder=None):
- self._blob = blob
- self._md = None
- self._base_url = base_url
- self._caller = caller
- if leaf_decoder is None:
- self._leaf_decoder = MetadataLeafDecoder()
- else:
- self._leaf_decoder = leaf_decoder
-
- def _parse(self, blob):
- leaves = {}
- children = []
- blob = util.decode_binary(blob)
-
- if not blob:
- return (leaves, children)
-
- def has_children(item):
- if item.endswith("/"):
- return True
- else:
- return False
-
- def get_name(item):
- if item.endswith("/"):
- return item.rstrip("/")
- return item
-
- for field in blob.splitlines():
- field = field.strip()
- field_name = get_name(field)
- if not field or not field_name:
- continue
- if has_children(field):
- if field_name not in children:
- children.append(field_name)
- else:
- contents = field.split("=", 1)
- resource = field_name
- if len(contents) > 1:
- # What a PITA...
- (ident, sub_contents) = contents
- ident = util.safe_int(ident)
- if ident is not None:
- resource = "%s/openssh-key" % (ident)
- field_name = sub_contents
- leaves[field_name] = resource
- return (leaves, children)
-
- def materialize(self):
- if self._md is not None:
- return self._md
- self._md = self._materialize(self._blob, self._base_url)
- return self._md
-
- def _materialize(self, blob, base_url):
- (leaves, children) = self._parse(blob)
- child_contents = {}
- for c in children:
- child_url = url_helper.combine_url(base_url, c)
- if not child_url.endswith("/"):
- child_url += "/"
- child_blob = self._caller(child_url)
- child_contents[c] = self._materialize(child_blob, child_url)
- leaf_contents = {}
- for (field, resource) in leaves.items():
- leaf_url = url_helper.combine_url(base_url, resource)
- leaf_blob = self._caller(leaf_url)
- leaf_contents[field] = self._leaf_decoder(field, leaf_blob)
- joined = {}
- joined.update(child_contents)
- for field in leaf_contents.keys():
- if field in joined:
- LOG.warn("Duplicate key found in results from %s", base_url)
- else:
- joined[field] = leaf_contents[field]
- return joined
-
-
-def _skip_retry_on_codes(status_codes, _request_args, cause):
- """Returns if a request should retry based on a given set of codes that
- case retrying to be stopped/skipped.
- """
- return cause.code in status_codes
-
-
-def get_instance_userdata(api_version='latest',
- metadata_address='http://169.254.169.254',
- ssl_details=None, timeout=5, retries=5):
- ud_url = url_helper.combine_url(metadata_address, api_version)
- ud_url = url_helper.combine_url(ud_url, 'user-data')
- user_data = ''
- try:
- # It is ok for userdata to not exist (that's why we stop retrying if
- # NOT_FOUND occurs) and in that case we just return an empty string.
- exception_cb = functools.partial(_skip_retry_on_codes,
- SKIP_USERDATA_CODES)
- response = util.read_file_or_url(ud_url,
- ssl_details=ssl_details,
- timeout=timeout,
- retries=retries,
- exception_cb=exception_cb)
- user_data = response.contents
- except url_helper.UrlError as e:
- if e.code not in SKIP_USERDATA_CODES:
- util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
- except Exception:
- util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
- return user_data
-
-
-def get_instance_metadata(api_version='latest',
- metadata_address='http://169.254.169.254',
- ssl_details=None, timeout=5, retries=5,
- leaf_decoder=None):
- md_url = url_helper.combine_url(metadata_address, api_version)
- # Note, 'meta-data' explicitly has trailing /.
- # this is required for CloudStack (LP: #1356855)
- md_url = url_helper.combine_url(md_url, 'meta-data/')
- caller = functools.partial(util.read_file_or_url,
- ssl_details=ssl_details, timeout=timeout,
- retries=retries)
-
- def mcaller(url):
- return caller(url).contents
-
- try:
- response = caller(md_url)
- materializer = MetadataMaterializer(response.contents,
- md_url, mcaller,
- leaf_decoder=leaf_decoder)
- md = materializer.materialize()
- if not isinstance(md, (dict)):
- md = {}
- return md
- except Exception:
- util.logexc(LOG, "Failed fetching metadata from url %s", md_url)
- return {}
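
A short sketch (assumed usage) of the crawler above; it walks the index at .../<api_version>/meta-data/ recursively, JSON-looking leaves are parsed, multi-line leaves become lists, and any failure yields an empty dict:

    from cloudinit import ec2_utils

    md = ec2_utils.get_instance_metadata(api_version='latest')
    ud = ec2_utils.get_instance_userdata()   # '' when the endpoint 404s
    print(md.get('instance-id'), len(ud))
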
diff --git a/cloudinit/filters/__init__.py b/cloudinit/filters/__init__.py
deleted file mode 100644
index da124641..00000000
--- a/cloudinit/filters/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/filters/launch_index.py b/cloudinit/filters/launch_index.py
deleted file mode 100644
index baecdac9..00000000
--- a/cloudinit/filters/launch_index.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import copy
-
-from cloudinit import log as logging
-from cloudinit import user_data as ud
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-class Filter(object):
- def __init__(self, wanted_idx, allow_none=True):
- self.wanted_idx = wanted_idx
- self.allow_none = allow_none
-
- def _select(self, message):
- msg_idx = message.get('Launch-Index', None)
- if self.allow_none and msg_idx is None:
- return True
- msg_idx = util.safe_int(msg_idx)
- if msg_idx != self.wanted_idx:
- return False
- return True
-
- def _do_filter(self, message):
- # Don't use walk() here since we want to do the reforming of the
- # messages ourselves and not flatten the message listings...
- if not self._select(message):
- return None
- if message.is_multipart():
- # Recreate it and its child messages
- prev_msgs = message.get_payload(decode=False)
- new_msgs = []
- discarded = 0
- for m in prev_msgs:
- m = self._do_filter(m)
- if m is not None:
- new_msgs.append(m)
- else:
- discarded += 1
- LOG.debug(("Discarding %s multipart messages "
- "which do not match launch index %s"),
- discarded, self.wanted_idx)
- new_message = copy.copy(message)
- new_message.set_payload(new_msgs)
- new_message[ud.ATTACHMENT_FIELD] = str(len(new_msgs))
- return new_message
- else:
- return copy.copy(message)
-
- def apply(self, root_message):
- if self.wanted_idx is None:
- return root_message
- return self._do_filter(root_message)
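
A minimal sketch (assumed usage; root_msg is a pre-built MIME user-data message) of the filter above. Parts whose Launch-Index header is missing pass through because allow_none defaults to True:

    from cloudinit.filters import launch_index

    filtered = launch_index.Filter(0).apply(root_msg)  # keep index-0 parts
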
diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py
deleted file mode 100644
index 6a76d785..00000000
--- a/cloudinit/gpg.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""gpg.py - Collection of gpg key related functions"""
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2016 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Christian Ehrhardt <christian.ehrhardt@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import log as logging
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-def export_armour(key):
- """Export gpg key, armoured key gets returned"""
- try:
- (armour, _) = util.subp(["gpg", "--export", "--armour", key],
- capture=True)
- except util.ProcessExecutionError as error:
- # debug, since it happens for any key not on the system initially
- LOG.debug('Failed to export armoured key "%s": %s', key, error)
- armour = None
- return armour
-
-
-def receive_key(key, keyserver):
- """Receive gpg key from the specified keyserver"""
- LOG.debug('Receive gpg key "%s"', key)
- try:
- util.subp(["gpg", "--keyserver", keyserver, "--recv-keys", key],
- capture=True)
- except util.ProcessExecutionError as error:
- raise ValueError(('Failed to import key "%s" '
- 'from server "%s" - error %s') %
- (key, keyserver, error))
-
-
-def delete_key(key):
- """Delete the specified key from the local gpg ring"""
- try:
- util.subp(["gpg", "--batch", "--yes", "--delete-keys", key],
- capture=True)
- except util.ProcessExecutionError as error:
- LOG.warn('Failed delete key "%s": %s', key, error)
-
-
-def get_key_by_id(keyid, keyserver="keyserver.ubuntu.com"):
- """get gpg keyid from keyserver"""
- armour = export_armour(keyid)
- if not armour:
- try:
- receive_key(keyid, keyserver=keyserver)
- armour = export_armour(keyid)
- except ValueError:
- LOG.exception('Failed to obtain gpg key %s', keyid)
- raise
- finally:
- # delete just imported key to leave environment as it was before
- delete_key(keyid)
-
- return armour
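
Assumed usage of the helper above (key id hypothetical): the key is fetched from the keyserver only if not already present locally, exported in armoured form, and the imported copy is deleted again on the way out:

    from cloudinit import gpg

    armour = gpg.get_key_by_id('F430BBA5')
    if armour:
        print(armour.splitlines()[0])  # -----BEGIN PGP PUBLIC KEY BLOCK-----
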
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
deleted file mode 100644
index b6c43ce8..00000000
--- a/cloudinit/handlers/__init__.py
+++ /dev/null
@@ -1,274 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import abc
-import os
-import six
-
-from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES)
-
-from cloudinit import importer
-from cloudinit import log as logging
-from cloudinit import type_utils
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-# Used as the content type when a message is not multipart
-# and it doesn't contain its own content-type
-NOT_MULTIPART_TYPE = "text/x-not-multipart"
-
-# When none is assigned this gets used
-OCTET_TYPE = 'application/octet-stream'
-
-# Special content types that signal the start and end of processing
-CONTENT_END = "__end__"
-CONTENT_START = "__begin__"
-CONTENT_SIGNALS = [CONTENT_START, CONTENT_END]
-
-# Used when a part-handler type is encountered
-# to allow for registration of new types.
-PART_CONTENT_TYPES = ["text/part-handler"]
-PART_HANDLER_FN_TMPL = 'part-handler-%03d'
-
-# For parts without filenames
-PART_FN_TPL = 'part-%03d'
-
-# Maps different file beginnings to their content type
-INCLUSION_TYPES_MAP = {
- '#include': 'text/x-include-url',
- '#include-once': 'text/x-include-once-url',
- '#!': 'text/x-shellscript',
- '#cloud-config': 'text/cloud-config',
- '#upstart-job': 'text/upstart-job',
- '#part-handler': 'text/part-handler',
- '#cloud-boothook': 'text/cloud-boothook',
- '#cloud-config-archive': 'text/cloud-config-archive',
- '#cloud-config-jsonp': 'text/cloud-config-jsonp',
-}
-
-# Sorted longest first
-INCLUSION_SRCH = sorted(list(INCLUSION_TYPES_MAP.keys()),
- key=(lambda e: 0 - len(e)))
-
-
-@six.add_metaclass(abc.ABCMeta)
-class Handler(object):
-
- def __init__(self, frequency, version=2):
- self.handler_version = version
- self.frequency = frequency
-
- def __repr__(self):
- return "%s: [%s]" % (type_utils.obj_name(self), self.list_types())
-
- @abc.abstractmethod
- def list_types(self):
- raise NotImplementedError()
-
- @abc.abstractmethod
- def handle_part(self, *args, **kwargs):
- raise NotImplementedError()
-
-
-def run_part(mod, data, filename, payload, frequency, headers):
- mod_freq = mod.frequency
- if not (mod_freq == PER_ALWAYS or
- (frequency == PER_INSTANCE and mod_freq == PER_INSTANCE)):
- return
- # Sanity checks on version (should be convertible to an int)
- try:
- mod_ver = mod.handler_version
- mod_ver = int(mod_ver)
- except (TypeError, ValueError, AttributeError):
- mod_ver = 1
- content_type = headers['Content-Type']
- try:
- LOG.debug("Calling handler %s (%s, %s, %s) with frequency %s",
- mod, content_type, filename, mod_ver, frequency)
- if mod_ver == 3:
- # Treat as v. 3 which does get a frequency + headers
- mod.handle_part(data, content_type, filename,
- payload, frequency, headers)
- elif mod_ver == 2:
- # Treat as v. 2 which does get a frequency
- mod.handle_part(data, content_type, filename,
- payload, frequency)
- elif mod_ver == 1:
- # Treat as v. 1 which gets no frequency
- mod.handle_part(data, content_type, filename, payload)
- else:
- raise ValueError("Unknown module version %s" % (mod_ver))
- except Exception:
- util.logexc(LOG, "Failed calling handler %s (%s, %s, %s) with "
- "frequency %s", mod, content_type, filename, mod_ver,
- frequency)
-
-
-def call_begin(mod, data, frequency):
- # Create a fake header set
- headers = {
- 'Content-Type': CONTENT_START,
- }
- run_part(mod, data, None, None, frequency, headers)
-
-
-def call_end(mod, data, frequency):
- # Create a fake header set
- headers = {
- 'Content-Type': CONTENT_END,
- }
- run_part(mod, data, None, None, frequency, headers)
-
-
-def walker_handle_handler(pdata, _ctype, _filename, payload):
- curcount = pdata['handlercount']
- modname = PART_HANDLER_FN_TMPL % (curcount)
- frequency = pdata['frequency']
- modfname = os.path.join(pdata['handlerdir'], "%s" % (modname))
- if not modfname.endswith(".py"):
- modfname = "%s.py" % (modfname)
- # TODO(harlowja): Check if path exists??
- util.write_file(modfname, payload, 0o600)
- handlers = pdata['handlers']
- try:
- mod = fixup_handler(importer.import_module(modname))
- call_begin(mod, pdata['data'], frequency)
- # Only register and increment after the above have worked, so we don't
- # register if it fails starting.
- handlers.register(mod, initialized=True)
- pdata['handlercount'] = curcount + 1
- except Exception:
- util.logexc(LOG, "Failed at registering python file: %s (part "
- "handler %s)", modfname, curcount)
-
-
-def _extract_first_or_bytes(blob, size):
- # Extract the first line or up to X symbols for text objects
- # Extract first X bytes for binary objects
- try:
- if isinstance(blob, six.string_types):
- start = blob.split("\n", 1)[0]
- else:
- # We want to avoid decoding the whole blob (it might be huge)
- # By taking 4*size bytes we guarantee to decode at least 'size' utf8 chars
- start = blob[:4 * size].decode(errors='ignore').split("\n", 1)[0]
- if len(start) >= size:
- start = start[:size]
- except UnicodeDecodeError:
- # Bytes array doesn't contain text so return chunk of raw bytes
- start = blob[0:size]
- return start
-
-
-def _escape_string(text):
- try:
- return text.encode("string_escape")
- except (LookupError, TypeError):
- try:
- # Unicode (and Python 3's str) doesn't support string_escape...
- return text.encode('unicode_escape')
- except TypeError:
- # Give up...
- pass
- except AttributeError:
- # We're in Python3 and received blob as text
- # No escaping is needed because bytes are printed
- # as 'b\xAA\xBB' automatically in Python3
- pass
- return text
-
-
-def walker_callback(data, filename, payload, headers):
- content_type = headers['Content-Type']
- if content_type in data.get('excluded'):
- LOG.debug('content_type "%s" is excluded', content_type)
- return
-
- if content_type in PART_CONTENT_TYPES:
- walker_handle_handler(data, content_type, filename, payload)
- return
- handlers = data['handlers']
- if content_type in handlers:
- run_part(handlers[content_type], data['data'], filename,
- payload, data['frequency'], headers)
- elif payload:
- # Extract the first line or 24 bytes for displaying in the log
- start = _extract_first_or_bytes(payload, 24)
- details = "'%s...'" % (_escape_string(start))
- if content_type == NOT_MULTIPART_TYPE:
- LOG.warning("Unhandled non-multipart (%s) userdata: %s",
- content_type, details)
- else:
- LOG.warning("Unhandled unknown content-type (%s) userdata: %s",
- content_type, details)
- else:
- LOG.debug("Empty payload of type %s", content_type)
-
-
-# Callback is a function that will be called with
-# (data, filename, payload, headers)
-def walk(msg, callback, data):
- partnum = 0
- for part in msg.walk():
- # multipart/* are just containers
- if part.get_content_maintype() == 'multipart':
- continue
-
- ctype = part.get_content_type()
- if ctype is None:
- ctype = OCTET_TYPE
-
- filename = part.get_filename()
- if not filename:
- filename = PART_FN_TPL % (partnum)
-
- headers = dict(part)
- LOG.debug(headers)
- headers['Content-Type'] = ctype
- payload = util.fully_decoded_payload(part)
- callback(data, filename, payload, headers)
- partnum = partnum + 1
-
-
-def fixup_handler(mod, def_freq=PER_INSTANCE):
- if not hasattr(mod, "handler_version"):
- setattr(mod, "handler_version", 1)
- if not hasattr(mod, 'frequency'):
- setattr(mod, 'frequency', def_freq)
- else:
- freq = mod.frequency
- if freq and freq not in FREQUENCIES:
- LOG.warn("Handler %s has an unknown frequency %s", mod, freq)
- return mod
-
-
-def type_from_starts_with(payload, default=None):
- try:
- payload_lc = util.decode_binary(payload).lower()
- except UnicodeDecodeError:
- return default
- payload_lc = payload_lc.lstrip()
- for text in INCLUSION_SRCH:
- if payload_lc.startswith(text):
- return INCLUSION_TYPES_MAP[text]
- return default
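
A quick sketch of the prefix detection above; matching is case-insensitive and longest-prefix-first over INCLUSION_SRCH (payloads illustrative):

    from cloudinit import handlers

    handlers.type_from_starts_with('#include-once\nhttp://x')  # text/x-include-once-url
    handlers.type_from_starts_with('#include\nhttp://x')       # text/x-include-url
    handlers.type_from_starts_with('#!/bin/sh\necho hi')       # text/x-shellscript
    handlers.type_from_starts_with('garbage', default=handlers.NOT_MULTIPART_TYPE)
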
diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py
deleted file mode 100644
index a4ea47ac..00000000
--- a/cloudinit/handlers/boot_hook.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import handlers
-from cloudinit import log as logging
-from cloudinit import util
-
-from cloudinit.settings import (PER_ALWAYS)
-
-LOG = logging.getLogger(__name__)
-BOOTHOOK_PREFIX = "#cloud-boothook"
-
-
-class BootHookPartHandler(handlers.Handler):
- def __init__(self, paths, datasource, **_kwargs):
- handlers.Handler.__init__(self, PER_ALWAYS)
- self.boothook_dir = paths.get_ipath("boothooks")
- self.instance_id = None
- if datasource:
- self.instance_id = datasource.get_instance_id()
-
- def list_types(self):
- return [
- handlers.type_from_starts_with(BOOTHOOK_PREFIX),
- ]
-
- def _write_part(self, payload, filename):
- filename = util.clean_filename(filename)
- filepath = os.path.join(self.boothook_dir, filename)
- contents = util.strip_prefix_suffix(util.dos2unix(payload),
- prefix=BOOTHOOK_PREFIX)
- util.write_file(filepath, contents.lstrip(), 0o700)
- return filepath
-
- def handle_part(self, data, ctype, filename, payload, frequency):
- if ctype in handlers.CONTENT_SIGNALS:
- return
-
- filepath = self._write_part(payload, filename)
- try:
- env = os.environ.copy()
- if self.instance_id is not None:
- env['INSTANCE_ID'] = str(self.instance_id)
- util.subp([filepath], env=env)
- except util.ProcessExecutionError:
- util.logexc(LOG, "Boothooks script %s execution error", filepath)
- except Exception:
- util.logexc(LOG, "Boothooks unknown error when running %s",
- filepath)
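
A sketch of the input this handler consumes (payload illustrative): the prefix is stripped, the remainder is written 0o700 under the boothooks path, and executed immediately with INSTANCE_ID exported:

    payload = "#cloud-boothook\n#!/bin/sh\necho booted >> /tmp/boothook.log\n"
    # handle_part(data, 'text/cloud-boothook', 'part-001', payload, freq)
    # writes the script and runs it via util.subp with env['INSTANCE_ID'] set.
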
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
deleted file mode 100644
index cad4dc0f..00000000
--- a/cloudinit/handlers/cloud_config.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import jsonpatch
-
-from cloudinit import handlers
-from cloudinit import log as logging
-from cloudinit import mergers
-from cloudinit import util
-
-from cloudinit.settings import (PER_ALWAYS)
-
-LOG = logging.getLogger(__name__)
-
-MERGE_HEADER = 'Merge-Type'
-
-# Due to the way yaml configuration was previously loaded, where each
-# cloud config part was appended to a larger yaml file and that file was
-# then loaded as one big yaml file, we need to mimic that behavior by
-# altering the default strategy to be replacing keys of prior merges.
-#
-#
-# For example
-# #file 1
-# a: 3
-# #file 2
-# a: 22
-# #combined file (comments not included)
-# a: 3
-# a: 22
-#
-# This gets loaded into yaml with final result {'a': 22}
-DEF_MERGERS = mergers.string_extract_mergers('dict(replace)+list()+str()')
-CLOUD_PREFIX = "#cloud-config"
-JSONP_PREFIX = "#cloud-config-jsonp"
-
-# The file header -> content types this module will handle.
-CC_TYPES = {
- JSONP_PREFIX: handlers.type_from_starts_with(JSONP_PREFIX),
- CLOUD_PREFIX: handlers.type_from_starts_with(CLOUD_PREFIX),
-}
-
-
-class CloudConfigPartHandler(handlers.Handler):
- def __init__(self, paths, **_kwargs):
- handlers.Handler.__init__(self, PER_ALWAYS, version=3)
- self.cloud_buf = None
- self.cloud_fn = paths.get_ipath("cloud_config")
- if 'cloud_config_path' in _kwargs:
- self.cloud_fn = paths.get_ipath(_kwargs["cloud_config_path"])
- self.file_names = []
-
- def list_types(self):
- return list(CC_TYPES.values())
-
- def _write_cloud_config(self):
- if not self.cloud_fn:
- return
- # Capture which files we merged from...
- file_lines = []
- if self.file_names:
- file_lines.append("# from %s files" % (len(self.file_names)))
- for fn in self.file_names:
- if not fn:
- fn = '?'
- file_lines.append("# %s" % (fn))
- file_lines.append("")
- if self.cloud_buf is not None:
- # Something was actually gathered....
- lines = [
- CLOUD_PREFIX,
- '',
- ]
- lines.extend(file_lines)
- lines.append(util.yaml_dumps(self.cloud_buf))
- else:
- lines = []
- util.write_file(self.cloud_fn, "\n".join(lines), 0o600)
-
- def _extract_mergers(self, payload, headers):
- merge_header_headers = ''
- for h in [MERGE_HEADER, 'X-%s' % (MERGE_HEADER)]:
- tmp_h = headers.get(h, '')
- if tmp_h:
- merge_header_headers = tmp_h
- break
- # Select either the merge-type from the content,
- # or the merge-type from the headers, or default to our own set
- # if neither of the latter exists (or is empty).
- payload_yaml = util.load_yaml(payload)
- mergers_yaml = mergers.dict_extract_mergers(payload_yaml)
- mergers_header = mergers.string_extract_mergers(merge_header_headers)
- all_mergers = []
- all_mergers.extend(mergers_yaml)
- all_mergers.extend(mergers_header)
- if not all_mergers:
- all_mergers = DEF_MERGERS
- return (payload_yaml, all_mergers)
-
- def _merge_patch(self, payload):
- # JSON doesn't handle comments in this manner, so ensure that,
- # if we started with this 'type', the prefix is removed before
- # attempting to load it as json (which the jsonpatch library will
- # attempt to do).
- payload = payload.lstrip()
- payload = util.strip_prefix_suffix(payload, prefix=JSONP_PREFIX)
- patch = jsonpatch.JsonPatch.from_string(payload)
- LOG.debug("Merging by applying json patch %s", patch)
- self.cloud_buf = patch.apply(self.cloud_buf, in_place=False)
-
- def _merge_part(self, payload, headers):
- (payload_yaml, my_mergers) = self._extract_mergers(payload, headers)
- LOG.debug("Merging by applying %s", my_mergers)
- merger = mergers.construct(my_mergers)
- self.cloud_buf = merger.merge(self.cloud_buf, payload_yaml)
-
- def _reset(self):
- self.file_names = []
- self.cloud_buf = None
-
- def handle_part(self, data, ctype, filename, payload, frequency, headers):
- if ctype == handlers.CONTENT_START:
- self._reset()
- return
- if ctype == handlers.CONTENT_END:
- self._write_cloud_config()
- self._reset()
- return
- try:
- # First time through, merge with an empty dict...
- if self.cloud_buf is None or not self.file_names:
- self.cloud_buf = {}
- if ctype == CC_TYPES[JSONP_PREFIX]:
- self._merge_patch(payload)
- else:
- self._merge_part(payload, headers)
- # Ensure filename is ok to store
- for i in ("\n", "\r", "\t"):
- filename = filename.replace(i, " ")
- self.file_names.append(filename.strip())
- except Exception:
- util.logexc(LOG, "Failed at merging in cloud config part from %s",
- filename)
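
The default merge strategy can be exercised directly; a sketch of the replace-keys behavior documented above:

    from cloudinit import mergers

    merger = mergers.construct(
        mergers.string_extract_mergers('dict(replace)+list()+str()'))
    print(merger.merge({'a': 3}, {'a': 22}))   # -> {'a': 22}
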
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
deleted file mode 100644
index b5087693..00000000
--- a/cloudinit/handlers/shell_script.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import handlers
-from cloudinit import log as logging
-from cloudinit import util
-
-from cloudinit.settings import (PER_ALWAYS)
-
-LOG = logging.getLogger(__name__)
-SHELL_PREFIX = "#!"
-
-
-class ShellScriptPartHandler(handlers.Handler):
- def __init__(self, paths, **_kwargs):
- handlers.Handler.__init__(self, PER_ALWAYS)
- self.script_dir = paths.get_ipath_cur('scripts')
- if 'script_path' in _kwargs:
- self.script_dir = paths.get_ipath_cur(_kwargs['script_path'])
-
- def list_types(self):
- return [
- handlers.type_from_starts_with(SHELL_PREFIX),
- ]
-
- def handle_part(self, data, ctype, filename, payload, frequency):
- if ctype in handlers.CONTENT_SIGNALS:
- # TODO(harlowja): maybe delete existing things here
- return
-
- filename = util.clean_filename(filename)
- payload = util.dos2unix(payload)
- path = os.path.join(self.script_dir, filename)
- util.write_file(path, payload, 0o700)
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
deleted file mode 100644
index ab381e00..00000000
--- a/cloudinit/handlers/upstart_job.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-import os
-import re
-
-from cloudinit import handlers
-from cloudinit import log as logging
-from cloudinit import util
-
-from cloudinit.settings import (PER_INSTANCE)
-
-LOG = logging.getLogger(__name__)
-UPSTART_PREFIX = "#upstart-job"
-
-
-class UpstartJobPartHandler(handlers.Handler):
- def __init__(self, paths, **_kwargs):
- handlers.Handler.__init__(self, PER_INSTANCE)
- self.upstart_dir = paths.upstart_conf_d
-
- def list_types(self):
- return [
- handlers.type_from_starts_with(UPSTART_PREFIX),
- ]
-
- def handle_part(self, data, ctype, filename, payload, frequency):
- if ctype in handlers.CONTENT_SIGNALS:
- return
-
- # See: https://bugs.launchpad.net/bugs/819507
- if frequency != PER_INSTANCE:
- return
-
- if not self.upstart_dir:
- return
-
- filename = util.clean_filename(filename)
- (_name, ext) = os.path.splitext(filename)
- if not ext:
- ext = ''
- ext = ext.lower()
- if ext != ".conf":
- filename = filename + ".conf"
-
- payload = util.dos2unix(payload)
- path = os.path.join(self.upstart_dir, filename)
- util.write_file(path, payload, 0o644)
-
- if SUITABLE_UPSTART:
- util.subp(["initctl", "reload-configuration"], capture=False)
-
-
-def _has_suitable_upstart():
- # (LP: #1124384)
- # a bug in upstart means that invoking reload-configuration
- # at this stage in boot causes havoc. So, try to determine if upstart
- # is installed and whether reloading its configuration is safe.
- if not os.path.exists("/sbin/initctl"):
- return False
- try:
- (version_out, _err) = util.subp(["initctl", "version"])
- except Exception:
- util.logexc(LOG, "initctl version failed")
- return False
-
- # expecting 'initctl version' to output something like: init (upstart X.Y)
- # (the version string sits mid-line, so search rather than match from start)
- if re.search(r"upstart 1\.[0-7][)]", version_out):
- return False
- if "upstart 0." in version_out:
- return False
- elif "upstart 1.8" in version_out:
- if not os.path.exists("/usr/bin/dpkg-query"):
- return False
- try:
- (dpkg_ver, _err) = util.subp(["dpkg-query",
- "--showformat=${Version}",
- "--show", "upstart"], rcs=[0, 1])
- except Exception:
- util.logexc(LOG, "dpkg-query failed")
- return False
-
- try:
- good = "1.8-0ubuntu1.2"
- util.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good])
- return True
- except util.ProcessExecutionError as e:
- if e.exit_code == 1:
- pass
- else:
- util.logexc(LOG, "dpkg --compare-versions failed [%s]",
- e.exit_code)
- except Exception as e:
- util.logexc(LOG, "dpkg --compare-versions failed")
- return False
- else:
- return True
-
-SUITABLE_UPSTART = _has_suitable_upstart()
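
The 1.8 special case above reduces to a dpkg version comparison; a standalone sketch of that probe (hypothetical helper, stdlib only):

    import subprocess

    def dpkg_version_ge(installed, wanted):
        # dpkg --compare-versions exits 0 when the relation holds, 1 when not.
        rc = subprocess.call(
            ['dpkg', '--compare-versions', installed, 'ge', wanted])
        return rc == 0

    # e.g. dpkg_version_ge('1.8-0ubuntu1.2', '1.8-0ubuntu1')  # -> True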
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
deleted file mode 100644
index fb95babc..00000000
--- a/cloudinit/helpers.py
+++ /dev/null
@@ -1,460 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from time import time
-
-import contextlib
-import os
-
-import six
-from six.moves.configparser import (
- NoSectionError, NoOptionError, RawConfigParser)
-
-from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
- CFG_ENV_NAME)
-
-from cloudinit import log as logging
-from cloudinit import type_utils
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-class LockFailure(Exception):
- pass
-
-
-class DummyLock(object):
- pass
-
-
-class DummySemaphores(object):
- def __init__(self):
- pass
-
- @contextlib.contextmanager
- def lock(self, _name, _freq, _clear_on_fail=False):
- yield DummyLock()
-
- def has_run(self, _name, _freq):
- return False
-
- def clear(self, _name, _freq):
- return True
-
- def clear_all(self):
- pass
-
-
-class FileLock(object):
- def __init__(self, fn):
- self.fn = fn
-
- def __str__(self):
- return "<%s using file %r>" % (type_utils.obj_name(self), self.fn)
-
-
-def canon_sem_name(name):
- return name.replace("-", "_")
-
-
-class FileSemaphores(object):
- def __init__(self, sem_path):
- self.sem_path = sem_path
-
- @contextlib.contextmanager
- def lock(self, name, freq, clear_on_fail=False):
- name = canon_sem_name(name)
- try:
- yield self._acquire(name, freq)
- except Exception:
- if clear_on_fail:
- self.clear(name, freq)
- raise
-
- def clear(self, name, freq):
- name = canon_sem_name(name)
- sem_file = self._get_path(name, freq)
- try:
- util.del_file(sem_file)
- except (IOError, OSError):
- util.logexc(LOG, "Failed deleting semaphore %s", sem_file)
- return False
- return True
-
- def clear_all(self):
- try:
- util.del_dir(self.sem_path)
- except (IOError, OSError):
- util.logexc(LOG, "Failed deleting semaphore directory %s",
- self.sem_path)
-
- def _acquire(self, name, freq):
- # Check again if it has already been acquired
- if self.has_run(name, freq):
- return None
- # This is racy since nothing atomic happens here, but that is
- # acceptable given when and where cloud-init runs (writing a
- # file is not a real lock...)
- sem_file = self._get_path(name, freq)
- contents = "%s: %s\n" % (os.getpid(), time())
- try:
- util.write_file(sem_file, contents)
- except (IOError, OSError):
- util.logexc(LOG, "Failed writing semaphore file %s", sem_file)
- return None
- return FileLock(sem_file)
-
- def has_run(self, name, freq):
- if not freq or freq == PER_ALWAYS:
- return False
-
- cname = canon_sem_name(name)
- sem_file = self._get_path(cname, freq)
- # This isn't really a good atomic check
- # but it suffices for where and when cloudinit runs
- if os.path.exists(sem_file):
- return True
-
- # this case could happen if the migrator module hadn't run yet
- # but the item had run before we did canon_sem_name.
- if cname != name and os.path.exists(self._get_path(name, freq)):
- LOG.warn("%s has run without canonicalized name [%s].\n"
- "likely the migrator has not yet run. "
- "It will run next boot.\n"
- "run manually with: cloud-init single --name=migrator"
- % (name, cname))
- return True
-
- return False
-
- def _get_path(self, name, freq):
- sem_path = self.sem_path
- if not freq or freq == PER_INSTANCE:
- return os.path.join(sem_path, name)
- else:
- return os.path.join(sem_path, "%s.%s" % (name, freq))
-
-
-class Runners(object):
- def __init__(self, paths):
- self.paths = paths
- self.sems = {}
-
- def _get_sem(self, freq):
- if freq == PER_ALWAYS or not freq:
- return None
- sem_path = None
- if freq == PER_INSTANCE:
- # This path may not exist, which is why we still check for
- # None below: the paths object may not have a datasource
- # that can provide this instance path...
- sem_path = self.paths.get_ipath("sem")
- elif freq == PER_ONCE:
- sem_path = self.paths.get_cpath("sem")
- if not sem_path:
- return None
- if sem_path not in self.sems:
- self.sems[sem_path] = FileSemaphores(sem_path)
- return self.sems[sem_path]
-
- def run(self, name, functor, args, freq=None, clear_on_fail=False):
- sem = self._get_sem(freq)
- if not sem:
- sem = DummySemaphores()
- if not args:
- args = []
- if sem.has_run(name, freq):
- LOG.debug("%s already ran (freq=%s)", name, freq)
- return (False, None)
- with sem.lock(name, freq, clear_on_fail) as lk:
- if not lk:
- raise LockFailure("Failed to acquire lock for %s" % name)
- else:
- LOG.debug("Running %s using lock (%s)", name, lk)
- if isinstance(args, (dict)):
- results = functor(**args)
- else:
- results = functor(*args)
- return (True, results)
-
-
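
To see how Runners gates module execution, a usage sketch (the module name and functor are illustrative; paths stands in for a Paths instance, defined later in this file):

    def setup_motd():
        return '/etc/motd updated'

    runners = Runners(paths)
    ran, result = runners.run('config-motd', setup_motd, args=[],
                              freq=PER_INSTANCE)
    # First run for this instance: (True, '/etc/motd updated') and the
    # semaphore file is written; later runs return (False, None) until
    # the instance id (and thus the sem path) changes.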
-class ConfigMerger(object):
- def __init__(self, paths=None, datasource=None,
- additional_fns=None, base_cfg=None,
- include_vendor=True):
- self._paths = paths
- self._ds = datasource
- self._fns = additional_fns
- self._base_cfg = base_cfg
- self._include_vendor = include_vendor
- # Created on first use
- self._cfg = None
-
- def _get_datasource_configs(self):
- d_cfgs = []
- if self._ds:
- try:
- ds_cfg = self._ds.get_config_obj()
- if ds_cfg and isinstance(ds_cfg, (dict)):
- d_cfgs.append(ds_cfg)
- except Exception:
- util.logexc(LOG, "Failed loading of datasource config object "
- "from %s", self._ds)
- return d_cfgs
-
- def _get_env_configs(self):
- e_cfgs = []
- if CFG_ENV_NAME in os.environ:
- e_fn = os.environ[CFG_ENV_NAME]
- try:
- e_cfgs.append(util.read_conf(e_fn))
- except Exception:
- util.logexc(LOG, 'Failed loading of env. config from %s',
- e_fn)
- return e_cfgs
-
- def _get_instance_configs(self):
- i_cfgs = []
- # If cloud-config was written, pick it up as
- # a configuration file to use when running...
- if not self._paths:
- return i_cfgs
-
- cc_paths = ['cloud_config']
- if self._include_vendor:
- cc_paths.append('vendor_cloud_config')
-
- for cc_p in cc_paths:
- cc_fn = self._paths.get_ipath_cur(cc_p)
- if cc_fn and os.path.isfile(cc_fn):
- try:
- i_cfgs.append(util.read_conf(cc_fn))
- except Exception:
- util.logexc(LOG, 'Failed loading of cloud-config from %s',
- cc_fn)
- return i_cfgs
-
- def _read_cfg(self):
- # Input config files override
- # env config files which
- # override instance configs
- # which override datasource
- # configs which override
- # base configuration
- cfgs = []
- if self._fns:
- for c_fn in self._fns:
- try:
- cfgs.append(util.read_conf(c_fn))
- except Exception:
- util.logexc(LOG, "Failed loading of configuration from %s",
- c_fn)
-
- cfgs.extend(self._get_env_configs())
- cfgs.extend(self._get_instance_configs())
- cfgs.extend(self._get_datasource_configs())
- if self._base_cfg:
- cfgs.append(self._base_cfg)
- return util.mergemanydict(cfgs)
-
- @property
- def cfg(self):
- # None check to avoid empty case causing re-reading
- if self._cfg is None:
- self._cfg = self._read_cfg()
- return self._cfg
-
-
-class ContentHandlers(object):
-
- def __init__(self):
- self.registered = {}
- self.initialized = []
-
- def __contains__(self, item):
- return self.is_registered(item)
-
- def __getitem__(self, key):
- return self._get_handler(key)
-
- def is_registered(self, content_type):
- return content_type in self.registered
-
- def register(self, mod, initialized=False, overwrite=True):
- types = set()
- for t in mod.list_types():
- if overwrite:
- types.add(t)
- else:
- if not self.is_registered(t):
- types.add(t)
- for t in types:
- self.registered[t] = mod
- if initialized and mod not in self.initialized:
- self.initialized.append(mod)
- return types
-
- def _get_handler(self, content_type):
- return self.registered[content_type]
-
- def items(self):
- return list(self.registered.items())
-
-
-class Paths(object):
- def __init__(self, path_cfgs, ds=None):
- self.cfgs = path_cfgs
- # Populate all the initial paths
- self.cloud_dir = path_cfgs.get('cloud_dir', '/var/lib/cloud')
- self.run_dir = path_cfgs.get('run_dir', '/run/cloud-init')
- self.instance_link = os.path.join(self.cloud_dir, 'instance')
- self.boot_finished = os.path.join(self.instance_link, "boot-finished")
- self.upstart_conf_d = path_cfgs.get('upstart_dir')
- self.seed_dir = os.path.join(self.cloud_dir, 'seed')
- # This one isn't joined, since it should just be read-only
- template_dir = path_cfgs.get('templates_dir', '/etc/cloud/templates/')
- self.template_tpl = os.path.join(template_dir, '%s.tmpl')
- self.lookups = {
- "handlers": "handlers",
- "scripts": "scripts",
- "vendor_scripts": "scripts/vendor",
- "sem": "sem",
- "boothooks": "boothooks",
- "userdata_raw": "user-data.txt",
- "userdata": "user-data.txt.i",
- "obj_pkl": "obj.pkl",
- "cloud_config": "cloud-config.txt",
- "vendor_cloud_config": "vendor-cloud-config.txt",
- "data": "data",
- "vendordata_raw": "vendor-data.txt",
- "vendordata": "vendor-data.txt.i",
- "instance_id": ".instance-id",
- }
- # Set when a datasource becomes active
- self.datasource = ds
-
- # get_ipath_cur: get the current instance path for an item
- def get_ipath_cur(self, name=None):
- return self._get_path(self.instance_link, name)
-
- # get_cpath : get the "clouddir" (/var/lib/cloud/<name>)
- # for a name in dirmap
- def get_cpath(self, name=None):
- return self._get_path(self.cloud_dir, name)
-
- # _get_ipath : get the instance path for a name in pathmap
- # (/var/lib/cloud/instances/<instance>/<name>)
- def _get_ipath(self, name=None):
- if not self.datasource:
- return None
- iid = self.datasource.get_instance_id()
- if iid is None:
- return None
- path_safe_iid = str(iid).replace(os.sep, '_')
- ipath = os.path.join(self.cloud_dir, 'instances', path_safe_iid)
- add_on = self.lookups.get(name)
- if add_on:
- ipath = os.path.join(ipath, add_on)
- return ipath
-
- # get_ipath : get the instance path for a name in pathmap
- # (/var/lib/cloud/instances/<instance>/<name>)
- # returns None + warns if no active datasource....
- def get_ipath(self, name=None):
- ipath = self._get_ipath(name)
- if not ipath:
- LOG.warn(("No per instance data available, "
- "is there an datasource/iid set?"))
- return None
- else:
- return ipath
-
- def _get_path(self, base, name=None):
- if name is None:
- return base
- return os.path.join(base, self.lookups[name])
-
- def get_runpath(self, name=None):
- return self._get_path(self.run_dir, name)
-
-
-# This config parser will not throw when setting values on
-# sections that don't exist, which is useful when writing new
-# options that may not have corresponding sections yet. It can
-# also default values on gets, so that missing sections/options
-# yield a default instead of an error. That is another case where
-# you avoid catching exceptions you typically don't care about...
-
-class DefaultingConfigParser(RawConfigParser):
- DEF_INT = 0
- DEF_FLOAT = 0.0
- DEF_BOOLEAN = False
- DEF_BASE = None
-
- def get(self, section, option):
- value = self.DEF_BASE
- try:
- value = RawConfigParser.get(self, section, option)
- except NoSectionError:
- pass
- except NoOptionError:
- pass
- return value
-
- def set(self, section, option, value=None):
- if not self.has_section(section) and section.lower() != 'default':
- self.add_section(section)
- RawConfigParser.set(self, section, option, value)
-
- def remove_option(self, section, option):
- if self.has_option(section, option):
- RawConfigParser.remove_option(self, section, option)
-
- def getboolean(self, section, option):
- if not self.has_option(section, option):
- return self.DEF_BOOLEAN
- return RawConfigParser.getboolean(self, section, option)
-
- def getfloat(self, section, option):
- if not self.has_option(section, option):
- return self.DEF_FLOAT
- return RawConfigParser.getfloat(self, section, option)
-
- def getint(self, section, option):
- if not self.has_option(section, option):
- return self.DEF_INT
- return RawConfigParser.getint(self, section, option)
-
- def stringify(self, header=None):
- contents = ''
- with six.StringIO() as outputstream:
- self.write(outputstream)
- outputstream.flush()
- contents = outputstream.getvalue()
- if header:
- contents = "\n".join([header, contents])
- return contents
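
A brief usage sketch of the defaulting behavior (section/option names illustrative):

    cp = DefaultingConfigParser()
    cp.get('puppet', 'server')         # -> None, not NoSectionError
    cp.getint('puppet', 'splaylimit')  # -> 0 (DEF_INT)
    cp.set('puppet', 'server', 'puppet.example.com')  # section auto-created
    print(cp.stringify(header='# rendered by cloud-init'))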
diff --git a/cloudinit/importer.py b/cloudinit/importer.py
deleted file mode 100644
index fb57253c..00000000
--- a/cloudinit/importer.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-
-
-def import_module(module_name):
- __import__(module_name)
- return sys.modules[module_name]
-
-
-def find_module(base_name, search_paths, required_attrs=None):
- if not required_attrs:
- required_attrs = []
- # NOTE(harlowja): translate the search paths to include the base name.
- lookup_paths = []
- for path in search_paths:
- real_path = []
- if path:
- real_path.extend(path.split("."))
- real_path.append(base_name)
- full_path = '.'.join(real_path)
- lookup_paths.append(full_path)
- found_paths = []
- for full_path in lookup_paths:
- mod = None
- try:
- mod = import_module(full_path)
- except ImportError:
- pass
- if not mod:
- continue
- found_attrs = 0
- for attr in required_attrs:
- if hasattr(mod, attr):
- found_attrs += 1
- if found_attrs == len(required_attrs):
- found_paths.append(full_path)
- return (found_paths, lookup_paths)
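
This is, for example, how mergers.construct() (later in this diff) locates merger implementations. A sketch against that layout:

    # Searches 'cloudinit.mergers' for a module named 'm_dict' exposing
    # a 'Merger' attribute.
    found, searched = find_module('m_dict', ['cloudinit.mergers'], ['Merger'])
    # searched -> ['cloudinit.mergers.m_dict']
    # found    -> the same list when the module imports cleanly and has
    #             'Merger', otherwise []
    if found:
        merger_cls = getattr(import_module(found[0]), 'Merger')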
diff --git a/cloudinit/log.py b/cloudinit/log.py
deleted file mode 100644
index 3c79b9c9..00000000
--- a/cloudinit/log.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-import logging.config
-import logging.handlers
-
-import collections
-import os
-import sys
-
-import six
-from six import StringIO
-
-# Logging levels for easy access
-CRITICAL = logging.CRITICAL
-FATAL = logging.FATAL
-ERROR = logging.ERROR
-WARNING = logging.WARNING
-WARN = logging.WARN
-INFO = logging.INFO
-DEBUG = logging.DEBUG
-NOTSET = logging.NOTSET
-
-# Default basic format
-DEF_CON_FORMAT = '%(asctime)s - %(filename)s[%(levelname)s]: %(message)s'
-
-
-def setupBasicLogging(level=DEBUG):
- root = logging.getLogger()
- console = logging.StreamHandler(sys.stderr)
- console.setFormatter(logging.Formatter(DEF_CON_FORMAT))
- console.setLevel(level)
- root.addHandler(console)
- root.setLevel(level)
-
-
-def flushLoggers(root):
- if not root:
- return
- for h in root.handlers:
- if isinstance(h, (logging.StreamHandler)):
- try:
- h.flush()
- except IOError:
- pass
- flushLoggers(root.parent)
-
-
-def setupLogging(cfg=None):
- # See if the config provides any logging conf...
- if not cfg:
- cfg = {}
-
- log_cfgs = []
- log_cfg = cfg.get('logcfg')
- if log_cfg and isinstance(log_cfg, six.string_types):
- # If there is a 'logcfg' entry in the config,
- # respect it, it is the old keyname
- log_cfgs.append(str(log_cfg))
- elif "log_cfgs" in cfg:
- for a_cfg in cfg['log_cfgs']:
- if isinstance(a_cfg, six.string_types):
- log_cfgs.append(a_cfg)
- elif isinstance(a_cfg, (collections.Iterable)):
- cfg_str = [str(c) for c in a_cfg]
- log_cfgs.append('\n'.join(cfg_str))
- else:
- log_cfgs.append(str(a_cfg))
-
- # See if any of them actually load...
- am_tried = 0
- for log_cfg in log_cfgs:
- try:
- am_tried += 1
- # Assume it's just a config string if it is not a filename
- if log_cfg.startswith("/") and os.path.isfile(log_cfg):
- # It is a real file on disk; pass the path through
- # untouched instead of wrapping it in a buffer that
- # acts like a file
- pass
- else:
- log_cfg = StringIO(log_cfg)
- # Attempt to load its config
- logging.config.fileConfig(log_cfg)
- # The first one to work wins!
- return
- except Exception:
- # We do not write any logs of this here, because the default
- # configuration includes an attempt at using /dev/log, followed
- # up by writing to a file. /dev/log will not exist in very early
- # boot, so an exception on that is expected.
- pass
-
- # If it didn't work, at least setup a basic logger (if desired)
- basic_enabled = cfg.get('log_basic', True)
-
- sys.stderr.write(("WARN: no logging configured!"
- " (tried %s configs)\n") % (am_tried))
- if basic_enabled:
- sys.stderr.write("Setting up basic logging...\n")
- setupBasicLogging()
-
-
-def getLogger(name='cloudinit'):
- return logging.getLogger(name)
-
-
-# Avoids the annoying "No handlers could be found for
-# logger XXX" output when no handler is configured...
-try:
- from logging import NullHandler
-except ImportError:
- class NullHandler(logging.Handler):
- def emit(self, record):
- pass
-
-
-def _resetLogger(log):
- if not log:
- return
- handlers = list(log.handlers)
- for h in handlers:
- h.flush()
- h.close()
- log.removeHandler(h)
- log.setLevel(NOTSET)
- log.addHandler(NullHandler())
-
-
-def resetLogging():
- _resetLogger(logging.getLogger())
- _resetLogger(getLogger())
-
-
-resetLogging()
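
Since setupLogging() hands each candidate to logging.config.fileConfig, a working 'logcfg' value is plain fileConfig INI. A minimal example (handler/formatter names illustrative):

    LOGCFG = '''
    [loggers]
    keys=root

    [handlers]
    keys=console

    [formatters]
    keys=simple

    [logger_root]
    level=DEBUG
    handlers=console

    [handler_console]
    class=StreamHandler
    level=DEBUG
    formatter=simple
    args=(sys.stderr,)

    [formatter_simple]
    format=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s
    '''
    setupLogging({'logcfg': LOGCFG})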
diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py
deleted file mode 100644
index e13f55ac..00000000
--- a/cloudinit/mergers/__init__.py
+++ /dev/null
@@ -1,166 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import re
-
-import six
-
-from cloudinit import importer
-from cloudinit import log as logging
-from cloudinit import type_utils
-
-NAME_MTCH = re.compile(r"(^[a-zA-Z_][A-Za-z0-9_]*)\((.*?)\)$")
-
-LOG = logging.getLogger(__name__)
-DEF_MERGE_TYPE = "list()+dict()+str()"
-MERGER_PREFIX = 'm_'
-MERGER_ATTR = 'Merger'
-
-
-class UnknownMerger(object):
- # Named differently so auto-method finding
- # doesn't pick this up if there is ever a type
- # named "unknown"
- def _handle_unknown(self, _meth_wanted, value, _merge_with):
- return value
-
- # This merge will look for a '_on_X' method on our own
- # object for a given source object of type X; if found, it
- # is called to merge the source object with merge_with.
- #
- # If not found, the merge is handed to '_handle_unknown',
- # which can decide what to do with the two values.
- def merge(self, source, merge_with):
- type_name = type_utils.obj_name(source)
- type_name = type_name.lower()
- method_name = "_on_%s" % (type_name)
- meth = None
- args = [source, merge_with]
- if hasattr(self, method_name):
- meth = getattr(self, method_name)
- if not meth:
- meth = self._handle_unknown
- args.insert(0, method_name)
- return meth(*args)
-
-
-class LookupMerger(UnknownMerger):
- def __init__(self, lookups=None):
- UnknownMerger.__init__(self)
- if lookups is None:
- self._lookups = []
- else:
- self._lookups = lookups
-
- def __str__(self):
- return 'LookupMerger: (%s)' % (len(self._lookups))
-
- # For items which cannot be merged by the parent, this object
- # will look through an internally maintained set of objects
- # to find one that can perform the merge. The first contained
- # object that has the needed method is called to perform it.
- def _handle_unknown(self, meth_wanted, value, merge_with):
- meth = None
- for merger in self._lookups:
- if hasattr(merger, meth_wanted):
- # First one that has that method/attr gets to be
- # the one that will be called
- meth = getattr(merger, meth_wanted)
- break
- if not meth:
- return UnknownMerger._handle_unknown(self, meth_wanted,
- value, merge_with)
- return meth(value, merge_with)
-
-
-def dict_extract_mergers(config):
- parsed_mergers = []
- raw_mergers = config.pop('merge_how', None)
- if raw_mergers is None:
- raw_mergers = config.pop('merge_type', None)
- if raw_mergers is None:
- return parsed_mergers
- if isinstance(raw_mergers, six.string_types):
- return string_extract_mergers(raw_mergers)
- for m in raw_mergers:
- if isinstance(m, (dict)):
- name = m['name']
- name = name.replace("-", "_").strip()
- opts = m['settings']
- else:
- name = m[0]
- if len(m) >= 2:
- opts = m[1:]
- else:
- opts = []
- if name:
- parsed_mergers.append((name, opts))
- return parsed_mergers
-
-
-def string_extract_mergers(merge_how):
- parsed_mergers = []
- for m_name in merge_how.split("+"):
- # Canonicalize the name (so that it can be found
- # even when users alter it in various ways)
- m_name = m_name.lower().strip()
- m_name = m_name.replace("-", "_")
- if not m_name:
- continue
- match = NAME_MTCH.match(m_name)
- if not match:
- msg = ("Matcher identifer '%s' is not in the right format" %
- (m_name))
- raise ValueError(msg)
- (m_name, m_ops) = match.groups()
- m_ops = m_ops.strip().split(",")
- m_ops = [m.strip().lower() for m in m_ops if m.strip()]
- parsed_mergers.append((m_name, m_ops))
- return parsed_mergers
-
-
-def default_mergers():
- return tuple(string_extract_mergers(DEF_MERGE_TYPE))
-
-
-def construct(parsed_mergers):
- mergers_to_be = []
- for (m_name, m_ops) in parsed_mergers:
- if not m_name.startswith(MERGER_PREFIX):
- m_name = MERGER_PREFIX + str(m_name)
- merger_locs, looked_locs = importer.find_module(m_name,
- [__name__],
- [MERGER_ATTR])
- if not merger_locs:
- msg = ("Could not find merger module named '%s' "
- "with attribute '%s' (searched %s)") % (m_name,
- MERGER_ATTR,
- looked_locs)
- raise ImportError(msg)
- else:
- mod = importer.import_module(merger_locs[0])
- mod_attr = getattr(mod, MERGER_ATTR)
- mergers_to_be.append((mod_attr, m_ops))
- # Now form them...
- mergers = []
- root = LookupMerger(mergers)
- for (attr, opts) in mergers_to_be:
- mergers.append(attr(root, opts))
- return root
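
To make the merge_how grammar concrete, a short sketch of parsing and applying the default chain (assuming the package is importable as cloudinit.mergers):

    from cloudinit import mergers

    mergers.string_extract_mergers('list(append)+dict(recurse_list)+str()')
    # -> [('list', ['append']), ('dict', ['recurse_list']), ('str', [])]

    merger = mergers.construct(mergers.default_mergers())  # list()+dict()+str()
    merger.merge({'a': [1], 'b': 'x'}, {'a': [2], 'c': 3})
    # -> {'a': [1], 'b': 'x', 'c': 3}: the default dict method is
    #    'no_replace', so existing keys win and only new keys are added.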
diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py
deleted file mode 100644
index 87cf1a72..00000000
--- a/cloudinit/mergers/m_dict.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-
-DEF_MERGE_TYPE = 'no_replace'
-MERGE_TYPES = ('replace', DEF_MERGE_TYPE,)
-
-
-def _has_any(what, *keys):
- for k in keys:
- if k in what:
- return True
- return False
-
-
-class Merger(object):
- def __init__(self, merger, opts):
- self._merger = merger
- # Affects merging behavior...
- self._method = DEF_MERGE_TYPE
- for m in MERGE_TYPES:
- if m in opts:
- self._method = m
- break
- # Affects how recursive merging is done on other primitives.
- self._recurse_str = 'recurse_str' in opts
- self._recurse_array = _has_any(opts, 'recurse_array', 'recurse_list')
- self._allow_delete = 'allow_delete' in opts
- # Backwards compat requires this to be on.
- self._recurse_dict = True
-
- def __str__(self):
- s = ('DictMerger: (method=%s,recurse_str=%s,'
- 'recurse_dict=%s,recurse_array=%s,allow_delete=%s)')
- s = s % (self._method, self._recurse_str,
- self._recurse_dict, self._recurse_array, self._allow_delete)
- return s
-
- def _do_dict_replace(self, value, merge_with, do_replace):
-
- def merge_same_key(old_v, new_v):
- if do_replace:
- return new_v
- if isinstance(new_v, (list, tuple)) and self._recurse_array:
- return self._merger.merge(old_v, new_v)
- if isinstance(new_v, six.string_types) and self._recurse_str:
- return self._merger.merge(old_v, new_v)
- if isinstance(new_v, (dict)) and self._recurse_dict:
- return self._merger.merge(old_v, new_v)
- # Otherwise leave it be...
- return old_v
-
- for (k, v) in merge_with.items():
- if k in value:
- if v is None and self._allow_delete:
- value.pop(k)
- else:
- value[k] = merge_same_key(value[k], v)
- else:
- value[k] = v
- return value
-
- def _on_dict(self, value, merge_with):
- if not isinstance(merge_with, (dict)):
- return value
- if self._method == 'replace':
- merged = self._do_dict_replace(dict(value), merge_with, True)
- elif self._method == 'no_replace':
- merged = self._do_dict_replace(dict(value), merge_with, False)
- else:
- raise NotImplementedError("Unknown merge type %s" % (self._method))
- return merged
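
A quick sketch of the distinctive allow_delete option, using the parse/construct helpers above:

    from cloudinit import mergers

    m = mergers.construct(mergers.string_extract_mergers(
        'dict(no_replace,allow_delete)+list()+str()'))
    m.merge({'a': 1, 'b': 2}, {'a': None, 'c': 3})
    # -> {'b': 2, 'c': 3}: 'a' is deleted because its incoming value is
    #    None and allow_delete is set; 'c' is simply added.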
diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py
deleted file mode 100644
index 81e5c580..00000000
--- a/cloudinit/mergers/m_list.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-
-DEF_MERGE_TYPE = 'replace'
-MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE, 'no_replace')
-
-
-def _has_any(what, *keys):
- for k in keys:
- if k in what:
- return True
- return False
-
-
-class Merger(object):
- def __init__(self, merger, opts):
- self._merger = merger
- # Affects merging behavior...
- self._method = DEF_MERGE_TYPE
- for m in MERGE_TYPES:
- if m in opts:
- self._method = m
- break
- # Affects how recursive merging is done on other primitives
- self._recurse_str = _has_any(opts, 'recurse_str')
- self._recurse_dict = _has_any(opts, 'recurse_dict')
- self._recurse_array = _has_any(opts, 'recurse_array', 'recurse_list')
-
- def __str__(self):
- return ('ListMerger: (method=%s,recurse_str=%s,'
- 'recurse_dict=%s,recurse_array=%s)') % (self._method,
- self._recurse_str,
- self._recurse_dict,
- self._recurse_array)
-
- def _on_tuple(self, value, merge_with):
- return tuple(self._on_list(list(value), merge_with))
-
- def _on_list(self, value, merge_with):
- if (self._method == 'replace' and
- not isinstance(merge_with, (tuple, list))):
- return merge_with
-
- # Ok we now know that what we are merging with is a list or tuple.
- merged_list = []
- if self._method == 'prepend':
- merged_list.extend(merge_with)
- merged_list.extend(value)
- return merged_list
- elif self._method == 'append':
- merged_list.extend(value)
- merged_list.extend(merge_with)
- return merged_list
-
- def merge_same_index(old_v, new_v):
- if self._method == 'no_replace':
- # Leave it be...
- return old_v
- if isinstance(new_v, (list, tuple)) and self._recurse_array:
- return self._merger.merge(old_v, new_v)
- if isinstance(new_v, six.string_types) and self._recurse_str:
- return self._merger.merge(old_v, new_v)
- if isinstance(new_v, (dict)) and self._recurse_dict:
- return self._merger.merge(old_v, new_v)
- return new_v
-
- # Ok, now replace entries at matching indexes
- merged_list.extend(value)
- common_len = min(len(merged_list), len(merge_with))
- for i in range(0, common_len):
- merged_list[i] = merge_same_index(merged_list[i], merge_with[i])
- return merged_list
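
The method option changes list handling substantially; a sketch of the three common modes (note that the default 'replace' works per-index, so trailing items of the original survive):

    from cloudinit import mergers

    def make(spec):
        return mergers.construct(mergers.string_extract_mergers(spec))

    make('list()+dict()+str()').merge([1, 2], [3])         # -> [3, 2]
    make('list(append)+dict()+str()').merge([1, 2], [3])   # -> [1, 2, 3]
    make('list(prepend)+dict()+str()').merge([1, 2], [3])  # -> [3, 1, 2]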
diff --git a/cloudinit/mergers/m_str.py b/cloudinit/mergers/m_str.py
deleted file mode 100644
index b00c4bf3..00000000
--- a/cloudinit/mergers/m_str.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# -*- coding: utf-8 -*-
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-
-
-class Merger(object):
- def __init__(self, _merger, opts):
- self._append = 'append' in opts
-
- def __str__(self):
- return 'StringMerger: (append=%s)' % (self._append)
-
- # On encountering a unicode object to merge with, for now
- # we just proxy into the string method and let it handle it.
- def _on_unicode(self, value, merge_with):
- return self._on_str(value, merge_with)
-
- # On encountering a string object to merge with, we either
- # append merge_with to the existing value (when appending is
- # enabled) or simply return merge_with as the replacement.
- def _on_str(self, value, merge_with):
- if not isinstance(value, six.string_types):
- return merge_with
- if not self._append:
- return merge_with
- if isinstance(value, six.text_type):
- return value + six.text_type(merge_with)
- else:
- return value + six.binary_type(merge_with)
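
And the string merger in action (only append changes anything; otherwise merge_with simply wins):

    from cloudinit import mergers

    m = mergers.construct(mergers.string_extract_mergers(
        'str(append)+list()+dict()'))
    m.merge(u'foo', u'bar')  # -> u'foobar'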
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
deleted file mode 100644
index 21cc602b..00000000
--- a/cloudinit/net/__init__.py
+++ /dev/null
@@ -1,371 +0,0 @@
-# Copyright (C) 2013-2014 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Blake Rouse <blake.rouse@canonical.com>
-#
-# Curtin is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Affero General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version.
-#
-# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
-# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
-# more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
-
-import errno
-import logging
-import os
-import re
-
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-SYS_CLASS_NET = "/sys/class/net/"
-DEFAULT_PRIMARY_INTERFACE = 'eth0'
-
-
-def sys_dev_path(devname, path=""):
- return SYS_CLASS_NET + devname + "/" + path
-
-
-def read_sys_net(devname, path, translate=None, enoent=None, keyerror=None):
- try:
- contents = util.load_file(sys_dev_path(devname, path))
- except (OSError, IOError) as e:
- if getattr(e, 'errno', None) == errno.ENOENT:
- if enoent is not None:
- return enoent
- raise
- contents = contents.strip()
- if translate is None:
- return contents
- try:
- return translate.get(contents)
- except KeyError:
- LOG.debug("found unexpected value '%s' in '%s/%s'", contents,
- devname, path)
- if keyerror is not None:
- return keyerror
- raise
-
-
-def is_up(devname):
- # The linux kernel says to consider devices in 'unknown'
- # operstate as up for the purposes of network configuration. See
- # Documentation/networking/operstates.txt in the kernel source.
- translate = {'up': True, 'unknown': True, 'down': False}
- return read_sys_net(devname, "operstate", enoent=False, keyerror=False,
- translate=translate)
-
-
-def is_wireless(devname):
- return os.path.exists(sys_dev_path(devname, "wireless"))
-
-
-def is_connected(devname):
- # is_connected isn't really that simple: an iflink of 2 means
- # 'physically connected' and 3 means 'not connected', but a wlan
- # interface will always show 3.
- try:
- iflink = read_sys_net(devname, "iflink", enoent=False)
- if iflink == "2":
- return True
- if not is_wireless(devname):
- return False
- LOG.debug("'%s' is wireless, basing 'connected' on carrier", devname)
-
- return read_sys_net(devname, "carrier", enoent=False, keyerror=False,
- translate={'0': False, '1': True})
-
- except IOError as e:
- if e.errno == errno.EINVAL:
- return False
- raise
-
-
-def is_physical(devname):
- return os.path.exists(sys_dev_path(devname, "device"))
-
-
-def is_present(devname):
- return os.path.exists(sys_dev_path(devname))
-
-
-def get_devicelist():
- return os.listdir(SYS_CLASS_NET)
-
-
-class ParserError(Exception):
- """Raised when a parser has issue parsing a file/content."""
-
-
-def is_disabled_cfg(cfg):
- if not cfg or not isinstance(cfg, dict):
- return False
- return cfg.get('config') == "disabled"
-
-
-def sys_netdev_info(name, field):
- if not os.path.exists(os.path.join(SYS_CLASS_NET, name)):
- raise OSError("%s: interface does not exist in %s" %
- (name, SYS_CLASS_NET))
- fname = os.path.join(SYS_CLASS_NET, name, field)
- if not os.path.exists(fname):
- raise OSError("%s: could not find sysfs entry: %s" % (name, fname))
- data = util.load_file(fname)
- if data[-1] == '\n':
- data = data[:-1]
- return data
-
-
-def generate_fallback_config():
- """Determine which attached net dev is most likely to have a connection and
- generate network state to run dhcp on that interface"""
- # by default use eth0 as primary interface
- nconf = {'config': [], 'version': 1}
-
- # get list of interfaces that could have connections
- invalid_interfaces = set(['lo'])
- potential_interfaces = set(get_devicelist())
- potential_interfaces = potential_interfaces.difference(invalid_interfaces)
- # sort into interfaces with carrier, interfaces which could have carrier,
- # and ignore interfaces that are definitely disconnected
- connected = []
- possibly_connected = []
- for interface in potential_interfaces:
- if interface.startswith("veth"):
- continue
- if os.path.exists(sys_dev_path(interface, "bridge")):
- # skip any bridges
- continue
- try:
- carrier = int(sys_netdev_info(interface, 'carrier'))
- if carrier:
- connected.append(interface)
- continue
- except OSError:
- pass
- # check if nic is dormant or down, as this may make a nic appear to
- # have no carrier even though it could acquire one when brought
- # online by dhclient
- try:
- dormant = int(sys_netdev_info(interface, 'dormant'))
- if dormant:
- possibly_connected.append(interface)
- continue
- except OSError:
- pass
- try:
- operstate = sys_netdev_info(interface, 'operstate')
- if operstate in ['dormant', 'down', 'lowerlayerdown', 'unknown']:
- possibly_connected.append(interface)
- continue
- except OSError:
- pass
-
- # don't bother with interfaces that might not be connected if there are
- # some that definitely are
- if connected:
- potential_interfaces = connected
- else:
- potential_interfaces = possibly_connected
- # if there are no interfaces, give up
- if not potential_interfaces:
- return
- # if eth0 exists use it above anything else, otherwise get the interface
- # that looks 'first'
- if DEFAULT_PRIMARY_INTERFACE in potential_interfaces:
- name = DEFAULT_PRIMARY_INTERFACE
- else:
- name = sorted(potential_interfaces)[0]
-
- mac = sys_netdev_info(name, 'address')
- target_name = name
-
- nconf['config'].append(
- {'type': 'physical', 'name': target_name,
- 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]})
- return nconf
-
-
-def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
- """read the network config and rename devices accordingly.
- if strict_present is false, then do not raise exception if no devices
- match. if strict_busy is false, then do not raise exception if the
- device cannot be renamed because it is currently configured."""
- renames = []
- for ent in netcfg.get('config', {}):
- if ent.get('type') != 'physical':
- continue
- mac = ent.get('mac_address')
- name = ent.get('name')
- if not mac:
- continue
- renames.append([mac, name])
-
- return _rename_interfaces(renames, strict_present=strict_present,
- strict_busy=strict_busy)
-
-
-def _get_current_rename_info(check_downable=True):
- """Collect information necessary for rename_interfaces."""
- names = get_devicelist()
- bymac = {}
- for n in names:
- bymac[get_interface_mac(n)] = {
- 'name': n, 'up': is_up(n), 'downable': None}
-
- if check_downable:
- nmatch = re.compile(r"[0-9]+:\s+(\w+)[@:]")
- ipv6, _err = util.subp(['ip', '-6', 'addr', 'show', 'permanent',
- 'scope', 'global'], capture=True)
- ipv4, _err = util.subp(['ip', '-4', 'addr', 'show'], capture=True)
-
- nics_with_addresses = set()
- for bytes_out in (ipv6, ipv4):
- nics_with_addresses.update(nmatch.findall(bytes_out))
-
- for d in bymac.values():
- d['downable'] = (d['up'] is False or
- d['name'] not in nics_with_addresses)
-
- return bymac
-
-
-def _rename_interfaces(renames, strict_present=True, strict_busy=True,
- current_info=None):
-
- if not len(renames):
- LOG.debug("no interfaces to rename")
- return
-
- if current_info is None:
- current_info = _get_current_rename_info()
-
- cur_bymac = {}
- for mac, data in current_info.items():
- cur = data.copy()
- cur['mac'] = mac
- cur_bymac[mac] = cur
-
- def update_byname(bymac):
- return dict((data['name'], data)
- for data in bymac.values())
-
- def rename(cur, new):
- util.subp(["ip", "link", "set", cur, "name", new], capture=True)
-
- def down(name):
- util.subp(["ip", "link", "set", name, "down"], capture=True)
-
- def up(name):
- util.subp(["ip", "link", "set", name, "up"], capture=True)
-
- ops = []
- errors = []
- ups = []
- cur_byname = update_byname(cur_bymac)
- tmpname_fmt = "cirename%d"
- tmpi = -1
-
- for mac, new_name in renames:
- cur = cur_bymac.get(mac, {})
- cur_name = cur.get('name')
- cur_ops = []
- if cur_name == new_name:
- # nothing to do
- continue
-
- if not cur_name:
- if strict_present:
- errors.append(
- "[nic not present] Cannot rename mac=%s to %s"
- ", not available." % (mac, new_name))
- continue
-
- if cur['up']:
- msg = "[busy] Error renaming mac=%s from %s to %s"
- if not cur['downable']:
- if strict_busy:
- errors.append(msg % (mac, cur_name, new_name))
- continue
- cur['up'] = False
- cur_ops.append(("down", mac, new_name, (cur_name,)))
- ups.append(("up", mac, new_name, (new_name,)))
-
- if new_name in cur_byname:
- target = cur_byname[new_name]
- if target['up']:
- msg = "[busy-target] Error renaming mac=%s from %s to %s."
- if not target['downable']:
- if strict_busy:
- errors.append(msg % (mac, cur_name, new_name))
- continue
- else:
- cur_ops.append(("down", mac, new_name, (new_name,)))
-
- tmp_name = None
- while tmp_name is None or tmp_name in cur_byname:
- tmpi += 1
- tmp_name = tmpname_fmt % tmpi
-
- cur_ops.append(("rename", mac, new_name, (new_name, tmp_name)))
- target['name'] = tmp_name
- cur_byname = update_byname(cur_bymac)
- if target['up']:
- ups.append(("up", mac, new_name, (tmp_name,)))
-
- cur_ops.append(("rename", mac, new_name, (cur['name'], new_name)))
- cur['name'] = new_name
- cur_byname = update_byname(cur_bymac)
- ops += cur_ops
-
- opmap = {'rename': rename, 'down': down, 'up': up}
-
- if len(ops) + len(ups) == 0:
- if len(errors):
- LOG.debug("unable to do any work for renaming of %s", renames)
- else:
- LOG.debug("no work necessary for renaming of %s", renames)
- else:
- LOG.debug("achieving renaming of %s with ops %s", renames, ops + ups)
-
- for op, mac, new_name, params in ops + ups:
- try:
- opmap.get(op)(*params)
- except Exception as e:
- errors.append(
- "[unknown] Error performing %s%s for %s, %s: %s" %
- (op, params, mac, new_name, e))
-
- if len(errors):
- raise Exception('\n'.join(errors))
-
-
-def get_interface_mac(ifname):
- """Returns the string value of an interface's MAC Address"""
- return read_sys_net(ifname, "address", enoent=False)
-
-
-def get_interfaces_by_mac(devs=None):
- """Build a dictionary of tuples {mac: name}"""
- if devs is None:
- try:
- devs = get_devicelist()
- except OSError as e:
- if e.errno == errno.ENOENT:
- devs = []
- else:
- raise
- ret = {}
- for name in devs:
- mac = get_interface_mac(name)
- # some devices may not have a mac (tun0)
- if mac:
- ret[mac] = name
- return ret
-
-# vi: ts=4 expandtab syntax=python
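
For reference, the structure generate_fallback_config() emits for a single usable NIC, as assembled above (MAC value illustrative):

    {'version': 1,
     'config': [{'type': 'physical',
                 'name': 'eth0',
                 'mac_address': '52:54:00:12:34:56',
                 'subnets': [{'type': 'dhcp'}]}]}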
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
deleted file mode 100644
index 822a020b..00000000
--- a/cloudinit/net/cmdline.py
+++ /dev/null
@@ -1,203 +0,0 @@
-# Copyright (C) 2013-2014 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Blake Rouse <blake.rouse@canonical.com>
-#
-# Curtin is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Affero General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version.
-#
-# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
-# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
-# more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
-
-import base64
-import glob
-import gzip
-import io
-import shlex
-import sys
-
-import six
-
-from . import get_devicelist
-from . import sys_netdev_info
-
-from cloudinit import util
-
-PY26 = sys.version_info[0:2] == (2, 6)
-
-
-def _shlex_split(blob):
- if PY26 and isinstance(blob, six.text_type):
- # Older versions don't support unicode input
- blob = blob.encode("utf8")
- return shlex.split(blob)
-
-
-def _load_shell_content(content, add_empty=False, empty_val=None):
- """Given shell like syntax (key=value\nkey2=value2\n) in content
- return the data in dictionary form. If 'add_empty' is True
- then add entries in to the returned dictionary for 'VAR='
- variables. Set their value to empty_val."""
- data = {}
- for line in _shlex_split(content):
- key, value = line.split("=", 1)
- if not value:
- value = empty_val
- if add_empty or value:
- data[key] = value
-
- return data
-
-
-def _klibc_to_config_entry(content, mac_addrs=None):
- """Convert a klibc writtent shell content file to a 'config' entry
- When ip= is seen on the kernel command line in debian initramfs
- and networking is brought up, ipconfig will populate
- /run/net-<name>.cfg.
-
- The files use shell-style syntax, and examples are in the tests
- provided here. Unfortunately there is no good documentation on this.
-
- DEVICE=<name> is expected/required and PROTO should indicate if
- this is 'static' or 'dhcp'.
- """
-
- if mac_addrs is None:
- mac_addrs = {}
-
- data = _load_shell_content(content)
- try:
- name = data['DEVICE']
- except KeyError:
- raise ValueError("no 'DEVICE' entry in data")
-
- # ipconfig on precise does not write PROTO
- proto = data.get('PROTO')
- if not proto:
- if data.get('filename'):
- proto = 'dhcp'
- else:
- proto = 'static'
-
- if proto not in ('static', 'dhcp'):
- raise ValueError("Unexpected value for PROTO: %s" % proto)
-
- iface = {
- 'type': 'physical',
- 'name': name,
- 'subnets': [],
- }
-
- if name in mac_addrs:
- iface['mac_address'] = mac_addrs[name]
-
- # originally believed there might be IPV6* values
- for v, pre in (('ipv4', 'IPV4'),):
- # if no IPV4ADDR or IPV6ADDR, then go on.
- if pre + "ADDR" not in data:
- continue
- subnet = {'type': proto, 'control': 'manual'}
-
- # these fields go right on the subnet
- for key in ('NETMASK', 'BROADCAST', 'GATEWAY'):
- if pre + key in data:
- subnet[key.lower()] = data[pre + key]
-
- dns = []
- # handle IPV4DNS0 or IPV6DNS0
- for nskey in ('DNS0', 'DNS1'):
- ns = data.get(pre + nskey)
- # verify it has something other than 0.0.0.0 (or ipv6)
- if ns and len(ns.strip(":.0")):
- dns.append(data[pre + nskey])
- if dns:
- subnet['dns_nameservers'] = dns
- # add search to both ipv4 and ipv6, as it has no namespace
- search = data.get('DOMAINSEARCH')
- if search:
- if ',' in search:
- subnet['dns_search'] = search.split(",")
- else:
- subnet['dns_search'] = search.split()
-
- iface['subnets'].append(subnet)
-
- return name, iface
-
-
-def config_from_klibc_net_cfg(files=None, mac_addrs=None):
- if files is None:
- files = glob.glob('/run/net*.conf')
-
- entries = []
- names = {}
- for cfg_file in files:
- name, entry = _klibc_to_config_entry(util.load_file(cfg_file),
- mac_addrs=mac_addrs)
- if name in names:
- raise ValueError(
- "device '%s' defined multiple times: %s and %s" % (
- name, names[name], cfg_file))
-
- names[name] = cfg_file
- entries.append(entry)
- return {'config': entries, 'version': 1}
-
-
-def _decomp_gzip(blob, strict=True):
- # decompress blob. raise exception if not compressed unless strict=False.
- with io.BytesIO(blob) as iobuf:
- gzfp = None
- try:
- gzfp = gzip.GzipFile(mode="rb", fileobj=iobuf)
- return gzfp.read()
- except IOError:
- if strict:
- raise
- return blob
- finally:
- if gzfp:
- gzfp.close()
-
-
-def _b64dgz(b64str, gzipped="try"):
- # decode a base64 string. If gzipped is true, transparently uncompress;
- # if gzipped is 'try', then try gunzip, returning the original on fail.
- try:
- blob = base64.b64decode(b64str)
- except TypeError:
- raise ValueError("Invalid base64 text: %s" % b64str)
-
- if not gzipped:
- return blob
-
- return _decomp_gzip(blob, strict=gzipped != "try")
-
-
-def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None):
- if cmdline is None:
- cmdline = util.get_cmdline()
-
- if 'network-config=' in cmdline:
- data64 = None
- for tok in cmdline.split():
- if tok.startswith("network-config="):
- data64 = tok.split("=", 1)[1]
- if data64:
- return util.load_yaml(_b64dgz(data64))
-
- if 'ip=' not in cmdline:
- return None
-
- if mac_addrs is None:
- mac_addrs = dict((k, sys_netdev_info(k, 'address'))
- for k in get_devicelist())
-
- return config_from_klibc_net_cfg(files=files, mac_addrs=mac_addrs)
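
A worked example of the klibc conversion, with content in the style ipconfig writes to /run/net-<device>.conf (values illustrative):

    content = '\n'.join([
        'DEVICE=eth0',
        'PROTO=dhcp',
        'IPV4ADDR=10.0.0.5',
        'IPV4NETMASK=255.255.255.0',
        'IPV4GATEWAY=10.0.0.1',
        'IPV4DNS0=10.0.0.1',
        'IPV4DNS1=0.0.0.0',
    ])
    name, iface = _klibc_to_config_entry(content)
    # name -> 'eth0'
    # iface['subnets'][0] -> {'type': 'dhcp', 'control': 'manual',
    #     'netmask': '255.255.255.0', 'gateway': '10.0.0.1',
    #     'dns_nameservers': ['10.0.0.1']}  (the all-zero DNS1 is dropped)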
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
deleted file mode 100644
index eff5b924..00000000
--- a/cloudinit/net/eni.py
+++ /dev/null
@@ -1,504 +0,0 @@
-# vi: ts=4 expandtab
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import copy
-import glob
-import os
-import re
-
-from . import ParserError
-
-from . import renderer
-
-from cloudinit import util
-
-
-NET_CONFIG_COMMANDS = [
- "pre-up", "up", "post-up", "down", "pre-down", "post-down",
-]
-
-NET_CONFIG_BRIDGE_OPTIONS = [
- "bridge_ageing", "bridge_bridgeprio", "bridge_fd", "bridge_gcinit",
- "bridge_hello", "bridge_maxage", "bridge_maxwait", "bridge_stp",
-]
-
-NET_CONFIG_OPTIONS = [
- "address", "netmask", "broadcast", "network", "metric", "gateway",
- "pointtopoint", "media", "mtu", "hostname", "leasehours", "leasetime",
- "vendor", "client", "bootfile", "server", "hwaddr", "provider", "frame",
- "netnum", "endpoint", "local", "ttl",
-]
-
-
-# TODO: switch valid_map based on mode inet/inet6
-def _iface_add_subnet(iface, subnet):
- content = []
- valid_map = [
- 'address',
- 'netmask',
- 'broadcast',
- 'metric',
- 'gateway',
- 'pointopoint',
- 'mtu',
- 'scope',
- 'dns_search',
- 'dns_nameservers',
- ]
- for key, value in subnet.items():
- if value and key in valid_map:
- if type(value) == list:
- value = " ".join(value)
- if '_' in key:
- key = key.replace('_', '-')
- content.append(" {0} {1}".format(key, value))
-
- return sorted(content)
-
-
-# TODO: switch to valid_map for attrs
-def _iface_add_attrs(iface, index):
- # If the index is non-zero, this is an alias interface. Alias interfaces
- # represent additional interface addresses, and should not have additional
- # attributes. (extra attributes here are almost always either incorrect,
- # or are applied to the parent interface.) So if this is an alias, stop
- # right here.
- if index != 0:
- return []
- content = []
- ignore_map = [
- 'control',
- 'index',
- 'inet',
- 'mode',
- 'name',
- 'subnets',
- 'type',
- ]
- renames = {'mac_address': 'hwaddress'}
- if iface['type'] not in ['bond', 'bridge', 'vlan']:
- ignore_map.append('mac_address')
-
- for key, value in iface.items():
- if not value or key in ignore_map:
- continue
- if type(value) == list:
- value = " ".join(value)
- content.append(" {0} {1}".format(renames.get(key, key), value))
-
- return sorted(content)
-
-
-def _iface_start_entry(iface, index, render_hwaddress=False):
- fullname = iface['name']
- if index != 0:
- fullname += ":%s" % index
-
- control = iface['control']
- if control == "auto":
- cverb = "auto"
- elif control in ("hotplug",):
- cverb = "allow-" + control
- else:
- cverb = "# control-" + control
-
- subst = iface.copy()
- subst.update({'fullname': fullname, 'cverb': cverb})
-
- lines = [
- "{cverb} {fullname}".format(**subst),
- "iface {fullname} {inet} {mode}".format(**subst)]
- if render_hwaddress and iface.get('mac_address'):
- lines.append(" hwaddress {mac_address}".format(**subst))
-
- return lines
-
-
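
For instance, a dhcp interface entry renders like this (a sketch; the hwaddress indentation follows the format string above):

    iface = {'name': 'eth0', 'control': 'auto', 'inet': 'inet',
             'mode': 'dhcp', 'mac_address': '52:54:00:12:34:56'}
    _iface_start_entry(iface, index=0, render_hwaddress=True)
    # -> ['auto eth0',
    #     'iface eth0 inet dhcp',
    #     ' hwaddress 52:54:00:12:34:56']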
-def _parse_deb_config_data(ifaces, contents, src_dir, src_path):
- """Parses the file contents, placing result into ifaces.
-
- '_source_path' is added to every dictionary entry to define which file
- the configuration information came from.
-
- :param ifaces: interface dictionary
- :param contents: contents of interfaces file
- :param src_dir: directory interfaces file was located
- :param src_path: file path from which `contents` was read
- """
- currif = None
- for line in contents.splitlines():
- line = line.strip()
- if line.startswith('#'):
- continue
- split = line.split(' ')
- option = split[0]
- if option == "source-directory":
- parsed_src_dir = split[1]
- if not parsed_src_dir.startswith("/"):
- parsed_src_dir = os.path.join(src_dir, parsed_src_dir)
- for expanded_path in glob.glob(parsed_src_dir):
- dir_contents = os.listdir(expanded_path)
- dir_contents = [
- os.path.join(expanded_path, path)
- for path in dir_contents
- if (os.path.isfile(os.path.join(expanded_path, path)) and
- re.match("^[a-zA-Z0-9_-]+$", path) is not None)
- ]
- for entry in dir_contents:
- with open(entry, "r") as fp:
- src_data = fp.read().strip()
- abs_entry = os.path.abspath(entry)
- _parse_deb_config_data(
- ifaces, src_data,
- os.path.dirname(abs_entry), abs_entry)
- elif option == "source":
- new_src_path = split[1]
- if not new_src_path.startswith("/"):
- new_src_path = os.path.join(src_dir, new_src_path)
- for expanded_path in glob.glob(new_src_path):
- with open(expanded_path, "r") as fp:
- src_data = fp.read().strip()
- abs_path = os.path.abspath(expanded_path)
- _parse_deb_config_data(
- ifaces, src_data,
- os.path.dirname(abs_path), abs_path)
- elif option == "auto":
- for iface in split[1:]:
- if iface not in ifaces:
- ifaces[iface] = {
- # Include the source path this interface was found in.
- "_source_path": src_path
- }
- ifaces[iface]['auto'] = True
- elif option == "iface":
- iface, family, method = split[1:4]
- if iface not in ifaces:
- ifaces[iface] = {
- # Include the source path this interface was found in.
- "_source_path": src_path
- }
- elif 'family' in ifaces[iface]:
- raise ParserError(
- "Interface %s can only be defined once. "
- "Re-defined in '%s'." % (iface, src_path))
- ifaces[iface]['family'] = family
- ifaces[iface]['method'] = method
- currif = iface
- elif option == "hwaddress":
- if split[1] == "ether":
- val = split[2]
- else:
- val = split[1]
- ifaces[currif]['hwaddress'] = val
- elif option in NET_CONFIG_OPTIONS:
- ifaces[currif][option] = split[1]
- elif option in NET_CONFIG_COMMANDS:
- if option not in ifaces[currif]:
- ifaces[currif][option] = []
- ifaces[currif][option].append(' '.join(split[1:]))
- elif option.startswith('dns-'):
- if 'dns' not in ifaces[currif]:
- ifaces[currif]['dns'] = {}
- if option == 'dns-search':
- ifaces[currif]['dns']['search'] = []
- for domain in split[1:]:
- ifaces[currif]['dns']['search'].append(domain)
- elif option == 'dns-nameservers':
- ifaces[currif]['dns']['nameservers'] = []
- for server in split[1:]:
- ifaces[currif]['dns']['nameservers'].append(server)
- elif option.startswith('bridge_'):
- if 'bridge' not in ifaces[currif]:
- ifaces[currif]['bridge'] = {}
- if option in NET_CONFIG_BRIDGE_OPTIONS:
- bridge_option = option.replace('bridge_', '', 1)
- ifaces[currif]['bridge'][bridge_option] = split[1]
- elif option == "bridge_ports":
- ifaces[currif]['bridge']['ports'] = []
- for iface in split[1:]:
- ifaces[currif]['bridge']['ports'].append(iface)
- elif option == "bridge_hw" and split[1].lower() == "mac":
- ifaces[currif]['bridge']['mac'] = split[2]
- elif option == "bridge_pathcost":
- if 'pathcost' not in ifaces[currif]['bridge']:
- ifaces[currif]['bridge']['pathcost'] = {}
- ifaces[currif]['bridge']['pathcost'][split[1]] = split[2]
- elif option == "bridge_portprio":
- if 'portprio' not in ifaces[currif]['bridge']:
- ifaces[currif]['bridge']['portprio'] = {}
- ifaces[currif]['bridge']['portprio'][split[1]] = split[2]
- elif option.startswith('bond-'):
- if 'bond' not in ifaces[currif]:
- ifaces[currif]['bond'] = {}
- bond_option = option.replace('bond-', '', 1)
- ifaces[currif]['bond'][bond_option] = split[1]
- for iface in ifaces.keys():
- if 'auto' not in ifaces[iface]:
- ifaces[iface]['auto'] = False
-
-
-def parse_deb_config(path):
- """Parses a debian network configuration file."""
- ifaces = {}
- with open(path, "r") as fp:
- contents = fp.read().strip()
- abs_path = os.path.abspath(path)
- _parse_deb_config_data(
- ifaces, contents,
- os.path.dirname(abs_path), abs_path)
- return ifaces
-
-
-def convert_eni_data(eni_data):
- # return a network config representation of what is in eni_data
- ifaces = {}
- _parse_deb_config_data(ifaces, eni_data, src_dir=None, src_path=None)
- return _ifaces_to_net_config_data(ifaces)
-
-
-def _ifaces_to_net_config_data(ifaces):
- """Return network config that represents the ifaces data provided.
- ifaces = parse_deb_config("/etc/network/interfaces")
- config = ifaces_to_net_config_data(ifaces)
- state = parse_net_config_data(config)."""
- devs = {}
- for name, data in ifaces.items():
- # devname is 'eth0' for name='eth0:1'
- devname = name.partition(":")[0]
- if devname not in devs:
- devs[devname] = {'type': 'physical', 'name': devname,
- 'subnets': []}
-        # this isn't strictly correct, but some might specify
- # hwaddress on a nic for matching / declaring name.
- if 'hwaddress' in data:
- devs[devname]['mac_address'] = data['hwaddress']
- subnet = {'_orig_eni_name': name, 'type': data['method']}
- if data.get('auto'):
- subnet['control'] = 'auto'
- else:
- subnet['control'] = 'manual'
-
- if data.get('method') == 'static':
- subnet['address'] = data['address']
-
- for copy_key in ('netmask', 'gateway', 'broadcast'):
- if copy_key in data:
- subnet[copy_key] = data[copy_key]
-
- if 'dns' in data:
- for n in ('nameservers', 'search'):
- if n in data['dns'] and data['dns'][n]:
- subnet['dns_' + n] = data['dns'][n]
- devs[devname]['subnets'].append(subnet)
-
- return {'version': 1,
- 'config': [devs[d] for d in sorted(devs)]}
-
-
-class Renderer(renderer.Renderer):
- """Renders network information in a /etc/network/interfaces format."""
-
- def __init__(self, config=None):
- if not config:
- config = {}
- self.eni_path = config.get('eni_path', 'etc/network/interfaces')
- self.eni_header = config.get('eni_header', None)
- self.links_path_prefix = config.get(
- 'links_path_prefix', 'etc/systemd/network/50-cloud-init-')
- self.netrules_path = config.get(
- 'netrules_path', 'etc/udev/rules.d/70-persistent-net.rules')
-
- def _render_route(self, route, indent=""):
- """When rendering routes for an iface, in some cases applying a route
- may result in the route command returning non-zero which produces
- some confusing output for users manually using ifup/ifdown[1]. To
- that end, we will optionally include an '|| true' postfix to each
- route line allowing users to work with ifup/ifdown without using
- --force option.
-
- We may at somepoint not want to emit this additional postfix, and
- add a 'strict' flag to this function. When called with strict=True,
- then we will not append the postfix.
-
- 1. http://askubuntu.com/questions/168033/
- how-to-set-static-routes-in-ubuntu-server
- """
- content = []
- up = indent + "post-up route add"
- down = indent + "pre-down route del"
- or_true = " || true"
- mapping = {
- 'network': '-net',
- 'netmask': 'netmask',
- 'gateway': 'gw',
- 'metric': 'metric',
- }
- if route['network'] == '0.0.0.0' and route['netmask'] == '0.0.0.0':
- default_gw = " default gw %s" % route['gateway']
- content.append(up + default_gw + or_true)
- content.append(down + default_gw + or_true)
- elif route['network'] == '::' and route['netmask'] == 0:
- # ipv6!
- default_gw = " -A inet6 default gw %s" % route['gateway']
- content.append(up + default_gw + or_true)
- content.append(down + default_gw + or_true)
- else:
- route_line = ""
- for k in ['network', 'netmask', 'gateway', 'metric']:
- if k in route:
- route_line += " %s %s" % (mapping[k], route[k])
- content.append(up + route_line + or_true)
- content.append(down + route_line + or_true)
- return content
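-
-        # Illustrative sketch (not from the original module): a default route
-        # {'network': '0.0.0.0', 'netmask': '0.0.0.0', 'gateway': '10.0.0.1'}
-        # renders (with indent="") as:
-        #   post-up route add default gw 10.0.0.1 || true
-        #   pre-down route del default gw 10.0.0.1 || true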
-
- def _render_iface(self, iface, render_hwaddress=False):
- sections = []
- subnets = iface.get('subnets', {})
- if subnets:
-            for index, subnet in enumerate(subnets):
- iface['index'] = index
- iface['mode'] = subnet['type']
- iface['control'] = subnet.get('control', 'auto')
- subnet_inet = 'inet'
- if iface['mode'].endswith('6'):
- # This is a request for DHCPv6.
- subnet_inet += '6'
- elif iface['mode'] == 'static' and ":" in subnet['address']:
- # This is a static IPv6 address.
- subnet_inet += '6'
- iface['inet'] = subnet_inet
- if iface['mode'].startswith('dhcp'):
- iface['mode'] = 'dhcp'
-
- lines = list(
- _iface_start_entry(
- iface, index, render_hwaddress=render_hwaddress) +
- _iface_add_subnet(iface, subnet) +
- _iface_add_attrs(iface, index)
- )
- for route in subnet.get('routes', []):
- lines.extend(self._render_route(route, indent=" "))
-
- if len(subnets) > 1 and index == 0:
- tmpl = " post-up ifup %s:%s\n"
- for i in range(1, len(subnets)):
- lines.append(tmpl % (iface['name'], i))
-
- sections.append(lines)
- else:
- # ifenslave docs say to auto the slave devices
- lines = []
- if 'bond-master' in iface:
- lines.append("auto {name}".format(**iface))
- lines.append("iface {name} {inet} {mode}".format(**iface))
- lines.extend(_iface_add_attrs(iface, index=0))
- sections.append(lines)
- return sections
-
- def _render_interfaces(self, network_state, render_hwaddress=False):
- '''Given state, emit etc/network/interfaces content.'''
-
- # handle 'lo' specifically as we need to insert the global dns entries
- # there (as that is the only interface that will be always up).
- lo = {'name': 'lo', 'type': 'physical', 'inet': 'inet',
- 'subnets': [{'type': 'loopback', 'control': 'auto'}]}
- for iface in network_state.iter_interfaces():
- if iface.get('name') == "lo":
- lo = copy.deepcopy(iface)
-
- nameservers = network_state.dns_nameservers
- if nameservers:
- lo['subnets'][0]["dns_nameservers"] = (" ".join(nameservers))
-
- searchdomains = network_state.dns_searchdomains
- if searchdomains:
- lo['subnets'][0]["dns_search"] = (" ".join(searchdomains))
-
-        # Apply a sort order to ensure that we write out the physical
-        # interfaces first; this is critical for bonding.
- order = {
- 'physical': 0,
- 'bond': 1,
- 'bridge': 2,
- 'vlan': 3,
- }
-
- sections = []
- sections.extend(self._render_iface(lo))
- for iface in sorted(network_state.iter_interfaces(),
- key=lambda k: (order[k['type']], k['name'])):
-
- if iface.get('name') == "lo":
- continue
- sections.extend(
- self._render_iface(iface, render_hwaddress=render_hwaddress))
-
- for route in network_state.iter_routes():
- sections.append(self._render_route(route))
-
- return '\n\n'.join(['\n'.join(s) for s in sections]) + "\n"
-
- def render_network_state(self, target, network_state):
- fpeni = os.path.join(target, self.eni_path)
- util.ensure_dir(os.path.dirname(fpeni))
- header = self.eni_header if self.eni_header else ""
- util.write_file(fpeni, header + self._render_interfaces(network_state))
-
- if self.netrules_path:
- netrules = os.path.join(target, self.netrules_path)
- util.ensure_dir(os.path.dirname(netrules))
- util.write_file(netrules,
- self._render_persistent_net(network_state))
-
- if self.links_path_prefix:
- self._render_systemd_links(target, network_state,
- links_prefix=self.links_path_prefix)
-
- def _render_systemd_links(self, target, network_state, links_prefix):
- fp_prefix = os.path.join(target, links_prefix)
- for f in glob.glob(fp_prefix + "*"):
- os.unlink(f)
- for iface in network_state.iter_interfaces():
- if (iface['type'] == 'physical' and 'name' in iface and
- iface.get('mac_address')):
- fname = fp_prefix + iface['name'] + ".link"
- content = "\n".join([
- "[Match]",
- "MACAddress=" + iface['mac_address'],
- "",
- "[Link]",
- "Name=" + iface['name'],
- ""
- ])
- util.write_file(fname, content)
-
-
-def network_state_to_eni(network_state, header=None, render_hwaddress=False):
- # render the provided network state, return a string of equivalent eni
- eni_path = 'etc/network/interfaces'
- renderer = Renderer({
- 'eni_path': eni_path,
- 'eni_header': header,
- 'links_path_prefix': None,
- 'netrules_path': None,
- })
- if not header:
- header = ""
- if not header.endswith("\n"):
- header += "\n"
- contents = renderer._render_interfaces(
- network_state, render_hwaddress=render_hwaddress)
- return header + contents
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
deleted file mode 100644
index 8ca5106f..00000000
--- a/cloudinit/net/network_state.py
+++ /dev/null
@@ -1,454 +0,0 @@
-# Copyright (C) 2013-2014 Canonical Ltd.
-#
-# Author: Ryan Harper <ryan.harper@canonical.com>
-#
-# Curtin is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Affero General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version.
-#
-# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
-# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
-# more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
-
-import copy
-import functools
-import logging
-
-import six
-
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-NETWORK_STATE_VERSION = 1
-NETWORK_STATE_REQUIRED_KEYS = {
- 1: ['version', 'config', 'network_state'],
-}
-
-
-def parse_net_config_data(net_config, skip_broken=True):
- """Parses the config, returns NetworkState object
-
- :param net_config: curtin network config dict
- """
- state = None
- if 'version' in net_config and 'config' in net_config:
- nsi = NetworkStateInterpreter(version=net_config.get('version'),
- config=net_config.get('config'))
- nsi.parse_config(skip_broken=skip_broken)
- state = nsi.network_state
- return state
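-
-
-# A minimal input sketch (illustrative; mirrors the handle_physical
-# docstring below):
-#   cfg = {'version': 1,
-#          'config': [{'type': 'physical', 'name': 'eth0',
-#                      'subnets': [{'type': 'dhcp4'}]}]}
-#   state = parse_net_config_data(cfg)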
-
-
-def parse_net_config(path, skip_broken=True):
- """Parses a curtin network configuration file and
- return network state"""
- ns = None
- net_config = util.read_conf(path)
- if 'network' in net_config:
- ns = parse_net_config_data(net_config.get('network'),
- skip_broken=skip_broken)
- return ns
-
-
-def from_state_file(state_file):
- state = util.read_conf(state_file)
- nsi = NetworkStateInterpreter()
- nsi.load(state)
- return nsi
-
-
-def diff_keys(expected, actual):
- missing = set(expected)
- for key in actual:
- missing.discard(key)
- return missing
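-
-
-# e.g. (illustrative): diff_keys(['version', 'config'], {'version': 1})
-# returns {'config'}, the expected keys missing from 'actual'.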
-
-
-class InvalidCommand(Exception):
- pass
-
-
-def ensure_command_keys(required_keys):
-
- def wrapper(func):
-
- @functools.wraps(func)
- def decorator(self, command, *args, **kwargs):
- if required_keys:
- missing_keys = diff_keys(required_keys, command)
- if missing_keys:
- raise InvalidCommand("Command missing %s of required"
- " keys %s" % (missing_keys,
- required_keys))
- return func(self, command, *args, **kwargs)
-
- return decorator
-
- return wrapper
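-
-
-# Usage sketch (illustrative): the handlers below are decorated like
-#
-#   @ensure_command_keys(['name'])
-#   def handle_physical(self, command):
-#       ...
-#
-# so a command lacking 'name' raises InvalidCommand before the handler
-# body runs.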
-
-
-class CommandHandlerMeta(type):
- """Metaclass that dynamically creates a 'command_handlers' attribute.
-
- This will scan the to-be-created class for methods that start with
- 'handle_' and on finding those will populate a class attribute mapping
- so that those methods can be quickly located and called.
- """
- def __new__(cls, name, parents, dct):
- command_handlers = {}
- for attr_name, attr in dct.items():
- if callable(attr) and attr_name.startswith('handle_'):
- handles_what = attr_name[len('handle_'):]
- if handles_what:
- command_handlers[handles_what] = attr
- dct['command_handlers'] = command_handlers
- return super(CommandHandlerMeta, cls).__new__(cls, name,
- parents, dct)
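-
-
-# For example (illustrative), a class defining handle_physical and
-# handle_vlan methods gets command_handlers == {'physical': handle_physical,
-# 'vlan': handle_vlan}, which parse_config() below uses to dispatch on
-# command['type'].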
-
-
-class NetworkState(object):
-
- def __init__(self, network_state, version=NETWORK_STATE_VERSION):
- self._network_state = copy.deepcopy(network_state)
- self._version = version
-
- @property
- def version(self):
- return self._version
-
- def iter_routes(self, filter_func=None):
- for route in self._network_state.get('routes', []):
- if filter_func is not None:
- if filter_func(route):
- yield route
- else:
- yield route
-
- @property
- def dns_nameservers(self):
- try:
- return self._network_state['dns']['nameservers']
- except KeyError:
- return []
-
- @property
- def dns_searchdomains(self):
- try:
- return self._network_state['dns']['search']
- except KeyError:
- return []
-
- def iter_interfaces(self, filter_func=None):
- ifaces = self._network_state.get('interfaces', {})
- for iface in six.itervalues(ifaces):
- if filter_func is None:
- yield iface
- else:
- if filter_func(iface):
- yield iface
-
-
-@six.add_metaclass(CommandHandlerMeta)
-class NetworkStateInterpreter(object):
-
- initial_network_state = {
- 'interfaces': {},
- 'routes': [],
- 'dns': {
- 'nameservers': [],
- 'search': [],
- }
- }
-
- def __init__(self, version=NETWORK_STATE_VERSION, config=None):
- self._version = version
- self._config = config
- self._network_state = copy.deepcopy(self.initial_network_state)
- self._parsed = False
-
- @property
- def network_state(self):
- return NetworkState(self._network_state, version=self._version)
-
- def dump(self):
- state = {
- 'version': self._version,
- 'config': self._config,
- 'network_state': self._network_state,
- }
- return util.yaml_dumps(state)
-
- def load(self, state):
- if 'version' not in state:
- LOG.error('Invalid state, missing version field')
- raise ValueError('Invalid state, missing version field')
-
- required_keys = NETWORK_STATE_REQUIRED_KEYS[state['version']]
- missing_keys = diff_keys(required_keys, state)
- if missing_keys:
- msg = 'Invalid state, missing keys: %s' % (missing_keys)
- LOG.error(msg)
- raise ValueError(msg)
-
- # v1 - direct attr mapping, except version
- for key in [k for k in required_keys if k not in ['version']]:
- setattr(self, key, state[key])
-
- def dump_network_state(self):
- return util.yaml_dumps(self._network_state)
-
- def parse_config(self, skip_broken=True):
- # rebuild network state
- for command in self._config:
- command_type = command['type']
- try:
- handler = self.command_handlers[command_type]
- except KeyError:
- raise RuntimeError("No handler found for"
- " command '%s'" % command_type)
- try:
- handler(self, command)
- except InvalidCommand:
- if not skip_broken:
- raise
- else:
- LOG.warn("Skipping invalid command: %s", command,
- exc_info=True)
- LOG.debug(self.dump_network_state())
-
- @ensure_command_keys(['name'])
- def handle_physical(self, command):
- '''
- command = {
- 'type': 'physical',
- 'mac_address': 'c0:d6:9f:2c:e8:80',
- 'name': 'eth0',
- 'subnets': [
- {'type': 'dhcp4'}
- ]
- }
- '''
-
- interfaces = self._network_state.get('interfaces', {})
- iface = interfaces.get(command['name'], {})
- for param, val in command.get('params', {}).items():
- iface.update({param: val})
-
- # convert subnet ipv6 netmask to cidr as needed
- subnets = command.get('subnets')
- if subnets:
- for subnet in subnets:
- if subnet['type'] == 'static':
- if 'netmask' in subnet and ':' in subnet['address']:
- subnet['netmask'] = mask2cidr(subnet['netmask'])
- for route in subnet.get('routes', []):
- if 'netmask' in route:
- route['netmask'] = mask2cidr(route['netmask'])
- iface.update({
- 'name': command.get('name'),
- 'type': command.get('type'),
- 'mac_address': command.get('mac_address'),
- 'inet': 'inet',
- 'mode': 'manual',
- 'mtu': command.get('mtu'),
- 'address': None,
- 'gateway': None,
- 'subnets': subnets,
- })
- self._network_state['interfaces'].update({command.get('name'): iface})
- self.dump_network_state()
-
- @ensure_command_keys(['name', 'vlan_id', 'vlan_link'])
- def handle_vlan(self, command):
- '''
- auto eth0.222
- iface eth0.222 inet static
- address 10.10.10.1
- netmask 255.255.255.0
- hwaddress ether BC:76:4E:06:96:B3
- vlan-raw-device eth0
- '''
- interfaces = self._network_state.get('interfaces', {})
- self.handle_physical(command)
- iface = interfaces.get(command.get('name'), {})
- iface['vlan-raw-device'] = command.get('vlan_link')
- iface['vlan_id'] = command.get('vlan_id')
- interfaces.update({iface['name']: iface})
-
- @ensure_command_keys(['name', 'bond_interfaces', 'params'])
- def handle_bond(self, command):
- '''
- #/etc/network/interfaces
- auto eth0
- iface eth0 inet manual
- bond-master bond0
- bond-mode 802.3ad
-
- auto eth1
- iface eth1 inet manual
- bond-master bond0
- bond-mode 802.3ad
-
- auto bond0
- iface bond0 inet static
- address 192.168.0.10
- gateway 192.168.0.1
- netmask 255.255.255.0
- bond-slaves none
- bond-mode 802.3ad
- bond-miimon 100
- bond-downdelay 200
- bond-updelay 200
- bond-lacp-rate 4
- '''
-
- self.handle_physical(command)
- interfaces = self._network_state.get('interfaces')
- iface = interfaces.get(command.get('name'), {})
- for param, val in command.get('params').items():
- iface.update({param: val})
- iface.update({'bond-slaves': 'none'})
- self._network_state['interfaces'].update({iface['name']: iface})
-
- # handle bond slaves
- for ifname in command.get('bond_interfaces'):
- if ifname not in interfaces:
- cmd = {
- 'name': ifname,
- 'type': 'bond',
- }
- # inject placeholder
- self.handle_physical(cmd)
-
- interfaces = self._network_state.get('interfaces', {})
- bond_if = interfaces.get(ifname)
- bond_if['bond-master'] = command.get('name')
- # copy in bond config into slave
- for param, val in command.get('params').items():
- bond_if.update({param: val})
- self._network_state['interfaces'].update({ifname: bond_if})
-
- @ensure_command_keys(['name', 'bridge_interfaces', 'params'])
- def handle_bridge(self, command):
- '''
- auto br0
- iface br0 inet static
- address 10.10.10.1
- netmask 255.255.255.0
- bridge_ports eth0 eth1
- bridge_stp off
- bridge_fd 0
- bridge_maxwait 0
-
- bridge_params = [
- "bridge_ports",
- "bridge_ageing",
- "bridge_bridgeprio",
- "bridge_fd",
- "bridge_gcint",
- "bridge_hello",
- "bridge_hw",
- "bridge_maxage",
- "bridge_maxwait",
- "bridge_pathcost",
- "bridge_portprio",
- "bridge_stp",
- "bridge_waitport",
- ]
- '''
-
- # find one of the bridge port ifaces to get mac_addr
- # handle bridge_slaves
- interfaces = self._network_state.get('interfaces', {})
- for ifname in command.get('bridge_interfaces'):
- if ifname in interfaces:
- continue
-
- cmd = {
- 'name': ifname,
- }
- # inject placeholder
- self.handle_physical(cmd)
-
- interfaces = self._network_state.get('interfaces', {})
- self.handle_physical(command)
- iface = interfaces.get(command.get('name'), {})
- iface['bridge_ports'] = command['bridge_interfaces']
- for param, val in command.get('params').items():
- iface.update({param: val})
-
- interfaces.update({iface['name']: iface})
-
- @ensure_command_keys(['address'])
- def handle_nameserver(self, command):
- dns = self._network_state.get('dns')
- if 'address' in command:
- addrs = command['address']
-            if not isinstance(addrs, list):
- addrs = [addrs]
- for addr in addrs:
- dns['nameservers'].append(addr)
- if 'search' in command:
- paths = command['search']
- if not isinstance(paths, list):
- paths = [paths]
- for path in paths:
- dns['search'].append(path)
-
- @ensure_command_keys(['destination'])
- def handle_route(self, command):
- routes = self._network_state.get('routes', [])
- network, cidr = command['destination'].split("/")
- netmask = cidr2mask(int(cidr))
- route = {
- 'network': network,
- 'netmask': netmask,
- 'gateway': command.get('gateway'),
- 'metric': command.get('metric'),
- }
- routes.append(route)
-
-
-def cidr2mask(cidr):
- mask = [0, 0, 0, 0]
-    for i in range(cidr):
- idx = int(i / 8)
- mask[idx] = mask[idx] + (1 << (7 - i % 8))
- return ".".join([str(x) for x in mask])
-
-
-def ipv4mask2cidr(mask):
- if '.' not in mask:
- return mask
- return sum([bin(int(x)).count('1') for x in mask.split('.')])
-
-
-def ipv6mask2cidr(mask):
- if ':' not in mask:
- return mask
-
- bitCount = [0, 0x8000, 0xc000, 0xe000, 0xf000, 0xf800, 0xfc00, 0xfe00,
- 0xff00, 0xff80, 0xffc0, 0xffe0, 0xfff0, 0xfff8, 0xfffc,
- 0xfffe, 0xffff]
- cidr = 0
- for word in mask.split(':'):
- if not word or int(word, 16) == 0:
- break
- cidr += bitCount.index(int(word, 16))
-
- return cidr
-
-
-def mask2cidr(mask):
- if ':' in mask:
- return ipv6mask2cidr(mask)
- elif '.' in mask:
- return ipv4mask2cidr(mask)
- else:
- return mask
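-
-
-# Worked example (illustrative) of the dispatch above:
-#   >>> mask2cidr('255.255.255.0')
-#   24
-#   >>> mask2cidr('ffff:ffff:ffff:ffff::')
-#   64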
diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py
deleted file mode 100644
index 310cbe0d..00000000
--- a/cloudinit/net/renderer.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (C) 2013-2014 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Blake Rouse <blake.rouse@canonical.com>
-#
-# Curtin is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Affero General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version.
-#
-# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
-# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
-# more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-
-from .udev import generate_udev_rule
-
-
-def filter_by_type(match_type):
- return lambda iface: match_type == iface['type']
-
-
-def filter_by_name(match_name):
- return lambda iface: match_name == iface['name']
-
-
-filter_by_physical = filter_by_type('physical')
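-
-# e.g. (illustrative sketch):
-#   network_state.iter_interfaces(filter_by_physical)
-# yields only interfaces whose 'type' is 'physical', as
-# _render_persistent_net below does.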
-
-
-class Renderer(object):
-
- @staticmethod
- def _render_persistent_net(network_state):
- """Given state, emit udev rules to map mac to ifname."""
- # TODO(harlowja): this seems shared between eni renderer and
- # this, so move it to a shared location.
- content = six.StringIO()
- for iface in network_state.iter_interfaces(filter_by_physical):
- # for physical interfaces write out a persist net udev rule
- if 'name' in iface and iface.get('mac_address'):
- content.write(generate_udev_rule(iface['name'],
- iface['mac_address']))
- return content.getvalue()
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
deleted file mode 100644
index c53acf71..00000000
--- a/cloudinit/net/sysconfig.py
+++ /dev/null
@@ -1,400 +0,0 @@
-# vi: ts=4 expandtab
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-
-import six
-
-from cloudinit.distros.parsers import resolv_conf
-from cloudinit import util
-
-from . import renderer
-
-
-def _make_header(sep='#'):
- lines = [
- "Created by cloud-init on instance boot automatically, do not edit.",
- "",
- ]
-    for i, line in enumerate(lines):
-        if line:
-            lines[i] = sep + " " + line
-        else:
-            lines[i] = sep
- return "\n".join(lines)
-
-
-def _is_default_route(route):
- if route['network'] == '::' and route['netmask'] == 0:
- return True
- if route['network'] == '0.0.0.0' and route['netmask'] == '0.0.0.0':
- return True
- return False
-
-
-def _quote_value(value):
- if re.search(r"\s", value):
- # This doesn't handle complex cases...
- if value.startswith('"') and value.endswith('"'):
- return value
- else:
- return '"%s"' % value
- else:
- return value
-
-
-class ConfigMap(object):
- """Sysconfig like dictionary object."""
-
-    # Why does Red Hat prefer yes/no to true/false?
- _bool_map = {
- True: 'yes',
- False: 'no',
- }
-
- def __init__(self):
- self._conf = {}
-
- def __setitem__(self, key, value):
- self._conf[key] = value
-
- def drop(self, key):
- self._conf.pop(key, None)
-
- def __len__(self):
- return len(self._conf)
-
- def to_string(self):
- buf = six.StringIO()
- buf.write(_make_header())
- if self._conf:
- buf.write("\n")
- for key in sorted(self._conf.keys()):
- value = self._conf[key]
- if isinstance(value, bool):
- value = self._bool_map[value]
- if not isinstance(value, six.string_types):
- value = str(value)
- buf.write("%s=%s\n" % (key, _quote_value(value)))
- return buf.getvalue()
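-
-        # Illustrative sketch: a map holding DEVICE='eth0' and ONBOOT=True
-        # serializes (after the header) as:
-        #   DEVICE=eth0
-        #   ONBOOT=yes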
-
-
-class Route(ConfigMap):
- """Represents a route configuration."""
-
- route_fn_tpl = '%(base)s/network-scripts/route-%(name)s'
-
- def __init__(self, route_name, base_sysconf_dir):
- super(Route, self).__init__()
- self.last_idx = 1
- self.has_set_default = False
- self._route_name = route_name
- self._base_sysconf_dir = base_sysconf_dir
-
- def copy(self):
- r = Route(self._route_name, self._base_sysconf_dir)
- r._conf = self._conf.copy()
- r.last_idx = self.last_idx
- r.has_set_default = self.has_set_default
- return r
-
- @property
- def path(self):
- return self.route_fn_tpl % ({'base': self._base_sysconf_dir,
- 'name': self._route_name})
-
-
-class NetInterface(ConfigMap):
- """Represents a sysconfig/networking-script (and its config + children)."""
-
- iface_fn_tpl = '%(base)s/network-scripts/ifcfg-%(name)s'
-
- iface_types = {
- 'ethernet': 'Ethernet',
- 'bond': 'Bond',
- 'bridge': 'Bridge',
- }
-
- def __init__(self, iface_name, base_sysconf_dir, kind='ethernet'):
- super(NetInterface, self).__init__()
- self.children = []
- self.routes = Route(iface_name, base_sysconf_dir)
- self._kind = kind
- self._iface_name = iface_name
- self._conf['DEVICE'] = iface_name
- self._conf['TYPE'] = self.iface_types[kind]
- self._base_sysconf_dir = base_sysconf_dir
-
- @property
- def name(self):
- return self._iface_name
-
- @name.setter
- def name(self, iface_name):
- self._iface_name = iface_name
- self._conf['DEVICE'] = iface_name
-
- @property
- def kind(self):
- return self._kind
-
- @kind.setter
- def kind(self, kind):
- self._kind = kind
- self._conf['TYPE'] = self.iface_types[kind]
-
- @property
- def path(self):
- return self.iface_fn_tpl % ({'base': self._base_sysconf_dir,
- 'name': self.name})
-
- def copy(self, copy_children=False, copy_routes=False):
- c = NetInterface(self.name, self._base_sysconf_dir, kind=self._kind)
- c._conf = self._conf.copy()
- if copy_children:
- c.children = list(self.children)
- if copy_routes:
- c.routes = self.routes.copy()
- return c
-
-
-class Renderer(renderer.Renderer):
- """Renders network information in a /etc/sysconfig format."""
-
- # See: https://access.redhat.com/documentation/en-US/\
- # Red_Hat_Enterprise_Linux/6/html/Deployment_Guide/\
- # s1-networkscripts-interfaces.html (or other docs for
- # details about this)
-
- iface_defaults = tuple([
- ('ONBOOT', True),
- ('USERCTL', False),
- ('NM_CONTROLLED', False),
- ('BOOTPROTO', 'none'),
- ])
-
-    # If these keys exist, then their values will be used to form
-    # a BONDING_OPTS grouping; otherwise no grouping will be set.
- bond_tpl_opts = tuple([
- ('bond_mode', "mode=%s"),
- ('bond_xmit_hash_policy', "xmit_hash_policy=%s"),
- ('bond_miimon', "miimon=%s"),
- ])
-
- bridge_opts_keys = tuple([
- ('bridge_stp', 'STP'),
- ('bridge_ageing', 'AGEING'),
- ('bridge_bridgeprio', 'PRIO'),
- ])
-
- def __init__(self, config=None):
- if not config:
- config = {}
- self.sysconf_dir = config.get('sysconf_dir', 'etc/sysconfig/')
- self.netrules_path = config.get(
- 'netrules_path', 'etc/udev/rules.d/70-persistent-net.rules')
- self.dns_path = config.get('dns_path', 'etc/resolv.conf')
-
- @classmethod
- def _render_iface_shared(cls, iface, iface_cfg):
- for k, v in cls.iface_defaults:
- iface_cfg[k] = v
- for (old_key, new_key) in [('mac_address', 'HWADDR'), ('mtu', 'MTU')]:
- old_value = iface.get(old_key)
- if old_value is not None:
- iface_cfg[new_key] = old_value
-
- @classmethod
- def _render_subnet(cls, iface_cfg, route_cfg, subnet):
- subnet_type = subnet.get('type')
- if subnet_type == 'dhcp6':
- iface_cfg['DHCPV6C'] = True
- iface_cfg['IPV6INIT'] = True
- iface_cfg['BOOTPROTO'] = 'dhcp'
- elif subnet_type in ['dhcp4', 'dhcp']:
- iface_cfg['BOOTPROTO'] = 'dhcp'
- elif subnet_type == 'static':
- iface_cfg['BOOTPROTO'] = 'static'
- if subnet.get('ipv6'):
- iface_cfg['IPV6ADDR'] = subnet['address']
- iface_cfg['IPV6INIT'] = True
- else:
- iface_cfg['IPADDR'] = subnet['address']
- else:
- raise ValueError("Unknown subnet type '%s' found"
- " for interface '%s'" % (subnet_type,
- iface_cfg.name))
- if 'netmask' in subnet:
- iface_cfg['NETMASK'] = subnet['netmask']
- for route in subnet.get('routes', []):
- if _is_default_route(route):
- if route_cfg.has_set_default:
- raise ValueError("Duplicate declaration of default"
- " route found for interface '%s'"
- % (iface_cfg.name))
- # NOTE(harlowja): ipv6 and ipv4 default gateways
- gw_key = 'GATEWAY0'
- nm_key = 'NETMASK0'
- addr_key = 'ADDRESS0'
- # The owning interface provides the default route.
- #
- # TODO(harlowja): add validation that no other iface has
- # also provided the default route?
- iface_cfg['DEFROUTE'] = True
- if 'gateway' in route:
- iface_cfg['GATEWAY'] = route['gateway']
- route_cfg.has_set_default = True
- else:
- gw_key = 'GATEWAY%s' % route_cfg.last_idx
- nm_key = 'NETMASK%s' % route_cfg.last_idx
- addr_key = 'ADDRESS%s' % route_cfg.last_idx
- route_cfg.last_idx += 1
- for (old_key, new_key) in [('gateway', gw_key),
- ('netmask', nm_key),
- ('network', addr_key)]:
- if old_key in route:
- route_cfg[new_key] = route[old_key]
-
- @classmethod
- def _render_bonding_opts(cls, iface_cfg, iface):
- bond_opts = []
- for (bond_key, value_tpl) in cls.bond_tpl_opts:
- # Seems like either dash or underscore is possible?
- bond_keys = [bond_key, bond_key.replace("_", "-")]
- for bond_key in bond_keys:
- if bond_key in iface:
- bond_value = iface[bond_key]
- if isinstance(bond_value, (tuple, list)):
- bond_value = " ".join(bond_value)
- bond_opts.append(value_tpl % (bond_value))
- break
- if bond_opts:
- iface_cfg['BONDING_OPTS'] = " ".join(bond_opts)
-
- @classmethod
- def _render_physical_interfaces(cls, network_state, iface_contents):
- physical_filter = renderer.filter_by_physical
- for iface in network_state.iter_interfaces(physical_filter):
- iface_name = iface['name']
- iface_subnets = iface.get("subnets", [])
- iface_cfg = iface_contents[iface_name]
- route_cfg = iface_cfg.routes
- if len(iface_subnets) == 1:
- cls._render_subnet(iface_cfg, route_cfg, iface_subnets[0])
- elif len(iface_subnets) > 1:
-                for i, iface_subnet in enumerate(iface_subnets,
-                                                 start=len(iface_cfg.children)):
-                    iface_sub_cfg = iface_cfg.copy()
-                    iface_sub_cfg.name = "%s:%s" % (iface_name, i)
-                    iface_cfg.children.append(iface_sub_cfg)
- cls._render_subnet(iface_sub_cfg, route_cfg, iface_subnet)
-
- @classmethod
- def _render_bond_interfaces(cls, network_state, iface_contents):
- bond_filter = renderer.filter_by_type('bond')
- for iface in network_state.iter_interfaces(bond_filter):
- iface_name = iface['name']
- iface_cfg = iface_contents[iface_name]
- cls._render_bonding_opts(iface_cfg, iface)
- iface_master_name = iface['bond-master']
- iface_cfg['MASTER'] = iface_master_name
- iface_cfg['SLAVE'] = True
- # Ensure that the master interface (and any of its children)
- # are actually marked as being bond types...
- master_cfg = iface_contents[iface_master_name]
- master_cfgs = [master_cfg]
- master_cfgs.extend(master_cfg.children)
- for master_cfg in master_cfgs:
- master_cfg['BONDING_MASTER'] = True
- master_cfg.kind = 'bond'
-
- @staticmethod
- def _render_vlan_interfaces(network_state, iface_contents):
- vlan_filter = renderer.filter_by_type('vlan')
- for iface in network_state.iter_interfaces(vlan_filter):
- iface_name = iface['name']
- iface_cfg = iface_contents[iface_name]
- iface_cfg['VLAN'] = True
- iface_cfg['PHYSDEV'] = iface_name[:iface_name.rfind('.')]
-
- @staticmethod
- def _render_dns(network_state, existing_dns_path=None):
- content = resolv_conf.ResolvConf("")
- if existing_dns_path and os.path.isfile(existing_dns_path):
- content = resolv_conf.ResolvConf(util.load_file(existing_dns_path))
- for nameserver in network_state.dns_nameservers:
- content.add_nameserver(nameserver)
- for searchdomain in network_state.dns_searchdomains:
- content.add_search_domain(searchdomain)
- return "\n".join([_make_header(';'), str(content)])
-
- @classmethod
- def _render_bridge_interfaces(cls, network_state, iface_contents):
- bridge_filter = renderer.filter_by_type('bridge')
- for iface in network_state.iter_interfaces(bridge_filter):
- iface_name = iface['name']
- iface_cfg = iface_contents[iface_name]
- iface_cfg.kind = 'bridge'
- for old_key, new_key in cls.bridge_opts_keys:
- if old_key in iface:
- iface_cfg[new_key] = iface[old_key]
- # Is this the right key to get all the connected interfaces?
- for bridged_iface_name in iface.get('bridge_ports', []):
- # Ensure all bridged interfaces are correctly tagged
- # as being bridged to this interface.
- bridged_cfg = iface_contents[bridged_iface_name]
- bridged_cfgs = [bridged_cfg]
- bridged_cfgs.extend(bridged_cfg.children)
- for bridge_cfg in bridged_cfgs:
- bridge_cfg['BRIDGE'] = iface_name
-
- @classmethod
- def _render_sysconfig(cls, base_sysconf_dir, network_state):
- '''Given state, return /etc/sysconfig files + contents'''
- iface_contents = {}
- for iface in network_state.iter_interfaces():
- iface_name = iface['name']
- iface_cfg = NetInterface(iface_name, base_sysconf_dir)
- cls._render_iface_shared(iface, iface_cfg)
- iface_contents[iface_name] = iface_cfg
- cls._render_physical_interfaces(network_state, iface_contents)
- cls._render_bond_interfaces(network_state, iface_contents)
- cls._render_vlan_interfaces(network_state, iface_contents)
- cls._render_bridge_interfaces(network_state, iface_contents)
- contents = {}
-        for iface_name, iface_cfg in iface_contents.items():
-            if iface_cfg or iface_cfg.children:
-                contents[iface_cfg.path] = iface_cfg.to_string()
-                for child_cfg in iface_cfg.children:
-                    if child_cfg:
-                        contents[child_cfg.path] = child_cfg.to_string()
-            if iface_cfg.routes:
-                contents[iface_cfg.routes.path] = iface_cfg.routes.to_string()
- return contents
-
- def render_network_state(self, target, network_state):
- base_sysconf_dir = os.path.join(target, self.sysconf_dir)
- for path, data in self._render_sysconfig(base_sysconf_dir,
- network_state).items():
- util.write_file(path, data)
- if self.dns_path:
- dns_path = os.path.join(target, self.dns_path)
- resolv_content = self._render_dns(network_state,
- existing_dns_path=dns_path)
- util.write_file(dns_path, resolv_content)
- if self.netrules_path:
- netrules_content = self._render_persistent_net(network_state)
- netrules_path = os.path.join(target, self.netrules_path)
- util.write_file(netrules_path, netrules_content)
diff --git a/cloudinit/net/udev.py b/cloudinit/net/udev.py
deleted file mode 100644
index 09188295..00000000
--- a/cloudinit/net/udev.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright (C) 2015 Canonical Ltd.
-#
-# Author: Ryan Harper <ryan.harper@canonical.com>
-#
-# Curtin is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Affero General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version.
-#
-# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
-# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
-# more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with Curtin. If not, see <http://www.gnu.org/licenses/>.
-
-
-def compose_udev_equality(key, value):
- """Return a udev comparison clause, like `ACTION=="add"`."""
- assert key == key.upper()
- return '%s=="%s"' % (key, value)
-
-
-def compose_udev_attr_equality(attribute, value):
- """Return a udev attribute comparison clause, like `ATTR{type}=="1"`."""
- assert attribute == attribute.lower()
- return 'ATTR{%s}=="%s"' % (attribute, value)
-
-
-def compose_udev_setting(key, value):
- """Return a udev assignment clause, like `NAME="eth0"`."""
- assert key == key.upper()
- return '%s="%s"' % (key, value)
-
-
-def generate_udev_rule(interface, mac):
- """Return a udev rule to set the name of network interface with `mac`.
-
- The rule ends up as a single line looking something like:
-
- SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*",
-        ATTR{address}=="ff:ee:dd:cc:bb:aa", NAME="eth0"
- """
- rule = ', '.join([
- compose_udev_equality('SUBSYSTEM', 'net'),
- compose_udev_equality('ACTION', 'add'),
- compose_udev_equality('DRIVERS', '?*'),
- compose_udev_attr_equality('address', mac),
- compose_udev_setting('NAME', interface),
- ])
- return '%s\n' % rule
-
-# vi: ts=4 expandtab syntax=python
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
deleted file mode 100644
index d8698a5d..00000000
--- a/cloudinit/netinfo.py
+++ /dev/null
@@ -1,249 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import re
-
-from cloudinit import log as logging
-from cloudinit import util
-
-from prettytable import PrettyTable
-
-LOG = logging.getLogger()
-
-
-def netdev_info(empty=""):
- fields = ("hwaddr", "addr", "bcast", "mask")
- (ifcfg_out, _err) = util.subp(["ifconfig", "-a"])
- devs = {}
- for line in str(ifcfg_out).splitlines():
- if len(line) == 0:
- continue
- if line[0] not in ("\t", " "):
- curdev = line.split()[0]
- devs[curdev] = {"up": False}
- for field in fields:
- devs[curdev][field] = ""
- toks = line.lower().strip().split()
- if toks[0] == "up":
- devs[curdev]['up'] = True
- # If the output of ifconfig doesn't contain the required info in the
- # obvious place, use a regex filter to be sure.
- elif len(toks) > 1:
- if re.search(r"flags=\d+<up,", toks[1]):
- devs[curdev]['up'] = True
-
- fieldpost = ""
- if toks[0] == "inet6":
- fieldpost = "6"
-
- for i in range(len(toks)):
- # older net-tools (ubuntu) show 'inet addr:xx.yy',
- # newer (freebsd and fedora) show 'inet xx.yy'
- # just skip this 'inet' entry. (LP: #1285185)
- try:
- if ((toks[i] in ("inet", "inet6") and
- toks[i + 1].startswith("addr:"))):
- continue
- except IndexError:
- pass
-
- # Couple the different items we're interested in with the correct
- # field since FreeBSD/CentOS/Fedora differ in the output.
- ifconfigfields = {
- "addr:": "addr", "inet": "addr",
- "bcast:": "bcast", "broadcast": "bcast",
- "mask:": "mask", "netmask": "mask",
- "hwaddr": "hwaddr", "ether": "hwaddr",
- "scope": "scope",
- }
- for origfield, field in ifconfigfields.items():
- target = "%s%s" % (field, fieldpost)
- if devs[curdev].get(target, ""):
- continue
- if toks[i] == "%s" % origfield:
- try:
- devs[curdev][target] = toks[i + 1]
- except IndexError:
- pass
- elif toks[i].startswith("%s" % origfield):
- devs[curdev][target] = toks[i][len(field) + 1:]
-
- if empty != "":
- for (_devname, dev) in devs.items():
- for field in dev:
- if dev[field] == "":
- dev[field] = empty
-
- return devs
-
-
-def route_info():
- (route_out, _err) = util.subp(["netstat", "-rn"])
-
- routes = {}
- routes['ipv4'] = []
- routes['ipv6'] = []
-
- entries = route_out.splitlines()[1:]
- for line in entries:
- if not line:
- continue
- toks = line.split()
- # FreeBSD shows 6 items in the routing table:
- # Destination Gateway Flags Refs Use Netif Expire
- # default 10.65.0.1 UGS 0 34920 vtnet0
- #
- # Linux netstat shows 2 more:
- # Destination Gateway Genmask Flags MSS Window irtt Iface
- # 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0
- if (len(toks) < 6 or toks[0] == "Kernel" or
- toks[0] == "Destination" or toks[0] == "Internet" or
- toks[0] == "Internet6" or toks[0] == "Routing"):
- continue
- if len(toks) < 8:
- toks.append("-")
- toks.append("-")
- toks[7] = toks[5]
- toks[5] = "-"
- entry = {
- 'destination': toks[0],
- 'gateway': toks[1],
- 'genmask': toks[2],
- 'flags': toks[3],
- 'metric': toks[4],
- 'ref': toks[5],
- 'use': toks[6],
- 'iface': toks[7],
- }
- routes['ipv4'].append(entry)
-
- try:
- (route_out6, _err6) = util.subp(["netstat", "-A", "inet6", "-n"])
- except util.ProcessExecutionError:
- pass
- else:
- entries6 = route_out6.splitlines()[1:]
- for line in entries6:
- if not line:
- continue
- toks = line.split()
- if (len(toks) < 6 or toks[0] == "Kernel" or
- toks[0] == "Proto" or toks[0] == "Active"):
- continue
- entry = {
- 'proto': toks[0],
- 'recv-q': toks[1],
- 'send-q': toks[2],
- 'local address': toks[3],
- 'foreign address': toks[4],
- 'state': toks[5],
- }
- routes['ipv6'].append(entry)
- return routes
-
-
-def getgateway():
- try:
- routes = route_info()
- except Exception:
- pass
- else:
- for r in routes.get('ipv4', []):
- if r['flags'].find("G") >= 0:
- return "%s[%s]" % (r['gateway'], r['iface'])
- return None
-
-
-def netdev_pformat():
- lines = []
- try:
- netdev = netdev_info(empty=".")
- except Exception:
- lines.append(util.center("Net device info failed", '!', 80))
- else:
- fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address']
- tbl = PrettyTable(fields)
- for (dev, d) in netdev.items():
- tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]])
- if d.get('addr6'):
- tbl.add_row([dev, d["up"],
- d["addr6"], ".", d.get("scope6"), d["hwaddr"]])
- netdev_s = tbl.get_string()
- max_len = len(max(netdev_s.splitlines(), key=len))
- header = util.center("Net device info", "+", max_len)
- lines.extend([header, netdev_s])
- return "\n".join(lines)
-
-
-def route_pformat():
- lines = []
- try:
- routes = route_info()
- except Exception as e:
- lines.append(util.center('Route info failed', '!', 80))
- util.logexc(LOG, "Route info failed: %s" % e)
- else:
- if routes.get('ipv4'):
- fields_v4 = ['Route', 'Destination', 'Gateway',
- 'Genmask', 'Interface', 'Flags']
- tbl_v4 = PrettyTable(fields_v4)
- for (n, r) in enumerate(routes.get('ipv4')):
- route_id = str(n)
- tbl_v4.add_row([route_id, r['destination'],
- r['gateway'], r['genmask'],
- r['iface'], r['flags']])
- route_s = tbl_v4.get_string()
- max_len = len(max(route_s.splitlines(), key=len))
- header = util.center("Route IPv4 info", "+", max_len)
- lines.extend([header, route_s])
- if routes.get('ipv6'):
- fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q',
- 'Local Address', 'Foreign Address', 'State']
- tbl_v6 = PrettyTable(fields_v6)
- for (n, r) in enumerate(routes.get('ipv6')):
- route_id = str(n)
- tbl_v6.add_row([route_id, r['proto'],
- r['recv-q'], r['send-q'],
- r['local address'], r['foreign address'],
- r['state']])
- route_s = tbl_v6.get_string()
- max_len = len(max(route_s.splitlines(), key=len))
- header = util.center("Route IPv6 info", "+", max_len)
- lines.extend([header, route_s])
- return "\n".join(lines)
-
-
-def debug_info(prefix='ci-info: '):
- lines = []
- netdev_lines = netdev_pformat().splitlines()
- if prefix:
- for line in netdev_lines:
- lines.append("%s%s" % (prefix, line))
- else:
- lines.extend(netdev_lines)
- route_lines = route_pformat().splitlines()
- if prefix:
- for line in route_lines:
- lines.append("%s%s" % (prefix, line))
- else:
- lines.extend(route_lines)
- return "\n".join(lines)
diff --git a/cloudinit/patcher.py b/cloudinit/patcher.py
deleted file mode 100644
index f6609d6f..00000000
--- a/cloudinit/patcher.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import imp
-import logging
-import sys
-
-# Default fallback format
-FALL_FORMAT = ('FALLBACK: %(asctime)s - %(filename)s[%(levelname)s]: ' +
- '%(message)s')
-
-
-class QuietStreamHandler(logging.StreamHandler):
- def handleError(self, record):
- pass
-
-
-def _patch_logging():
-    # Replace 'handleError' with one that is more tolerant of errors:
-    # it avoids re-notifying on exceptions, and when errors do occur
-    # it at least tries to write to sys.stderr using a fallback logger.
- fallback_handler = QuietStreamHandler(sys.stderr)
- fallback_handler.setFormatter(logging.Formatter(FALL_FORMAT))
-
- def handleError(self, record):
- try:
- fallback_handler.handle(record)
- fallback_handler.flush()
- except IOError:
- pass
- setattr(logging.Handler, 'handleError', handleError)
-
-
-def patch():
- imp.acquire_lock()
- try:
- _patch_logging()
- finally:
- imp.release_lock()
diff --git a/cloudinit/registry.py b/cloudinit/registry.py
deleted file mode 100644
index 04368ddf..00000000
--- a/cloudinit/registry.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-# This file is part of cloud-init. See LICENCE file for license information.
-#
-# vi: ts=4 expandtab
-import copy
-
-
-class DictRegistry(object):
- """A simple registry for a mapping of objects."""
-
- def __init__(self):
- self.reset()
-
- def reset(self):
- self._items = {}
-
- def register_item(self, key, item):
- """Add item to the registry."""
- if key in self._items:
- raise ValueError(
- 'Item already registered with key {0}'.format(key))
- self._items[key] = item
-
- def unregister_item(self, key, force=True):
- """Remove item from the registry."""
- if key in self._items:
- del self._items[key]
- elif not force:
- raise KeyError("%s: key not present to unregister" % key)
-
- @property
- def registered_items(self):
- """All the items that have been registered.
-
- This cannot be used to modify the contents of the registry.
- """
- return copy.copy(self._items)
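-
-
-# Usage sketch (illustrative; mirrors cloudinit.reporting.handlers):
-#   registry = DictRegistry()
-#   registry.register_item('log', LogHandler)
-#   registry.registered_items['log']   # -> LogHandler
-# Registering 'log' a second time raises ValueError.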
diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
deleted file mode 100644
index 6b41ae61..00000000
--- a/cloudinit/reporting/__init__.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-# This file is part of cloud-init. See LICENCE file for license information.
-#
-"""
-cloud-init reporting framework
-
-The reporting framework is intended to allow all parts of cloud-init to
-report events in a structured manner.
-"""
-
-from ..registry import DictRegistry
-from .handlers import available_handlers
-
-DEFAULT_CONFIG = {
- 'logging': {'type': 'log'},
-}
-
-
-def update_configuration(config):
- """Update the instanciated_handler_registry.
-
- :param config:
- The dictionary containing changes to apply. If a key is given
- with a False-ish value, the registered handler matching that name
- will be unregistered.
- """
- for handler_name, handler_config in config.items():
- if not handler_config:
- instantiated_handler_registry.unregister_item(
- handler_name, force=True)
- continue
- handler_config = handler_config.copy()
- cls = available_handlers.registered_items[handler_config.pop('type')]
- instantiated_handler_registry.unregister_item(handler_name)
- instance = cls(**handler_config)
- instantiated_handler_registry.register_item(handler_name, instance)
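-
-
-# e.g. (illustrative; 'hook' and the endpoint are hypothetical):
-#   update_configuration({'hook': {'type': 'webhook',
-#                                  'endpoint': 'http://example.com/'}})
-# instantiates and registers a WebHookHandler, while
#   update_configuration({'logging': None})
-# unregisters the default 'log' handler.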
-
-
-instantiated_handler_registry = DictRegistry()
-update_configuration(DEFAULT_CONFIG)
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
deleted file mode 100644
index df2b9b4a..00000000
--- a/cloudinit/reporting/events.py
+++ /dev/null
@@ -1,248 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-# This file is part of cloud-init. See LICENCE file for license information.
-#
-"""
-events for reporting.
-
-The events here are designed to be used with reporting.
-They can be published to registered handlers with report_event.
-"""
-import base64
-import os.path
-import time
-
-from . import instantiated_handler_registry
-
-FINISH_EVENT_TYPE = 'finish'
-START_EVENT_TYPE = 'start'
-
-DEFAULT_EVENT_ORIGIN = 'cloudinit'
-
-
-class _nameset(set):
- def __getattr__(self, name):
- if name in self:
- return name
- raise AttributeError("%s not a valid value" % name)
-
-
-status = _nameset(("SUCCESS", "WARN", "FAIL"))
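-
-
-# e.g. (illustrative): status.SUCCESS evaluates to the string 'SUCCESS';
-# an unknown attribute such as status.BOGUS raises AttributeError.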
-
-
-class ReportingEvent(object):
- """Encapsulation of event formatting."""
-
- def __init__(self, event_type, name, description,
- origin=DEFAULT_EVENT_ORIGIN, timestamp=None):
- self.event_type = event_type
- self.name = name
- self.description = description
- self.origin = origin
- if timestamp is None:
- timestamp = time.time()
- self.timestamp = timestamp
-
- def as_string(self):
- """The event represented as a string."""
- return '{0}: {1}: {2}'.format(
- self.event_type, self.name, self.description)
-
- def as_dict(self):
- """The event represented as a dictionary."""
- return {'name': self.name, 'description': self.description,
- 'event_type': self.event_type, 'origin': self.origin,
- 'timestamp': self.timestamp}
-
-
-class FinishReportingEvent(ReportingEvent):
-
- def __init__(self, name, description, result=status.SUCCESS,
- post_files=None):
- super(FinishReportingEvent, self).__init__(
- FINISH_EVENT_TYPE, name, description)
- self.result = result
- if post_files is None:
- post_files = []
- self.post_files = post_files
- if result not in status:
- raise ValueError("Invalid result: %s" % result)
-
- def as_string(self):
- return '{0}: {1}: {2}: {3}'.format(
- self.event_type, self.name, self.result, self.description)
-
- def as_dict(self):
- """The event represented as json friendly."""
- data = super(FinishReportingEvent, self).as_dict()
- data['result'] = self.result
- if self.post_files:
- data['files'] = _collect_file_info(self.post_files)
- return data
-
-
-def report_event(event):
- """Report an event to all registered event handlers.
-
- This should generally be called via one of the other functions in
- the reporting module.
-
-    :param event:
-        The event to report to all registered handlers; an instance of
-        :py:class:`ReportingEvent` or a subclass.
-    """
- for _, handler in instantiated_handler_registry.registered_items.items():
- handler.publish_event(event)
-
-
-def report_finish_event(event_name, event_description,
- result=status.SUCCESS, post_files=None):
- """Report a "finish" event.
-
- See :py:func:`.report_event` for parameter details.
- """
- event = FinishReportingEvent(event_name, event_description, result,
- post_files=post_files)
- return report_event(event)
-
-
-def report_start_event(event_name, event_description):
- """Report a "start" event.
-
- :param event_name:
- The name of the event; this should be a topic which events would
- share (e.g. it will be the same for start and finish events).
-
- :param event_description:
- A human-readable description of the event that has occurred.
- """
- event = ReportingEvent(START_EVENT_TYPE, event_name, event_description)
- return report_event(event)
-
-
-class ReportEventStack(object):
- """Context Manager for using :py:func:`report_event`
-
- This enables calling :py:func:`report_start_event` and
- :py:func:`report_finish_event` through a context manager.
-
- :param name:
- the name of the event
-
- :param description:
- the event's description, passed on to :py:func:`report_start_event`
-
-    :param message:
-        the description to use for the finish event; defaults to
-        ``description``.
-
- :param parent:
- :type parent: :py:class:ReportEventStack or None
- The parent of this event. The parent is populated with
- results of all its children. The name used in reporting
- is <parent.name>/<name>
-
- :param reporting_enabled:
- Indicates if reporting events should be generated.
- If not provided, defaults to the parent's value, or True if no parent
- is provided.
-
- :param result_on_exception:
- The result value to set if an exception is caught. default
- value is FAIL.
- """
- def __init__(self, name, description, message=None, parent=None,
- reporting_enabled=None, result_on_exception=status.FAIL,
- post_files=None):
- self.parent = parent
- self.name = name
- self.description = description
- self.message = message
- self.result_on_exception = result_on_exception
- self.result = status.SUCCESS
- if post_files is None:
- post_files = []
- self.post_files = post_files
-
-        # use the parent's reporting value if not provided
- if reporting_enabled is None:
- if parent:
- reporting_enabled = parent.reporting_enabled
- else:
- reporting_enabled = True
- self.reporting_enabled = reporting_enabled
-
- if parent:
- self.fullname = '/'.join((parent.fullname, name,))
- else:
- self.fullname = self.name
- self.children = {}
-
- def __repr__(self):
- return ("ReportEventStack(%s, %s, reporting_enabled=%s)" %
- (self.name, self.description, self.reporting_enabled))
-
- def __enter__(self):
- self.result = status.SUCCESS
- if self.reporting_enabled:
- report_start_event(self.fullname, self.description)
- if self.parent:
- self.parent.children[self.name] = (None, None)
- return self
-
- def _childrens_finish_info(self):
- for cand_result in (status.FAIL, status.WARN):
- for name, (value, msg) in self.children.items():
- if value == cand_result:
- return (value, self.message)
- return (self.result, self.message)
-
- @property
- def result(self):
- return self._result
-
- @result.setter
- def result(self, value):
- if value not in status:
- raise ValueError("'%s' not a valid result" % value)
- self._result = value
-
- @property
- def message(self):
- if self._message is not None:
- return self._message
- return self.description
-
- @message.setter
- def message(self, value):
- self._message = value
-
- def _finish_info(self, exc):
- # return tuple of description, and value
- if exc:
- return (self.result_on_exception, self.message)
- return self._childrens_finish_info()
-
- def __exit__(self, exc_type, exc_value, traceback):
- (result, msg) = self._finish_info(exc_value)
- if self.parent:
- self.parent.children[self.name] = (result, msg)
- if self.reporting_enabled:
- report_finish_event(self.fullname, msg, result,
- post_files=self.post_files)
-
-
-def _collect_file_info(files):
- if not files:
- return None
- ret = []
- for fname in files:
- if not os.path.isfile(fname):
- content = None
- else:
- with open(fname, "rb") as fp:
- content = base64.b64encode(fp.read()).decode()
- ret.append({'path': fname, 'content': content,
- 'encoding': 'base64'})
- return ret
-
-# vi: ts=4 expandtab syntax=python
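A minimal usage sketch for the ReportEventStack context manager removed above (assuming ReportEventStack is exported from cloudinit.reporting, as in this tree):

    from cloudinit import reporting

    # A "start" event is published on __enter__; on __exit__ a "finish"
    # event carries SUCCESS, or result_on_exception if the body raised.
    with reporting.ReportEventStack(
            name="check-cache",
            description="attempting to read from cache"):
        pass  # hypothetical workload goes here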
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
deleted file mode 100644
index dff20ecb..00000000
--- a/cloudinit/reporting/handlers.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# vi: ts=4 expandtab
-
-import abc
-import json
-import six
-
-from cloudinit import log as logging
-from cloudinit.registry import DictRegistry
-from cloudinit import (url_helper, util)
-
-
-LOG = logging.getLogger(__name__)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class ReportingHandler(object):
- """Base class for report handlers.
-
- Implement :meth:`~publish_event` for controlling what
- the handler does with an event.
- """
-
- @abc.abstractmethod
- def publish_event(self, event):
- """Publish an event."""
-
-
-class LogHandler(ReportingHandler):
- """Publishes events to the cloud-init log at the ``DEBUG`` log level."""
-
- def __init__(self, level="DEBUG"):
- super(LogHandler, self).__init__()
- if not isinstance(level, int):
- input_level = level
- try:
- level = getattr(logging, level.upper())
- except Exception:
- LOG.warn("invalid level '%s', using WARN", input_level)
- level = logging.WARN
- self.level = level
-
- def publish_event(self, event):
- logger = logging.getLogger(
- '.'.join(['cloudinit', 'reporting', event.event_type, event.name]))
- logger.log(self.level, event.as_string())
-
-
-class PrintHandler(ReportingHandler):
- """Print the event as a string."""
-
- def publish_event(self, event):
- print(event.as_string())
-
-
-class WebHookHandler(ReportingHandler):
- def __init__(self, endpoint, consumer_key=None, token_key=None,
- token_secret=None, consumer_secret=None, timeout=None,
- retries=None):
- super(WebHookHandler, self).__init__()
-
- if any([consumer_key, token_key, token_secret, consumer_secret]):
- self.oauth_helper = url_helper.OauthUrlHelper(
- consumer_key=consumer_key, token_key=token_key,
- token_secret=token_secret, consumer_secret=consumer_secret)
- else:
- self.oauth_helper = None
- self.endpoint = endpoint
- self.timeout = timeout
- self.retries = retries
- self.ssl_details = util.fetch_ssl_details()
-
- def publish_event(self, event):
- if self.oauth_helper:
- readurl = self.oauth_helper.readurl
- else:
- readurl = url_helper.readurl
- try:
- return readurl(
- self.endpoint, data=json.dumps(event.as_dict()),
- timeout=self.timeout,
- retries=self.retries, ssl_details=self.ssl_details)
- except Exception:
- LOG.warn("failed posting event: %s" % event.as_string())
-
-
-available_handlers = DictRegistry()
-available_handlers.register_item('log', LogHandler)
-available_handlers.register_item('print', PrintHandler)
-available_handlers.register_item('webhook', WebHookHandler)
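A sketch of how the registry above can be extended with a custom handler; the ListHandler class is hypothetical, not part of this tree:

    class ListHandler(ReportingHandler):
        """Collect published events in memory, e.g. for tests."""

        def __init__(self):
            super(ListHandler, self).__init__()
            self.events = []

        def publish_event(self, event):
            self.events.append(event)

    available_handlers.register_item('list', ListHandler)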
diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py
deleted file mode 100644
index eba5d056..00000000
--- a/cloudinit/safeyaml.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-
-
-class _CustomSafeLoader(yaml.SafeLoader):
- def construct_python_unicode(self, node):
- return self.construct_scalar(node)
-
-_CustomSafeLoader.add_constructor(
- u'tag:yaml.org,2002:python/unicode',
- _CustomSafeLoader.construct_python_unicode)
-
-
-def load(blob):
- return yaml.load(blob, Loader=_CustomSafeLoader)
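Why the custom loader exists, in one example: plain yaml.safe_load() rejects the python/unicode tag that some older YAML emitters produced, while the load() above degrades it to a plain scalar:

    blob = 'greeting: !!python/unicode "hello"'
    assert load(blob) == {'greeting': 'hello'}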
diff --git a/cloudinit/serial.py b/cloudinit/serial.py
deleted file mode 100644
index af45c13e..00000000
--- a/cloudinit/serial.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# vi: ts=4 expandtab
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-from __future__ import absolute_import
-
-try:
- from serial import Serial
-except ImportError:
- # For older versions of python (i.e. 2.6) pyserial may not exist,
- # work, or be installed, so make a dummy/fake serial class that
- # blows up when used...
- class Serial(object):
- def __init__(self, *args, **kwargs):
- pass
-
- @staticmethod
- def isOpen():
- return False
-
- @staticmethod
- def write(data):
- raise IOError("Unable to perform serial `write` operation,"
- " pyserial not installed.")
-
- @staticmethod
- def readline():
- raise IOError("Unable to perform serial `readline` operation,"
- " pyserial not installed.")
-
- @staticmethod
- def flush():
- raise IOError("Unable to perform serial `flush` operation,"
- " pyserial not installed.")
-
- @staticmethod
- def read(size=1):
- raise IOError("Unable to perform serial `read` operation,"
- " pyserial not installed.")
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
deleted file mode 100644
index 8c258ea1..00000000
--- a/cloudinit/settings.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-# Set and read for determining the cloud config file location
-CFG_ENV_NAME = "CLOUD_CFG"
-
-# This is expected to be a yaml formatted file
-CLOUD_CONFIG = '/etc/cloud/cloud.cfg'
-
- # What you get if no config is provided
-CFG_BUILTIN = {
- 'datasource_list': [
- 'NoCloud',
- 'ConfigDrive',
- 'OpenNebula',
- 'Azure',
- 'AltCloud',
- 'OVF',
- 'MAAS',
- 'GCE',
- 'OpenStack',
- 'Ec2',
- 'CloudSigma',
- 'CloudStack',
- 'SmartOS',
- 'Bigstep',
- # At the end to act as a 'catch' when none of the above work...
- 'None',
- ],
- 'def_log_file': '/var/log/cloud-init.log',
- 'log_cfgs': [],
- 'syslog_fix_perms': ['syslog:adm', 'root:adm'],
- 'system_info': {
- 'paths': {
- 'cloud_dir': '/var/lib/cloud',
- 'templates_dir': '/etc/cloud/templates/',
- },
- 'distro': 'ubuntu',
- },
- 'vendor_data': {'enabled': True, 'prefix': []},
-}
-
-# Valid frequencies of handlers/modules
-PER_INSTANCE = "once-per-instance"
-PER_ALWAYS = "always"
-PER_ONCE = "once"
-
-# Used to sanity check incoming handlers/modules frequencies
-FREQUENCIES = [PER_INSTANCE, PER_ALWAYS, PER_ONCE]
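A sketch of how these settings are typically consumed; util.read_conf and util.mergemanydict are assumed from cloudinit.util in this tree, and the merge order reflects the intent that user config wins over CFG_BUILTIN:

    import os

    from cloudinit import util
    from cloudinit.settings import CFG_BUILTIN, CFG_ENV_NAME, CLOUD_CONFIG

    cfg_path = os.environ.get(CFG_ENV_NAME, CLOUD_CONFIG)
    user_cfg = util.read_conf(cfg_path) if os.path.exists(cfg_path) else {}
    cfg = util.mergemanydict([user_cfg, CFG_BUILTIN])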
diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py
deleted file mode 100644
index 0d95f506..00000000
--- a/cloudinit/signal_handler.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import inspect
-import signal
-import sys
-
-from six import StringIO
-
-from cloudinit import log as logging
-from cloudinit import util
-from cloudinit import version as vr
-
-LOG = logging.getLogger(__name__)
-
-
-BACK_FRAME_TRACE_DEPTH = 3
-EXIT_FOR = {
- signal.SIGINT: ('Cloud-init %(version)s received SIGINT, exiting...', 1),
- signal.SIGTERM: ('Cloud-init %(version)s received SIGTERM, exiting...', 1),
- # Can't be caught...
- # signal.SIGKILL: ('Cloud-init killed, exiting...', 1),
- signal.SIGABRT: ('Cloud-init %(version)s received SIGABRT, exiting...', 1),
-}
-
-
-def _pprint_frame(frame, depth, max_depth, contents):
- if depth > max_depth or not frame:
- return
- frame_info = inspect.getframeinfo(frame)
- prefix = " " * (depth * 2)
- contents.write("%sFilename: %s\n" % (prefix, frame_info.filename))
- contents.write("%sFunction: %s\n" % (prefix, frame_info.function))
- contents.write("%sLine number: %s\n" % (prefix, frame_info.lineno))
- _pprint_frame(frame.f_back, depth + 1, max_depth, contents)
-
-
-def _handle_exit(signum, frame):
- (msg, rc) = EXIT_FOR[signum]
- msg = msg % ({'version': vr.version()})
- contents = StringIO()
- contents.write("%s\n" % (msg))
- _pprint_frame(frame, 1, BACK_FRAME_TRACE_DEPTH, contents)
- util.multi_log(contents.getvalue(),
- console=True, stderr=False, log=LOG)
- sys.exit(rc)
-
-
-def attach_handlers():
- sigs_attached = 0
- for signum in EXIT_FOR.keys():
- signal.signal(signum, _handle_exit)
- sigs_attached += len(EXIT_FOR)
- return sigs_attached
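attach_handlers() is meant to be called once, early in the main entry point, so that SIGINT/SIGTERM/SIGABRT produce a short stack summary before a non-zero exit; a minimal sketch:

    from cloudinit import signal_handler

    def main():
        signal_handler.attach_handlers()
        # ... run the cloud-init stages; a SIGTERM now logs and exits rc=1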
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
deleted file mode 100644
index a3529609..00000000
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ /dev/null
@@ -1,292 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joe VLcek <JVLcek@RedHat.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-'''
-This file contains code used to gather the user data passed to an
-instance on RHEVm and vSphere.
-'''
-
-import errno
-import os
-import os.path
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
-from cloudinit.util import ProcessExecutionError
-
-LOG = logging.getLogger(__name__)
-
-# Needed file paths
-CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
-
-# Shell command lists
-CMD_PROBE_FLOPPY = ['/sbin/modprobe', 'floppy']
-CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--timeout=5']
-
-META_DATA_NOT_SUPPORTED = {
- 'block-device-mapping': {},
- 'instance-id': 455,
- 'local-hostname': 'localhost',
- 'placement': {},
-}
-
-
-def read_user_data_callback(mount_dir):
- '''
- Description:
- This callback will be applied by util.mount_cb() on the mounted
- file.
-
- The Deltacloud file name contains 'deltacloud'. Those not using
- Deltacloud but instrumenting the injection themselves may drop
- 'deltacloud' from the file name.
-
- Input:
- mount_dir - Mount directory
-
- Returns:
- User Data
-
- '''
-
- deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
- user_data_file = mount_dir + '/user-data.txt'
-
- # First try deltacloud_user_data_file. On failure try user_data_file.
- try:
- user_data = util.load_file(deltacloud_user_data_file).strip()
- except IOError:
- try:
- user_data = util.load_file(user_data_file).strip()
- except IOError:
- util.logexc(LOG, 'Failed accessing user data file.')
- return None
-
- return user_data
-
-
-class DataSourceAltCloud(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed = None
- self.supported_seed_starts = ("/", "file://")
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- return "%s [seed=%s]" % (root, self.seed)
-
- def get_cloud_type(self):
- '''
- Description:
- Get the type for the cloud back end this instance is running on
- by examining the string returned by reading the dmi data.
-
- Input:
- None
-
- Returns:
- One of the following strings:
- 'RHEV', 'VSPHERE' or 'UNKNOWN'
-
- '''
-
- uname_arch = os.uname()[4]
- if uname_arch.startswith("arm") or uname_arch == "aarch64":
- # Disabling because dmi data is not available on ARM processors
- LOG.debug("Disabling AltCloud datasource on arm (LP: #1243287)")
- return 'UNKNOWN'
-
- system_name = util.read_dmi_data("system-product-name")
- if not system_name:
- return 'UNKNOWN'
-
- sys_name = system_name.upper()
-
- if sys_name.startswith('RHEV'):
- return 'RHEV'
-
- if sys_name.startswith('VMWARE'):
- return 'VSPHERE'
-
- return 'UNKNOWN'
-
- def get_data(self):
- '''
- Description:
- User Data is passed to the launching instance which
- is used to perform instance configuration.
-
- Cloud providers expose the user data differently.
- It is necessary to determine which cloud provider
- the current instance is running on to determine
- how to access the user data. Images built with
- image factory will contain a CLOUD_INFO_FILE which
- contains a string identifying the cloud provider.
-
- Images not built with Imagefactory will try to
- determine what the cloud provider is based on system
- information.
- '''
-
- LOG.debug('Invoked get_data()')
-
- if os.path.exists(CLOUD_INFO_FILE):
- try:
- cloud_type = util.load_file(CLOUD_INFO_FILE).strip().upper()
- except IOError:
- util.logexc(LOG, 'Unable to access cloud info file at %s.',
- CLOUD_INFO_FILE)
- return False
- else:
- cloud_type = self.get_cloud_type()
-
- LOG.debug('cloud_type: %s', cloud_type)
-
- if 'RHEV' in cloud_type:
- if self.user_data_rhevm():
- return True
- elif 'VSPHERE' in cloud_type:
- if self.user_data_vsphere():
- return True
- else:
- # there was no recognized alternate cloud type
- # indicating this handler should not be used.
- return False
-
- # No user data found
- util.logexc(LOG, 'Failed accessing user data.')
- return False
-
- def user_data_rhevm(self):
- '''
- RHEVM specific userdata read
-
- If on RHEV-M the user data will be contained on the
- floppy device in file <user_data_file>
- To access it:
- modprobe floppy
-
- Leverage util.mount_cb to:
- mkdir <tmp mount dir>
- mount /dev/fd0 <tmp mount dir>
- The call back passed to util.mount_cb will do:
- read <tmp mount dir>/<user_data_file>
- '''
-
- return_str = None
-
- # modprobe floppy
- try:
- cmd = CMD_PROBE_FLOPPY
- (cmd_out, _err) = util.subp(cmd)
- LOG.debug('Command: %s\nOutput: %s', ' '.join(cmd), cmd_out)
- except ProcessExecutionError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
- _err.message)
- return False
- except OSError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
- return False
-
- floppy_dev = '/dev/fd0'
-
- # udevadm settle for floppy device
- try:
- cmd = CMD_UDEVADM_SETTLE
- cmd.append('--exit-if-exists=' + floppy_dev)
- (cmd_out, _err) = util.subp(cmd)
- LOG.debug('Command: %s\nOutput: %s', ' '.join(cmd), cmd_out)
- except ProcessExecutionError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
- _err.message)
- return False
- except OSError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
- _err.message)
- return False
-
- try:
- return_str = util.mount_cb(floppy_dev, read_user_data_callback)
- except OSError as err:
- if err.errno != errno.ENOENT:
- raise
- except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for user data",
- floppy_dev)
-
- self.userdata_raw = return_str
- self.metadata = META_DATA_NOT_SUPPORTED
-
- if return_str:
- return True
- else:
- return False
-
- def user_data_vsphere(self):
- '''
- vSphere specific userdata read
-
- If on vSphere the user data will be contained on the
- cdrom device in file <user_data_file>
- To access it:
- Leverage util.mount_cb to:
- mkdir <tmp mount dir>
- mount /dev/fd0 <tmp mount dir>
- The call back passed to util.mount_cb will do:
- read <tmp mount dir>/<user_data_file>
- '''
-
- return_str = None
- cdrom_list = util.find_devs_with('LABEL=CDROM')
- for cdrom_dev in cdrom_list:
- try:
- return_str = util.mount_cb(cdrom_dev, read_user_data_callback)
- if return_str:
- break
- except OSError as err:
- if err.errno != errno.ENOENT:
- raise
- except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for user "
- "data", cdrom_dev)
-
- self.userdata_raw = return_str
- self.metadata = META_DATA_NOT_SUPPORTED
-
- if return_str:
- return True
- else:
- return False
-
-# Used to match classes to dependencies
-# Source DataSourceAltCloud does not really depend on networking.
-# In the future 'dsmode' like behavior can be added to offer user
-# the ability to run before networking.
-datasources = [
- (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
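A sketch of how the dependency matching above is exercised by the datasource loader (the exact call site varies); list_from_depends matches on the full dependency set:

    get_datasource_list([sources.DEP_FILESYSTEM, sources.DEP_NETWORK])
    # -> [DataSourceAltCloud]
    get_datasource_list([sources.DEP_FILESYSTEM])
    # -> []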
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
deleted file mode 100644
index 8c7e8673..00000000
--- a/cloudinit/sources/DataSourceAzure.py
+++ /dev/null
@@ -1,651 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2013 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import base64
-import contextlib
-import crypt
-import fnmatch
-import os
-import os.path
-import time
-import xml.etree.ElementTree as ET
-
-from xml.dom import minidom
-
-from cloudinit.sources.helpers.azure import get_metadata_from_fabric
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_ALWAYS
-from cloudinit import sources
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-DS_NAME = 'Azure'
-DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
-AGENT_START = ['service', 'walinuxagent', 'start']
-BOUNCE_COMMAND = [
- 'sh', '-xc',
- "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"
-]
-
-BUILTIN_DS_CONFIG = {
- 'agent_command': AGENT_START,
- 'data_dir': "/var/lib/waagent",
- 'set_hostname': True,
- 'hostname_bounce': {
- 'interface': 'eth0',
- 'policy': True,
- 'command': BOUNCE_COMMAND,
- 'hostname_command': 'hostname',
- },
- 'disk_aliases': {'ephemeral0': '/dev/sdb'},
-}
-
-BUILTIN_CLOUD_CONFIG = {
- 'disk_setup': {
- 'ephemeral0': {'table_type': 'gpt',
- 'layout': [100],
- 'overwrite': True},
- },
- 'fs_setup': [{'filesystem': 'ext4',
- 'device': 'ephemeral0.1',
- 'replace_fs': 'ntfs'}],
-}
-
-DS_CFG_PATH = ['datasource', DS_NAME]
-DEF_EPHEMERAL_LABEL = 'Temporary Storage'
-
-# The redacted password fails to meet password complexity requirements
-# so we can safely use this to mask/redact the password in the ovf-env.xml
-DEF_PASSWD_REDACTION = 'REDACTED'
-
-
-def get_hostname(hostname_command='hostname'):
- return util.subp(hostname_command, capture=True)[0].strip()
-
-
-def set_hostname(hostname, hostname_command='hostname'):
- util.subp([hostname_command, hostname])
-
-
-@contextlib.contextmanager
-def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
- """
- Set a temporary hostname, restoring the previous hostname on exit.
-
- Will have the value of the previous hostname when used as a context
- manager, or None if the hostname was not changed.
- """
- policy = cfg['hostname_bounce']['policy']
- previous_hostname = get_hostname(hostname_command)
- if (not util.is_true(cfg.get('set_hostname')) or
- util.is_false(policy) or
- (previous_hostname == temp_hostname and policy != 'force')):
- yield None
- return
- set_hostname(temp_hostname, hostname_command)
- try:
- yield previous_hostname
- finally:
- set_hostname(previous_hostname, hostname_command)
-
-
-class DataSourceAzureNet(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'azure')
- self.cfg = {}
- self.seed = None
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
- BUILTIN_DS_CONFIG])
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- return "%s [seed=%s]" % (root, self.seed)
-
- def get_metadata_from_agent(self):
- temp_hostname = self.metadata.get('local-hostname')
- hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
- with temporary_hostname(temp_hostname, self.ds_cfg,
- hostname_command=hostname_command) \
- as previous_hostname:
- if (previous_hostname is not None and
- util.is_true(self.ds_cfg.get('set_hostname'))):
- cfg = self.ds_cfg['hostname_bounce']
- try:
- perform_hostname_bounce(hostname=temp_hostname,
- cfg=cfg,
- prev_hostname=previous_hostname)
- except Exception as e:
- LOG.warn("Failed publishing hostname: %s", e)
- util.logexc(LOG, "handling set_hostname failed")
-
- try:
- invoke_agent(self.ds_cfg['agent_command'])
- except util.ProcessExecutionError:
- # claim the datasource even if the command failed
- util.logexc(LOG, "agent command '%s' failed.",
- self.ds_cfg['agent_command'])
-
- ddir = self.ds_cfg['data_dir']
-
- fp_files = []
- key_value = None
- for pk in self.cfg.get('_pubkeys', []):
- if pk.get('value', None):
- key_value = pk['value']
- LOG.debug("ssh authentication: using value from fabric")
- else:
- bname = str(pk['fingerprint'] + ".crt")
- fp_files += [os.path.join(ddir, bname)]
- LOG.debug("ssh authentication: "
- "using fingerprint from fabirc")
-
- missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
- func=wait_for_files,
- args=(fp_files,))
- if len(missing):
- LOG.warn("Did not find files, but going on: %s", missing)
-
- metadata = {}
- metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
- return metadata
-
- def get_data(self):
- # azure removes/ejects the cdrom containing the ovf-env.xml
- # file on reboot. So, in order to successfully reboot we
- # need to look in the datadir and consider that valid
- ddir = self.ds_cfg['data_dir']
-
- candidates = [self.seed_dir]
- candidates.extend(list_possible_azure_ds_devs())
- if ddir:
- candidates.append(ddir)
-
- found = None
-
- for cdev in candidates:
- try:
- if cdev.startswith("/dev/"):
- ret = util.mount_cb(cdev, load_azure_ds_dir)
- else:
- ret = load_azure_ds_dir(cdev)
-
- except NonAzureDataSource:
- continue
- except BrokenAzureDataSource as exc:
- raise exc
- except util.MountFailedError:
- LOG.warn("%s was not mountable", cdev)
- continue
-
- (md, self.userdata_raw, cfg, files) = ret
- self.seed = cdev
- self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
- self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
- found = cdev
-
- LOG.debug("found datasource in %s", cdev)
- break
-
- if not found:
- return False
-
- if found == ddir:
- LOG.debug("using files cached in %s", ddir)
-
- # azure / hyper-v provides random data here
- seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
- quiet=True, decode=False)
- if seed:
- self.metadata['random_seed'] = seed
-
- # now update ds_cfg to reflect contents pass in config
- user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
- self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
-
- # walinux agent writes files world readable, but expects
- # the directory to be protected.
- write_files(ddir, files, dirmode=0o700)
-
- if self.ds_cfg['agent_command'] == '__builtin__':
- metadata_func = get_metadata_from_fabric
- else:
- metadata_func = self.get_metadata_from_agent
- try:
- fabric_data = metadata_func()
- except Exception as exc:
- LOG.info("Error communicating with Azure fabric; assume we aren't"
- " on Azure.", exc_info=True)
- return False
-
- self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
- self.metadata.update(fabric_data)
-
- found_ephemeral = find_fabric_formatted_ephemeral_disk()
- if found_ephemeral:
- self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral
- LOG.debug("using detected ephemeral0 of %s", found_ephemeral)
-
- cc_modules_override = support_new_ephemeral(self.sys_cfg)
- if cc_modules_override:
- self.cfg['cloud_config_modules'] = cc_modules_override
-
- return True
-
- def device_name_to_device(self, name):
- return self.ds_cfg['disk_aliases'].get(name)
-
- def get_config_obj(self):
- return self.cfg
-
- def check_instance_id(self, sys_cfg):
- # quickly (local check only) if self.instance_id is still valid
- return sources.instance_id_matches_system_uuid(self.get_instance_id())
-
-
-def count_files(mp):
- return len(fnmatch.filter(os.listdir(mp), '*[!cdrom]*'))
-
-
-def find_fabric_formatted_ephemeral_part():
- """
- Locate the first fabric formatted ephemeral device.
- """
- potential_locations = ['/dev/disk/cloud/azure_resource-part1',
- '/dev/disk/azure/resource-part1']
- device_location = None
- for potential_location in potential_locations:
- if os.path.exists(potential_location):
- device_location = potential_location
- break
- if device_location is None:
- return None
- ntfs_devices = util.find_devs_with("TYPE=ntfs")
- real_device = os.path.realpath(device_location)
- if real_device in ntfs_devices:
- return device_location
- return None
-
-
-def find_fabric_formatted_ephemeral_disk():
- """
- Get the ephemeral disk.
- """
- part_dev = find_fabric_formatted_ephemeral_part()
- if part_dev:
- return part_dev.split('-')[0]
- return None
-
-
-def support_new_ephemeral(cfg):
- """
- Windows Azure makes ephemeral devices ephemeral to boot; an ephemeral
- device may be presented as a fresh device, or not.
-
- Since the knowledge of when a disk is supposed to be plowed under is
- specific to Windows Azure, the logic resides here in the datasource. When a
- new ephemeral device is detected, cloud-init overrides the default
- frequency for both disk-setup and mounts for the current boot only.
- """
- device = find_fabric_formatted_ephemeral_part()
- if not device:
- LOG.debug("no default fabric formated ephemeral0.1 found")
- return None
- LOG.debug("fabric formated ephemeral0.1 device at %s", device)
-
- file_count = 0
- try:
- file_count = util.mount_cb(device, count_files)
- except Exception:
- return None
- LOG.debug("fabric prepared ephmeral0.1 has %s files on it", file_count)
-
- if file_count >= 1:
- LOG.debug("fabric prepared ephemeral0.1 will be preserved")
- return None
- else:
- # if device was already mounted, then we need to unmount it
- # race conditions could allow a check-then-unmount to produce
- # a false positive, so just unmount and then check.
- try:
- util.subp(['umount', device])
- except util.ProcessExecutionError as e:
- if device in util.mounts():
- LOG.warn("Failed to unmount %s, will not reformat.", device)
- LOG.debug("Failed umount: %s", e)
- return None
-
- LOG.debug("cloud-init will format ephemeral0.1 this boot.")
- LOG.debug("setting disk_setup and mounts modules 'always' for this boot")
-
- cc_modules = cfg.get('cloud_config_modules')
- if not cc_modules:
- return None
-
- mod_list = []
- for mod in cc_modules:
- if mod in ("disk_setup", "mounts"):
- mod_list.append([mod, PER_ALWAYS])
- LOG.debug("set module '%s' to 'always' for this boot", mod)
- else:
- mod_list.append(mod)
- return mod_list
-
-
-def perform_hostname_bounce(hostname, cfg, prev_hostname):
- # set the hostname to 'hostname' if it is not already set to that.
- # then, if policy is not off, bounce the interface using command
- command = cfg['command']
- interface = cfg['interface']
- policy = cfg['policy']
-
- msg = ("hostname=%s policy=%s interface=%s" %
- (hostname, policy, interface))
- env = os.environ.copy()
- env['interface'] = interface
- env['hostname'] = hostname
- env['old_hostname'] = prev_hostname
-
- if command == "builtin":
- command = BOUNCE_COMMAND
-
- LOG.debug("pubhname: publishing hostname [%s]", msg)
- shell = not isinstance(command, (list, tuple))
- # capture=False, see comments in bug 1202758 and bug 1206164.
- util.log_time(logfunc=LOG.debug, msg="publishing hostname",
- get_uptime=True, func=util.subp,
- kwargs={'args': command, 'shell': shell, 'capture': False,
- 'env': env})
-
-
-def crtfile_to_pubkey(fname, data=None):
- pipeline = ('openssl x509 -noout -pubkey < "$0" |'
- 'ssh-keygen -i -m PKCS8 -f /dev/stdin')
- (out, _err) = util.subp(['sh', '-c', pipeline, fname],
- capture=True, data=data)
- return out.rstrip()
-
-
-def pubkeys_from_crt_files(flist):
- pubkeys = []
- errors = []
- for fname in flist:
- try:
- pubkeys.append(crtfile_to_pubkey(fname))
- except util.ProcessExecutionError:
- errors.append(fname)
-
- if errors:
- LOG.warn("failed to convert the crt files to pubkey: %s", errors)
-
- return pubkeys
-
-
-def wait_for_files(flist, maxwait=60, naplen=.5):
- need = set(flist)
- waited = 0
- while waited < maxwait:
- need -= set([f for f in need if os.path.exists(f)])
- if len(need) == 0:
- return []
- time.sleep(naplen)
- waited += naplen
- return need
-
-
-def write_files(datadir, files, dirmode=None):
-
- def _redact_password(cnt, fname):
- """Azure provides the UserPassword in plain text. So we redact it"""
- try:
- root = ET.fromstring(cnt)
- for elem in root.iter():
- if ('UserPassword' in elem.tag and
- elem.text != DEF_PASSWD_REDACTION):
- elem.text = DEF_PASSWD_REDACTION
- return ET.tostring(root)
- except Exception:
- LOG.critical("failed to redact userpassword in %s", fname)
- return cnt
-
- if not datadir:
- return
- if not files:
- files = {}
- util.ensure_dir(datadir, dirmode)
- for (name, content) in files.items():
- fname = os.path.join(datadir, name)
- if 'ovf-env.xml' in name:
- content = _redact_password(content, fname)
- util.write_file(filename=fname, content=content, mode=0o600)
-
-
-def invoke_agent(cmd):
- # this is a function itself to simplify patching it for test
- if cmd:
- LOG.debug("invoking agent: %s", cmd)
- util.subp(cmd, shell=(not isinstance(cmd, list)))
- else:
- LOG.debug("not invoking agent")
-
-
-def find_child(node, filter_func):
- ret = []
- if not node.hasChildNodes():
- return ret
- for child in node.childNodes:
- if filter_func(child):
- ret.append(child)
- return ret
-
-
-def load_azure_ovf_pubkeys(sshnode):
- # This parses a 'SSH' node formatted like below, and returns
- # an array of dicts.
- # [{'fp': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7',
- # 'path': 'where/to/go'}]
- #
- # <SSH><PublicKeys>
- # <PublicKey><Fingerprint>ABC</FingerPrint><Path>/ABC</Path>
- # ...
- # </PublicKeys></SSH>
- results = find_child(sshnode, lambda n: n.localName == "PublicKeys")
- if len(results) == 0:
- return []
- if len(results) > 1:
- raise BrokenAzureDataSource("Multiple 'PublicKeys'(%s) in SSH node" %
- len(results))
-
- pubkeys_node = results[0]
- pubkeys = find_child(pubkeys_node, lambda n: n.localName == "PublicKey")
-
- if len(pubkeys) == 0:
- return []
-
- found = []
- text_node = minidom.Document.TEXT_NODE
-
- for pk_node in pubkeys:
- if not pk_node.hasChildNodes():
- continue
-
- cur = {'fingerprint': "", 'path': "", 'value': ""}
- for child in pk_node.childNodes:
- if child.nodeType == text_node or not child.localName:
- continue
-
- name = child.localName.lower()
-
- if name not in cur.keys():
- continue
-
- if (len(child.childNodes) != 1 or
- child.childNodes[0].nodeType != text_node):
- continue
-
- cur[name] = child.childNodes[0].wholeText.strip()
- found.append(cur)
-
- return found
-
-
-def read_azure_ovf(contents):
- try:
- dom = minidom.parseString(contents)
- except Exception as e:
- raise BrokenAzureDataSource("invalid xml: %s" % e)
-
- results = find_child(dom.documentElement,
- lambda n: n.localName == "ProvisioningSection")
-
- if len(results) == 0:
- raise NonAzureDataSource("No ProvisioningSection")
- if len(results) > 1:
- raise BrokenAzureDataSource("found '%d' ProvisioningSection items" %
- len(results))
- provSection = results[0]
-
- lpcs_nodes = find_child(provSection,
- lambda n:
- n.localName == "LinuxProvisioningConfigurationSet")
-
- if len(lpcs_nodes) == 0:
- raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
- if len(lpcs_nodes) > 1:
- raise BrokenAzureDataSource("found '%d' %ss" %
- ("LinuxProvisioningConfigurationSet",
- len(lpcs_nodes)))
- lpcs = lpcs_nodes[0]
-
- if not lpcs.hasChildNodes():
- raise BrokenAzureDataSource("no child nodes of configuration set")
-
- md_props = 'seedfrom'
- md = {'azure_data': {}}
- cfg = {}
- ud = ""
- password = None
- username = None
-
- for child in lpcs.childNodes:
- if child.nodeType == dom.TEXT_NODE or not child.localName:
- continue
-
- name = child.localName.lower()
-
- simple = False
- value = ""
- if (len(child.childNodes) == 1 and
- child.childNodes[0].nodeType == dom.TEXT_NODE):
- simple = True
- value = child.childNodes[0].wholeText
-
- attrs = dict([(k, v) for k, v in child.attributes.items()])
-
- # we accept either UserData or CustomData. If both are present
- # then behavior is undefined.
- if name == "userdata" or name == "customdata":
- if attrs.get('encoding') in (None, "base64"):
- ud = base64.b64decode(''.join(value.split()))
- else:
- ud = value
- elif name == "username":
- username = value
- elif name == "userpassword":
- password = value
- elif name == "hostname":
- md['local-hostname'] = value
- elif name == "dscfg":
- if attrs.get('encoding') in (None, "base64"):
- dscfg = base64.b64decode(''.join(value.split()))
- else:
- dscfg = value
- cfg['datasource'] = {DS_NAME: util.load_yaml(dscfg, default={})}
- elif name == "ssh":
- cfg['_pubkeys'] = load_azure_ovf_pubkeys(child)
- elif name == "disablesshpasswordauthentication":
- cfg['ssh_pwauth'] = util.is_false(value)
- elif simple:
- if name in md_props:
- md[name] = value
- else:
- md['azure_data'][name] = value
-
- defuser = {}
- if username:
- defuser['name'] = username
- if password and DEF_PASSWD_REDACTION != password:
- defuser['passwd'] = encrypt_pass(password)
- defuser['lock_passwd'] = False
-
- if defuser:
- cfg['system_info'] = {'default_user': defuser}
-
- if 'ssh_pwauth' not in cfg and password:
- cfg['ssh_pwauth'] = True
-
- return (md, ud, cfg)
-
-
-def encrypt_pass(password, salt_id="$6$"):
- return crypt.crypt(password, salt_id + util.rand_str(strlen=16))
-
-
-def list_possible_azure_ds_devs():
- # return a sorted list of devices that might have a azure datasource
- devlist = []
- for fstype in ("iso9660", "udf"):
- devlist.extend(util.find_devs_with("TYPE=%s" % fstype))
-
- devlist.sort(reverse=True)
- return devlist
-
-
-def load_azure_ds_dir(source_dir):
- ovf_file = os.path.join(source_dir, "ovf-env.xml")
-
- if not os.path.isfile(ovf_file):
- raise NonAzureDataSource("No ovf-env file found")
-
- with open(ovf_file, "rb") as fp:
- contents = fp.read()
-
- md, ud, cfg = read_azure_ovf(contents)
- return (md, ud, cfg, {'ovf-env.xml': contents})
-
-
-class BrokenAzureDataSource(Exception):
- pass
-
-
-class NonAzureDataSource(Exception):
- pass
-
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
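The smallest document shape that read_azure_ovf() above accepts, as a sketch; all values are hypothetical:

    SAMPLE_OVF = """<Environment>
     <ProvisioningSection>
      <LinuxProvisioningConfigurationSet>
       <HostName>azure-vm</HostName>
       <UserName>ubuntu</UserName>
      </LinuxProvisioningConfigurationSet>
     </ProvisioningSection>
    </Environment>"""

    md, ud, cfg = read_azure_ovf(SAMPLE_OVF)
    # md['local-hostname'] == 'azure-vm'
    # cfg['system_info']['default_user']['name'] == 'ubuntu'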
diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py
deleted file mode 100644
index f80956a5..00000000
--- a/cloudinit/sources/DataSourceBigstep.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#
-# Copyright (C) 2015-2016 Bigstep Cloud Ltd.
-#
-# Author: Alexandru Sirbu <alexandru.sirbu@bigstep.com>
-#
-
-import errno
-import json
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-class DataSourceBigstep(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.metadata = {}
- self.vendordata_raw = ""
- self.userdata_raw = ""
-
- def get_data(self, apply_filter=False):
- url = get_url_from_file()
- if url is None:
- return False
- response = url_helper.readurl(url)
- decoded = json.loads(response.contents)
- self.metadata = decoded["metadata"]
- self.vendordata_raw = decoded["vendordata_raw"]
- self.userdata_raw = decoded["userdata_raw"]
- return True
-
-
-def get_url_from_file():
- try:
- content = util.load_file("/var/lib/cloud/data/seed/bigstep/url")
- except IOError as e:
- # If the file doesn't exist, then the server probably isn't a Bigstep
- # instance; otherwise, another problem exists which needs investigation
- if e.errno == errno.ENOENT:
- return None
- else:
- raise
- return content
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceBigstep, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
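A sketch of the JSON payload shape get_data() above expects from the seed URL; the values are hypothetical:

    import json

    sample = ('{"metadata": {"instance-id": "i-abc123"},'
              ' "vendordata_raw": "", "userdata_raw": ""}')
    decoded = json.loads(sample)
    assert set(decoded) == {"metadata", "vendordata_raw", "userdata_raw"}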
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
deleted file mode 100644
index d1f806d6..00000000
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 CloudSigma
-#
-# Author: Kiril Vladimiroff <kiril.vladimiroff@cloudsigma.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from base64 import b64decode
-import os
-import re
-
-from cloudinit.cs_utils import Cepko
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-class DataSourceCloudSigma(sources.DataSource):
- """
- Uses cepko in order to gather the server context from the VM.
-
- For more information about CloudSigma's Server Context:
- http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
- """
- def __init__(self, sys_cfg, distro, paths):
- self.cepko = Cepko()
- self.ssh_public_key = ''
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
-
- def is_running_in_cloudsigma(self):
- """
- Uses dmi data to detect if this instance of cloud-init is running
- in the CloudSigma's infrastructure.
- """
- uname_arch = os.uname()[4]
- if uname_arch.startswith("arm") or uname_arch == "aarch64":
- # Disabling because dmi data is not available on ARM processors
- LOG.debug("Disabling CloudSigma datasource on arm (LP: #1243287)")
- return False
-
- LOG.debug("determining hypervisor product name via dmi data")
- sys_product_name = util.read_dmi_data("system-product-name")
- if not sys_product_name:
- LOG.warn("failed to query dmi data for system product name")
- return False
-
- LOG.debug("detected hypervisor as %s", sys_product_name)
- return 'cloudsigma' in sys_product_name.lower()
-
- def get_data(self):
- """
- Metadata is the whole server context and /meta/cloud-config is used
- as userdata.
- """
- if not self.is_running_in_cloudsigma():
- return False
-
- try:
- server_context = self.cepko.all().result
- server_meta = server_context['meta']
- except Exception:
- # TODO: check for explicit "config on", and then warn
- # but since no explicit config is available now, just debug.
- LOG.debug("CloudSigma: Unable to read from serial port")
- return False
-
- self.dsmode = self._determine_dsmode(
- [server_meta.get('cloudinit-dsmode')])
- if self.dsmode == sources.DSMODE_DISABLED:
- return False
-
- base64_fields = server_meta.get('base64_fields', '').split(',')
- self.userdata_raw = server_meta.get('cloudinit-user-data', "")
- if 'cloudinit-user-data' in base64_fields:
- self.userdata_raw = b64decode(self.userdata_raw)
- if 'cloudinit' in server_context.get('vendor_data', {}):
- self.vendordata_raw = server_context["vendor_data"]["cloudinit"]
-
- self.metadata = server_context
- self.ssh_public_key = server_meta['ssh_public_key']
-
- return True
-
- def get_hostname(self, fqdn=False, resolve_ip=False):
- """
- Cleans up and uses the server's name if the latter is set. Otherwise
- the first part from uuid is being used.
- """
- if re.match(r'^[A-Za-z0-9 _.-]+$', self.metadata['name']):
- return self.metadata['name'][:61]
- else:
- return self.metadata['uuid'].split('-')[0]
-
- def get_public_ssh_keys(self):
- return [self.ssh_public_key]
-
- def get_instance_id(self):
- return self.metadata['uuid']
-
-
-# Legacy: Must be present in case we load an old pkl object
-DataSourceCloudSigmaNet = DataSourceCloudSigma
-
-# Used to match classes to dependencies. Since this datasource uses the
-# serial port, network is not really required, so it's okay to load
-# without it, too.
-datasources = [
- (DataSourceCloudSigma, (sources.DEP_FILESYSTEM,)),
-]
-
-
-def get_datasource_list(depends):
- """
- Return a list of data sources that match this set of dependencies
- """
- return sources.list_from_depends(depends, datasources)
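Because the server context arrives over the guest serial port, the datasource declares a filesystem-only dependency and can load before networking; a sketch of the matching:

    get_datasource_list([sources.DEP_FILESYSTEM])
    # -> [DataSourceCloudSigma]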
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
deleted file mode 100644
index 4de1f563..00000000
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Cosmin Luta
-# Copyright (C) 2012 Yahoo! Inc.
-# Copyright (C) 2012 Gerard Dethier
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Cosmin Luta <q4break@gmail.com>
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-# Author: Gerard Dethier <g.dethier@gmail.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from socket import inet_ntoa
-from struct import pack
-import time
-
-from cloudinit import ec2_utils as ec2
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper as uhelp
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-class CloudStackPasswordServerClient(object):
- """
- Implements password fetching from the CloudStack password server.
-
- http://cloudstack-administration.readthedocs.org/
- en/latest/templates.html#adding-password-management-to-your-templates
- has documentation about the system. This implementation is following that
- found at
- https://github.com/shankerbalan/cloudstack-scripts/
- blob/master/cloud-set-guest-password-debian
- """
-
- def __init__(self, virtual_router_address):
- self.virtual_router_address = virtual_router_address
-
- def _do_request(self, domu_request):
- # The password server was, in the past, a broken HTTP server, but it
- # is now fixed. wget handles this seamlessly, so it's easier to shell
- # out to it than to write our own handling code.
- output, _ = util.subp([
- 'wget', '--quiet', '--tries', '3', '--timeout', '20',
- '--output-document', '-', '--header',
- 'DomU_Request: {0}'.format(domu_request),
- '{0}:8080'.format(self.virtual_router_address)
- ])
- return output.strip()
-
- def get_password(self):
- password = self._do_request('send_my_password')
- if password in ['', 'saved_password']:
- return None
- if password == 'bad_request':
- raise RuntimeError('Error when attempting to fetch root password.')
- self._do_request('saved_password')
- return password
-
-
-class DataSourceCloudStack(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'cs')
- # Cloudstack has its metadata/userdata URLs located at
- # http://<virtual-router-ip>/latest/
- self.api_ver = 'latest'
- self.vr_addr = get_vr_address()
- if not self.vr_addr:
- raise RuntimeError("No virtual router found!")
- self.metadata_address = "http://%s/" % (self.vr_addr,)
- self.cfg = {}
-
- def _get_url_settings(self):
- mcfg = self.ds_cfg
- max_wait = 120
- try:
- max_wait = int(mcfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
-
- timeout = 50
- try:
- timeout = int(mcfg.get("timeout", timeout))
- except Exception:
- util.logexc(LOG, "Failed to get timeout, using %s", timeout)
-
- return (max_wait, timeout)
-
- def wait_for_metadata_service(self):
- (max_wait, timeout) = self._get_url_settings()
- if max_wait == 0:
- return False
-
- urls = [uhelp.combine_url(self.metadata_address,
- 'latest/meta-data/instance-id')]
- start_time = time.time()
- url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout, status_cb=LOG.warn)
-
- if url:
- LOG.debug("Using metadata source: '%s'", url)
- else:
- LOG.critical(("Giving up on waiting for the metadata from %s"
- " after %s seconds"),
- urls, int(time.time() - start_time))
-
- return bool(url)
-
- def get_config_obj(self):
- return self.cfg
-
- def get_data(self):
- seed_ret = {}
- if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
- self.userdata_raw = seed_ret['user-data']
- self.metadata = seed_ret['meta-data']
- LOG.debug("Using seeded cloudstack data from: %s", self.seed_dir)
- return True
- try:
- if not self.wait_for_metadata_service():
- return False
- start_time = time.time()
- self.userdata_raw = ec2.get_instance_userdata(
- self.api_ver, self.metadata_address)
- self.metadata = ec2.get_instance_metadata(self.api_ver,
- self.metadata_address)
- LOG.debug("Crawl of metadata service took %s seconds",
- int(time.time() - start_time))
- password_client = CloudStackPasswordServerClient(self.vr_addr)
- try:
- set_password = password_client.get_password()
- except Exception:
- util.logexc(LOG,
- 'Failed to fetch password from virtual router %s',
- self.vr_addr)
- else:
- if set_password:
- self.cfg = {
- 'ssh_pwauth': True,
- 'password': set_password,
- 'chpasswd': {
- 'expire': False,
- },
- }
- return True
- except Exception:
- util.logexc(LOG, 'Failed fetching from metadata service %s',
- self.metadata_address)
- return False
-
- def get_instance_id(self):
- return self.metadata['instance-id']
-
- @property
- def availability_zone(self):
- return self.metadata['availability-zone']
-
-
-def get_default_gateway():
- # Returns the default gateway ip address in the dotted format.
- lines = util.load_file("/proc/net/route").splitlines()
- for line in lines:
- items = line.split("\t")
- if items[1] == "00000000":
- # Found the default route, get the gateway
- gw = inet_ntoa(pack("<L", int(items[2], 16)))
- LOG.debug("Found default route, gateway is %s", gw)
- return gw
- return None
-
-
-def get_dhclient_d():
- # find lease files directory
- supported_dirs = ["/var/lib/dhclient", "/var/lib/dhcp"]
- for d in supported_dirs:
- if os.path.exists(d):
- LOG.debug("Using %s lease directory", d)
- return d
- return None
-
-
-def get_latest_lease():
- # find latest lease file
- lease_d = get_dhclient_d()
- if not lease_d:
- return None
- lease_files = os.listdir(lease_d)
- latest_mtime = -1
- latest_file = None
- for file_name in lease_files:
- if file_name.startswith("dhclient.") and \
- (file_name.endswith(".lease") or file_name.endswith(".leases")):
- abs_path = os.path.join(lease_d, file_name)
- mtime = os.path.getmtime(abs_path)
- if mtime > latest_mtime:
- latest_mtime = mtime
- latest_file = abs_path
- return latest_file
-
-
-def get_vr_address():
- # Get the address of the virtual router via dhcp leases
- # see http://bit.ly/T76eKC for documentation on the virtual router.
- # If no virtual router is detected, fall back on the default gateway.
- lease_file = get_latest_lease()
- if not lease_file:
- LOG.debug("No lease file found, using default gateway")
- return get_default_gateway()
-
- latest_address = None
- with open(lease_file, "r") as fd:
- for line in fd:
- if "dhcp-server-identifier" in line:
- words = line.strip(" ;\r\n").split(" ")
- if len(words) > 2:
- dhcp = words[2]
- LOG.debug("Found DHCP identifier %s", dhcp)
- latest_address = dhcp
- if not latest_address:
- # No virtual router found, fall back on the default gateway
- LOG.debug("No DHCP found, using default gateway")
- return get_default_gateway()
- return latest_address
-
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceCloudStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
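What get_default_gateway() above decodes: /proc/net/route stores addresses as little-endian hex, so, for example, the gateway field 0100000A unpacks to 10.0.0.1:

    from socket import inet_ntoa
    from struct import pack

    assert inet_ntoa(pack("<L", int("0100000A", 16))) == "10.0.0.1"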
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
deleted file mode 100644
index 91d6ff13..00000000
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ /dev/null
@@ -1,278 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
-from cloudinit.net import eni
-
-from cloudinit.sources.helpers import openstack
-
-LOG = logging.getLogger(__name__)
-
-# Various defaults/constants...
-DEFAULT_IID = "iid-dsconfigdrive"
-DEFAULT_MODE = 'pass'
-DEFAULT_METADATA = {
- "instance-id": DEFAULT_IID,
-}
-FS_TYPES = ('vfat', 'iso9660')
-LABEL_TYPES = ('config-2',)
-POSSIBLE_MOUNTS = ('sr', 'cd')
-OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS
- for i in range(0, 2)))
-
-
-class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)
- self.source = None
- self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
- self.version = None
- self.ec2_metadata = None
- self._network_config = None
- self.network_json = None
- self.network_eni = None
- self.known_macs = None
- self.files = {}
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
- mstr += "[source=%s]" % (self.source)
- return mstr
-
- def get_data(self):
- found = None
- md = {}
- results = {}
- if os.path.isdir(self.seed_dir):
- try:
- results = read_config_drive(self.seed_dir)
- found = self.seed_dir
- except openstack.NonReadable:
- util.logexc(LOG, "Failed reading config drive from %s",
- self.seed_dir)
- if not found:
- for dev in find_candidate_devs():
- try:
- # Set mtype if freebsd and turn off sync
- if dev.startswith("/dev/cd"):
- mtype = "cd9660"
- sync = False
- else:
- mtype = None
- sync = True
- results = util.mount_cb(dev, read_config_drive,
- mtype=mtype, sync=sync)
- found = dev
- except openstack.NonReadable:
- pass
- except util.MountFailedError:
- pass
- except openstack.BrokenMetadata:
- util.logexc(LOG, "Broken config drive: %s", dev)
- if found:
- break
- if not found:
- return False
-
- md = results.get('metadata', {})
- md = util.mergemanydict([md, DEFAULT_METADATA])
-
- self.dsmode = self._determine_dsmode(
- [results.get('dsmode'), self.ds_cfg.get('dsmode'),
- sources.DSMODE_PASS if results['version'] == 1 else None])
-
- if self.dsmode == sources.DSMODE_DISABLED:
- return False
-
- prev_iid = get_previous_iid(self.paths)
- cur_iid = md['instance-id']
- if prev_iid != cur_iid:
- # better would be to handle this centrally, allowing
- # the datasource to do something on new instance id
- # note, networking is only rendered here if dsmode is DSMODE_PASS
- # which means "DISABLED, but render files and networking"
- on_first_boot(results, distro=self.distro,
- network=self.dsmode == sources.DSMODE_PASS)
-
- # This is legacy and sneaky. If dsmode is 'pass' then do not claim
- # the datasource was used, even though we did run on_first_boot above.
- if self.dsmode == sources.DSMODE_PASS:
- LOG.debug("%s: not claiming datasource, dsmode=%s", self,
- self.dsmode)
- return False
-
- self.source = found
- self.metadata = md
- self.ec2_metadata = results.get('ec2-metadata')
- self.userdata_raw = results.get('userdata')
- self.version = results['version']
- self.files.update(results.get('files', {}))
-
- vd = results.get('vendordata')
- self.vendordata_pure = vd
- try:
- self.vendordata_raw = openstack.convert_vendordata_json(vd)
- except ValueError as e:
- LOG.warn("Invalid content in vendor-data: %s", e)
- self.vendordata_raw = None
-
- # network_config is an /etc/network/interfaces formated file and is
- # obsolete compared to networkdata (from network_data.json) but both
- # might be present.
- self.network_eni = results.get("network_config")
- self.network_json = results.get('networkdata')
- return True
-
- def check_instance_id(self, sys_cfg):
- # quickly (local check only) if self.instance_id is still valid
- return sources.instance_id_matches_system_uuid(self.get_instance_id())
-
- @property
- def network_config(self):
- if self._network_config is None:
- if self.network_json is not None:
- LOG.debug("network config provided via network_json")
- self._network_config = openstack.convert_net_json(
- self.network_json, known_macs=self.known_macs)
- elif self.network_eni is not None:
- self._network_config = eni.convert_eni_data(self.network_eni)
- LOG.debug("network config provided via converted eni data")
- else:
- LOG.debug("no network configuration available")
- return self._network_config
-
-
-def read_config_drive(source_dir):
- reader = openstack.ConfigDriveReader(source_dir)
- finders = [
- (reader.read_v2, [], {}),
- (reader.read_v1, [], {}),
- ]
- excps = []
- for (functor, args, kwargs) in finders:
- try:
- return functor(*args, **kwargs)
- except openstack.NonReadable as e:
- excps.append(e)
- raise excps[-1]
-
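-# Note: read_config_drive returns the dict produced by the first reader
-# that succeeds; the newer v2 layout is tried before the legacy v1
-# layout, and if both fail the last NonReadable exception is re-raised.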
-
-def get_previous_iid(paths):
- # interestingly, for this purpose the "previous" instance-id is the current
- # instance-id. cloud-init hasn't moved them over yet as this datasource
- # hasn't declared itself found.
- fname = os.path.join(paths.get_cpath('data'), 'instance-id')
- try:
- return util.load_file(fname).rstrip("\n")
- except IOError:
- return None
-
-
-def on_first_boot(data, distro=None, network=True):
- """Performs any first-boot actions using data read from a config-drive."""
- if not isinstance(data, dict):
- raise TypeError("Config-drive data expected to be a dict; not %s"
- % (type(data)))
- if network:
- net_conf = data.get("network_config", '')
- if net_conf and distro:
- LOG.warn("Updating network interfaces from config drive")
- distro.apply_network(net_conf)
- write_injected_files(data.get('files'))
-
-
-def write_injected_files(files):
- if files:
- LOG.debug("Writing %s injected files", len(files))
- for (filename, content) in files.items():
- if not filename.startswith(os.sep):
- filename = os.sep + filename
- try:
- util.write_file(filename, content, mode=0o660)
- except IOError:
- util.logexc(LOG, "Failed writing file: %s", filename)
-
-
-def find_candidate_devs(probe_optical=True):
- """Return a list of devices that may contain the config drive.
-
- The returned list is sorted by search order, where the first item
- should be searched first (highest priority).
-
- config drive v1:
- Per documentation, this is "associated as the last available disk on the
- instance", and should be VFAT.
- Currently, we do not restrict the search list to the "last available disk".
-
- config drive v2:
- Disk should be:
- * either vfat or iso9660 formatted
- * labeled with 'config-2'
- """
- # query optical drive to get it in blkid cache for 2.6 kernels
- if probe_optical:
- for device in OPTICAL_DEVICES:
- try:
- util.find_devs_with(path=device)
- except util.ProcessExecutionError:
- pass
-
- by_fstype = []
- for fs_type in FS_TYPES:
- by_fstype.extend(util.find_devs_with("TYPE=%s" % (fs_type)))
-
- by_label = []
- for label in LABEL_TYPES:
- by_label.extend(util.find_devs_with("LABEL=%s" % (label)))
-
- # give preference to "last available disk" (vdb over vda)
- # note, this is not a perfect rendition of that.
- by_fstype.sort(reverse=True)
- by_label.sort(reverse=True)
-
- # combine list of items by putting by-label items first
- # followed by fstype items, but with dupes removed
- candidates = (by_label + [d for d in by_fstype if d not in by_label])
-
- # We are looking for a block device or partition with the necessary
- # label, or an unpartitioned block device (e.g. sda, not sda1)
- devices = [d for d in candidates
- if d in by_label or not util.is_partition(d)]
- return devices
-
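-# Illustrative result (device names are hypothetical): with a vfat
-# partition labeled 'config-2' and an iso9660 cdrom present,
-# find_candidate_devs() might return ['/dev/vdb1', '/dev/sr0'];
-# labeled partitions are kept, while fstype-only matches must be
-# whole (unpartitioned) devices.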
-
-# Legacy: Must be present in case we load an old pkl object
-DataSourceConfigDriveNet = DataSourceConfigDrive
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceConfigDrive, (sources.DEP_FILESYSTEM,)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
deleted file mode 100644
index 44a17a00..00000000
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Author: Neal Shrader <neal@digitalocean.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import ec2_utils
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
-import functools
-
-
-LOG = logging.getLogger(__name__)
-
-BUILTIN_DS_CONFIG = {
- 'metadata_url': 'http://169.254.169.254/metadata/v1/',
- 'mirrors_url': 'http://mirrors.digitalocean.com/'
-}
-MD_RETRIES = 0
-MD_TIMEOUT = 1
-
-
-class DataSourceDigitalOcean(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.metadata = dict()
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}),
- BUILTIN_DS_CONFIG])
- self.metadata_address = self.ds_cfg['metadata_url']
-
- if self.ds_cfg.get('retries'):
- self.retries = self.ds_cfg['retries']
- else:
- self.retries = MD_RETRIES
-
- if self.ds_cfg.get('timeout'):
- self.timeout = self.ds_cfg['timeout']
- else:
- self.timeout = MD_TIMEOUT
-
- def get_data(self):
- caller = functools.partial(util.read_file_or_url,
- timeout=self.timeout, retries=self.retries)
-
- def mcaller(url):
- return caller(url).contents
-
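- # MetadataMaterializer walks the index returned by the metadata
- # endpoint via mcaller, and materialize() turns the crawl into a
- # nested dict (illustrative summary of the ec2_utils helper).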
- md = ec2_utils.MetadataMaterializer(mcaller(self.metadata_address),
- base_url=self.metadata_address,
- caller=mcaller)
-
- self.metadata = md.materialize()
-
- if self.metadata.get('id'):
- return True
- else:
- return False
-
- def get_userdata_raw(self):
- return "\n".join(self.metadata['user-data'])
-
- def get_vendordata_raw(self):
- return "\n".join(self.metadata['vendor-data'])
-
- def get_public_ssh_keys(self):
- public_keys = self.metadata['public-keys']
- if isinstance(public_keys, list):
- return public_keys
- else:
- return [public_keys]
-
- @property
- def availability_zone(self):
- return self.metadata['region']
-
- def get_instance_id(self):
- return self.metadata['id']
-
- def get_hostname(self, fqdn=False, resolve_ip=False):
- return self.metadata['hostname']
-
- def get_package_mirror_info(self):
- return self.ds_cfg['mirrors_url']
-
- @property
- def launch_index(self):
- return None
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
deleted file mode 100644
index 6fe2a0bb..00000000
--- a/cloudinit/sources/DataSourceEc2.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Hafliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import time
-
-from cloudinit import ec2_utils as ec2
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper as uhelp
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-DEF_MD_URL = "http://169.254.169.254"
-
-# Which version we are requesting of the ec2 metadata apis
-DEF_MD_VERSION = '2009-04-04'
-
-# Default metadata urls that will be used if none are provided
-# They will be checked for 'resolveability' and some of the
-# following may be discarded if they do not resolve
-DEF_MD_URLS = [DEF_MD_URL, "http://instance-data.:8773"]
-
-
-class DataSourceEc2(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.metadata_address = DEF_MD_URL
- self.seed_dir = os.path.join(paths.seed_dir, "ec2")
- self.api_ver = DEF_MD_VERSION
-
- def get_data(self):
- seed_ret = {}
- if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
- self.userdata_raw = seed_ret['user-data']
- self.metadata = seed_ret['meta-data']
- LOG.debug("Using seeded ec2 data from %s", self.seed_dir)
- return True
-
- try:
- if not self.wait_for_metadata_service():
- return False
- start_time = time.time()
- self.userdata_raw = \
- ec2.get_instance_userdata(self.api_ver, self.metadata_address)
- self.metadata = ec2.get_instance_metadata(self.api_ver,
- self.metadata_address)
- LOG.debug("Crawl of metadata service took %s seconds",
- int(time.time() - start_time))
- return True
- except Exception:
- util.logexc(LOG, "Failed reading from metadata address %s",
- self.metadata_address)
- return False
-
- @property
- def launch_index(self):
- if not self.metadata:
- return None
- return self.metadata.get('ami-launch-index')
-
- def get_instance_id(self):
- return self.metadata['instance-id']
-
- def _get_url_settings(self):
- mcfg = self.ds_cfg
- max_wait = 120
- try:
- max_wait = int(mcfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
-
- timeout = 50
- try:
- timeout = max(0, int(mcfg.get("timeout", timeout)))
- except Exception:
- util.logexc(LOG, "Failed to get timeout, using %s", timeout)
-
- return (max_wait, timeout)
-
- def wait_for_metadata_service(self):
- mcfg = self.ds_cfg
-
- (max_wait, timeout) = self._get_url_settings()
- if max_wait <= 0:
- return False
-
- # Remove addresses from the list that won't resolve.
- mdurls = mcfg.get("metadata_urls", DEF_MD_URLS)
- filtered = [x for x in mdurls if util.is_resolvable_url(x)]
-
- if set(filtered) != set(mdurls):
- LOG.debug("Removed the following from metadata urls: %s",
- list((set(mdurls) - set(filtered))))
-
- if len(filtered):
- mdurls = filtered
- else:
- LOG.warn("Empty metadata url list! using default list")
- mdurls = DEF_MD_URLS
-
- urls = []
- url2base = {}
- for url in mdurls:
- cur = "%s/%s/meta-data/instance-id" % (url, self.api_ver)
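- # e.g. 'http://169.254.169.254/2009-04-04/meta-data/instance-id'
- # for the default url and api version (illustrative).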
- urls.append(cur)
- url2base[cur] = url
-
- start_time = time.time()
- url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout, status_cb=LOG.warn)
-
- if url:
- LOG.debug("Using metadata source: '%s'", url2base[url])
- else:
- LOG.critical("Giving up on md from %s after %s seconds",
- urls, int(time.time() - start_time))
-
- self.metadata_address = url2base.get(url)
- return bool(url)
-
- def device_name_to_device(self, name):
- # Consult metadata service, that has
- # ephemeral0: sdb
- # and return 'sdb' for input 'ephemeral0'
- if 'block-device-mapping' not in self.metadata:
- return None
-
- # Example:
- # 'block-device-mapping':
- # {'ami': '/dev/sda1',
- # 'ephemeral0': '/dev/sdb',
- # 'root': '/dev/sda1'}
- found = None
- bdm = self.metadata['block-device-mapping']
- for (entname, device) in bdm.items():
- if entname == name:
- found = device
- break
- # LP: #513842 mapping in Euca has 'ephemeral' not 'ephemeral0'
- if entname == "ephemeral" and name == "ephemeral0":
- found = device
-
- if found is None:
- LOG.debug("Unable to convert %s to a device", name)
- return None
-
- ofound = found
- if not found.startswith("/"):
- found = "/dev/%s" % found
-
- if os.path.exists(found):
- return found
-
- remapped = self._remap_device(os.path.basename(found))
- if remapped:
- LOG.debug("Remapped device name %s => %s", found, remapped)
- return remapped
-
- # On t1.micro, ephemeral0 will appear in block-device-mapping from
- # metadata, but it will not exist on disk (and never will)
- # at this point, we've verified that the path did not exist
- # in the special case of 'ephemeral0' return None to avoid bogus
- # fstab entry (LP: #744019)
- if name == "ephemeral0":
- return None
- return ofound
-
- @property
- def availability_zone(self):
- try:
- return self.metadata['placement']['availability-zone']
- except KeyError:
- return None
-
- @property
- def region(self):
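- # e.g. availability zone 'us-east-1a' maps to region 'us-east-1'
- # (illustrative; the trailing letter is stripped).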
- az = self.availability_zone
- if az is not None:
- return az[:-1]
- return None
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
deleted file mode 100644
index c660a350..00000000
--- a/cloudinit/sources/DataSourceGCE.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Author: Vaidas Jablonskis <jablonskis@gmail.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-from base64 import b64decode
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-BUILTIN_DS_CONFIG = {
- 'metadata_url': 'http://metadata.google.internal/computeMetadata/v1/'
-}
-REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
-
-
-class GoogleMetadataFetcher(object):
- headers = {'X-Google-Metadata-Request': True}
-
- def __init__(self, metadata_address):
- self.metadata_address = metadata_address
-
- def get_value(self, path, is_text):
- value = None
- try:
- resp = url_helper.readurl(url=self.metadata_address + path,
- headers=self.headers)
- except url_helper.UrlError as exc:
- msg = "url %s raised exception %s"
- LOG.debug(msg, path, exc)
- else:
- if resp.code == 200:
- if is_text:
- value = util.decode_binary(resp.contents)
- else:
- value = resp.contents
- else:
- LOG.debug("url %s returned code %s", path, resp.code)
- return value
-
-
-class DataSourceGCE(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.metadata = dict()
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}),
- BUILTIN_DS_CONFIG])
- self.metadata_address = self.ds_cfg['metadata_url']
-
- # GCE takes the sshKeys attribute in the format '<user>:<public_key>',
- # so we have to trim each key to remove the username part.
- def _trim_key(self, public_key):
- try:
- index = public_key.index(':')
- if index > 0:
- return public_key[(index + 1):]
- except ValueError:
- pass
- return public_key
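- # Example (illustrative): 'alice:ssh-rsa AAAA... alice@host' trims
- # to 'ssh-rsa AAAA... alice@host'; entries without a user prefix
- # are returned unchanged.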
-
- def get_data(self):
- # url_map: (our-key, path, required, is_text)
- url_map = [
- ('instance-id', ('instance/id',), True, True),
- ('availability-zone', ('instance/zone',), True, True),
- ('local-hostname', ('instance/hostname',), True, True),
- ('public-keys', ('project/attributes/sshKeys',
- 'instance/attributes/sshKeys'), False, True),
- ('user-data', ('instance/attributes/user-data',), False, False),
- ('user-data-encoding', ('instance/attributes/user-data-encoding',),
- False, True),
- ]
-
- # if we cannot resolve the metadata server, then no point in trying
- if not util.is_resolvable_url(self.metadata_address):
- LOG.debug("%s is not resolvable", self.metadata_address)
- return False
-
- metadata_fetcher = GoogleMetadataFetcher(self.metadata_address)
- # iterate over url_map keys to get metadata items
- running_on_gce = False
- for (mkey, paths, required, is_text) in url_map:
- value = None
- for path in paths:
- new_value = metadata_fetcher.get_value(path, is_text)
- if new_value is not None:
- value = new_value
- if value:
- running_on_gce = True
- if required and value is None:
- msg = "required key %s returned nothing. not GCE"
- if not running_on_gce:
- LOG.debug(msg, mkey)
- else:
- LOG.warn(msg, mkey)
- return False
- self.metadata[mkey] = value
-
- if self.metadata['public-keys']:
- lines = self.metadata['public-keys'].splitlines()
- self.metadata['public-keys'] = [self._trim_key(k) for k in lines]
-
- if self.metadata['availability-zone']:
- self.metadata['availability-zone'] = self.metadata[
- 'availability-zone'].split('/')[-1]
-
- encoding = self.metadata.get('user-data-encoding')
- if encoding:
- if encoding == 'base64':
- self.metadata['user-data'] = b64decode(
- self.metadata['user-data'])
- else:
- LOG.warn('unknown user-data-encoding: %s, ignoring', encoding)
-
- return running_on_gce
-
- @property
- def launch_index(self):
- # GCE does not provide launch_index property
- return None
-
- def get_instance_id(self):
- return self.metadata['instance-id']
-
- def get_public_ssh_keys(self):
- return self.metadata['public-keys']
-
- def get_hostname(self, fqdn=False, resolve_ip=False):
- # GCE has long FQDNs and has asked for short hostnames
- return self.metadata['local-hostname'].split('.')[0]
-
- def get_userdata_raw(self):
- return self.metadata['user-data']
-
- @property
- def availability_zone(self):
- return self.metadata['availability-zone']
-
- @property
- def region(self):
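- # e.g. zone 'us-central1-a' maps to region 'us-central1'
- # (illustrative; the trailing '-<letter>' is stripped).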
- return self.availability_zone.rsplit('-', 1)[0]
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
deleted file mode 100644
index d828f078..00000000
--- a/cloudinit/sources/DataSourceMAAS.py
+++ /dev/null
@@ -1,353 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import print_function
-
-import errno
-import os
-import time
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-MD_VERSION = "2012-03-01"
-
-BINARY_FIELDS = ('user-data',)
-
-
-class DataSourceMAAS(sources.DataSource):
- """
- DataSourceMAAS reads instance information from MAAS.
- Given a config metadata_url, and oauth tokens, it expects to find
- files under the root named:
- instance-id
- user-data
- hostname
- """
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.base_url = None
- self.seed_dir = os.path.join(paths.seed_dir, 'maas')
- self.oauth_helper = self._get_helper()
-
- def _get_helper(self):
- mcfg = self.ds_cfg
- # If we are missing token_key, token_secret or consumer_key
- # then just do non-authed requests
- for required in ('token_key', 'token_secret', 'consumer_key'):
- if required not in mcfg:
- return url_helper.OauthUrlHelper()
-
- return url_helper.OauthUrlHelper(
- consumer_key=mcfg['consumer_key'], token_key=mcfg['token_key'],
- token_secret=mcfg['token_secret'],
- consumer_secret=mcfg.get('consumer_secret'))
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- return "%s [%s]" % (root, self.base_url)
-
- def get_data(self):
- mcfg = self.ds_cfg
-
- try:
- (userdata, metadata) = read_maas_seed_dir(self.seed_dir)
- self.userdata_raw = userdata
- self.metadata = metadata
- self.base_url = self.seed_dir
- return True
- except MAASSeedDirNone:
- pass
- except MAASSeedDirMalformed as exc:
- LOG.warn("%s was malformed: %s" % (self.seed_dir, exc))
- raise
-
- # If there is no metadata_url, then we're not configured
- url = mcfg.get('metadata_url', None)
- if not url:
- return False
-
- try:
- # doing this here actually has a side effect of
- # getting the oauth time-fix in place. Nowhere else would
- # retry by default, so even if we could fix the timestamp
- # we would not.
- if not self.wait_for_metadata_service(url):
- return False
-
- self.base_url = url
-
- (userdata, metadata) = read_maas_seed_url(
- self.base_url, read_file_or_url=self.oauth_helper.readurl,
- paths=self.paths, retries=1)
- self.userdata_raw = userdata
- self.metadata = metadata
- return True
- except Exception:
- util.logexc(LOG, "Failed fetching metadata from url %s", url)
- return False
-
- def wait_for_metadata_service(self, url):
- mcfg = self.ds_cfg
- max_wait = 120
- try:
- max_wait = int(mcfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
-
- if max_wait == 0:
- return False
-
- timeout = 50
- try:
- if "timeout" in mcfg:
- timeout = int(mcfg.get("timeout", timeout))
- except Exception:
- LOG.warn("Failed to get timeout, using %s" % timeout)
-
- starttime = time.time()
- check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
- urls = [check_url]
- url = self.oauth_helper.wait_for_url(
- urls=urls, max_wait=max_wait, timeout=timeout)
-
- if url:
- LOG.debug("Using metadata source: '%s'", url)
- else:
- LOG.critical("Giving up on md from %s after %i seconds",
- urls, int(time.time() - starttime))
-
- return bool(url)
-
-
-def read_maas_seed_dir(seed_d):
- """
- Return user-data and metadata for a maas seed dir in seed_d.
- Expected format of seed_d are the following files:
- * instance-id
- * local-hostname
- * user-data
- """
- if not os.path.isdir(seed_d):
- raise MAASSeedDirNone("%s: not a directory" % seed_d)
-
- files = ('local-hostname', 'instance-id', 'user-data', 'public-keys')
- md = {}
- for fname in files:
- try:
- md[fname] = util.load_file(os.path.join(seed_d, fname),
- decode=fname not in BINARY_FIELDS)
- except IOError as e:
- if e.errno != errno.ENOENT:
- raise
-
- return check_seed_contents(md, seed_d)
-
-
-def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
- version=MD_VERSION, paths=None, retries=None):
- """
- Read the maas datasource at seed_url.
- read_file_or_url is a method that should provide an interface
- like util.read_file_or_url
-
- Expected format of seed_url is the following files:
- * <seed_url>/<version>/meta-data/instance-id
- * <seed_url>/<version>/meta-data/local-hostname
- * <seed_url>/<version>/user-data
- """
- base_url = "%s/%s" % (seed_url, version)
- file_order = [
- 'local-hostname',
- 'instance-id',
- 'public-keys',
- 'user-data',
- ]
- files = {
- 'local-hostname': "%s/%s" % (base_url, 'meta-data/local-hostname'),
- 'instance-id': "%s/%s" % (base_url, 'meta-data/instance-id'),
- 'public-keys': "%s/%s" % (base_url, 'meta-data/public-keys'),
- 'user-data': "%s/%s" % (base_url, 'user-data'),
- }
-
- if read_file_or_url is None:
- read_file_or_url = util.read_file_or_url
-
- md = {}
- for name in file_order:
- url = files.get(name)
- if name == 'user-data':
- item_retries = 0
- else:
- item_retries = retries
-
- try:
- ssl_details = util.fetch_ssl_details(paths)
- resp = read_file_or_url(url, retries=item_retries,
- timeout=timeout, ssl_details=ssl_details)
- if resp.ok():
- if name in BINARY_FIELDS:
- md[name] = resp.contents
- else:
- md[name] = util.decode_binary(resp.contents)
- else:
- LOG.warn(("Fetching from %s resulted in"
- " an invalid http code %s"), url, resp.code)
- except url_helper.UrlError as e:
- if e.code != 404:
- raise
- return check_seed_contents(md, seed_url)
-
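-# Example usage (illustrative; the endpoint url is hypothetical):
-# (userdata, metadata) = read_maas_seed_url("http://10.0.0.2/MAAS/metadata")
-# fetches <url>/2012-03-01/meta-data/instance-id and friends without oauth
-# and returns the (user-data bytes, metadata dict) pair.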
-
-def check_seed_contents(content, seed):
- """Validate if content is Is the content a dict that is valid as a
- return for a datasource.
- Either return a (userdata, metadata) tuple or
- Raise MAASSeedDirMalformed or MAASSeedDirNone
- """
- md_required = ('instance-id', 'local-hostname')
- if len(content) == 0:
- raise MAASSeedDirNone("%s: no data files found" % seed)
-
- found = list(content.keys())
- missing = [k for k in md_required if k not in found]
- if len(missing):
- raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing))
-
- userdata = content.get('user-data', b"")
- md = {}
- for (key, val) in content.items():
- if key == 'user-data':
- continue
- md[key] = val
-
- return (userdata, md)
-
-
-class MAASSeedDirNone(Exception):
- pass
-
-
-class MAASSeedDirMalformed(Exception):
- pass
-
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
-
-
-if __name__ == "__main__":
- def main():
- """
- Call with single argument of directory or http or https url.
- If url is given, additional arguments are allowed, which will be
- interpreted as consumer_key, token_key, token_secret, consumer_secret
- """
- import argparse
- import pprint
-
- parser = argparse.ArgumentParser(description='Interact with MAAS DS')
- parser.add_argument("--config", metavar="file",
- help="specify DS config file", default=None)
- parser.add_argument("--ckey", metavar="key",
- help="the consumer key to auth with", default=None)
- parser.add_argument("--tkey", metavar="key",
- help="the token key to auth with", default=None)
- parser.add_argument("--csec", metavar="secret",
- help="the consumer secret (likely '')", default="")
- parser.add_argument("--tsec", metavar="secret",
- help="the token secret to auth with", default=None)
- parser.add_argument("--apiver", metavar="version",
- help="the apiver to use ("" can be used)",
- default=MD_VERSION)
-
- subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
- subcmds.add_parser('crawl', help="crawl the datasource")
- subcmds.add_parser('get', help="do a single GET of provided url")
- subcmds.add_parser('check-seed', help="read and verify seed at url")
-
- parser.add_argument("url", help="the data source to query")
-
- args = parser.parse_args()
-
- creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
- 'token_secret': args.tsec, 'consumer_secret': args.csec}
-
- if args.config:
- cfg = util.read_conf(args.config)
- if 'datasource' in cfg:
- cfg = cfg['datasource']['MAAS']
- for key in creds.keys():
- if key in cfg and creds[key] is None:
- creds[key] = cfg[key]
-
- oauth_helper = url_helper.OauthUrlHelper(**creds)
-
- def geturl(url):
- # the retry is to ensure that oauth timestamp gets fixed
- return oauth_helper.readurl(url, retries=1).contents
-
- def printurl(url):
- print("== %s ==\n%s\n" % (url, geturl(url).decode()))
-
- def crawl(url):
- if url.endswith("/"):
- for line in geturl(url).decode().splitlines():
- if line.endswith("/"):
- crawl("%s%s" % (url, line))
- elif line == "meta-data":
- # meta-data is a dir, it *should* end in a /
- crawl("%s%s" % (url, "meta-data/"))
- else:
- printurl("%s%s" % (url, line))
- else:
- printurl(url)
-
- if args.subcmd == "check-seed":
- readurl = oauth_helper.readurl
- if args.url[0] == "/" or args.url.startswith("file://"):
- readurl = None
- (userdata, metadata) = read_maas_seed_url(
- args.url, version=args.apiver, read_file_or_url=readurl,
- retries=2)
- print("=== userdata ===")
- print(userdata.decode())
- print("=== metadata ===")
- pprint.pprint(metadata)
-
- elif args.subcmd == "get":
- printurl(args.url)
-
- elif args.subcmd == "crawl":
- if not args.url.endswith("/"):
- args.url = "%s/" % args.url
- crawl(args.url)
-
- main()
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
deleted file mode 100644
index cdc9eef5..00000000
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Hafliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import errno
-import os
-
-from cloudinit import log as logging
-from cloudinit.net import eni
-from cloudinit import sources
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-class DataSourceNoCloud(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed = None
- self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud'),
- os.path.join(paths.seed_dir, 'nocloud-net')]
- self.seed_dir = None
- self.supported_seed_starts = ("/", "file://")
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
-
- def get_data(self):
- defaults = {
- "instance-id": "nocloud",
- "dsmode": self.dsmode,
- }
-
- found = []
- mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "",
- 'network-config': {}}
-
- try:
- # Parse the kernel command line, getting data passed in
- md = {}
- if load_cmdline_data(md):
- found.append("cmdline")
- mydata = _merge_new_seed(mydata, {'meta-data': md})
- except Exception:
- util.logexc(LOG, "Unable to parse command line data")
- return False
-
- # Check to see if the seed dir has data.
- pp2d_kwargs = {'required': ['user-data', 'meta-data'],
- 'optional': ['vendor-data', 'network-config']}
-
- for path in self.seed_dirs:
- try:
- seeded = util.pathprefix2dict(path, **pp2d_kwargs)
- found.append(path)
- LOG.debug("Using seeded data from %s", path)
- mydata = _merge_new_seed(mydata, seeded)
- break
- except ValueError:
- pass
-
- # If the datasource config had a 'seedfrom' entry, then that takes
- # precedence over a 'seedfrom' that was found in a filesystem
- # but not over external media
- if self.ds_cfg.get('seedfrom'):
- found.append("ds_config_seedfrom")
- mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom']
-
- # fields appropriately named can also just come from the datasource
- # config (ie, 'user-data', 'meta-data', 'vendor-data' there)
- if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg:
- mydata = _merge_new_seed(mydata, self.ds_cfg)
- found.append("ds_config")
-
- def _pp2d_callback(mp, data):
- return util.pathprefix2dict(mp, **data)
-
- label = self.ds_cfg.get('fs_label', "cidata")
- if label is not None:
- # Query optical drive to get it in blkid cache for 2.6 kernels
- util.find_devs_with(path="/dev/sr0")
- util.find_devs_with(path="/dev/sr1")
-
- fslist = util.find_devs_with("TYPE=vfat")
- fslist.extend(util.find_devs_with("TYPE=iso9660"))
-
- label_list = util.find_devs_with("LABEL=%s" % label)
- devlist = list(set(fslist) & set(label_list))
- devlist.sort(reverse=True)
-
- for dev in devlist:
- try:
- LOG.debug("Attempting to use data from %s", dev)
-
- try:
- seeded = util.mount_cb(dev, _pp2d_callback,
- pp2d_kwargs)
- except ValueError:
- if dev in label_list:
- LOG.warn("device %s with label=%s not a "
- "valid seed.", dev, label)
- continue
-
- mydata = _merge_new_seed(mydata, seeded)
-
- LOG.debug("Using data from %s", dev)
- found.append(dev)
- break
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for "
- "data", dev)
-
- # There was no indication on kernel cmdline or data
- # in the seeddir suggesting this handler should be used.
- if len(found) == 0:
- return False
-
- # The special argument "seedfrom" indicates we should
- # attempt to seed the userdata / metadata from its value
- # its primary value is in allowing the user to type less
- # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
- if "seedfrom" in mydata['meta-data']:
- seedfrom = mydata['meta-data']["seedfrom"]
- seedfound = False
- for proto in self.supported_seed_starts:
- if seedfrom.startswith(proto):
- seedfound = proto
- break
- if not seedfound:
- LOG.debug("Seed from %s not supported by %s", seedfrom, self)
- return False
-
- # This could throw errors, but the user told us to do it
- # so if errors are raised, let them raise
- (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
- LOG.debug("Using seeded cache data from %s", seedfrom)
-
- # Values in the command line override those from the seed
- mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
- md_seed])
- mydata['user-data'] = ud
- found.append(seedfrom)
-
- # Now that we have exhausted any other places merge in the defaults
- mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
- defaults])
-
- self.dsmode = self._determine_dsmode(
- [mydata['meta-data'].get('dsmode')])
-
- if self.dsmode == sources.DSMODE_DISABLED:
- LOG.debug("%s: not claiming datasource, dsmode=%s", self,
- self.dsmode)
- return False
-
- self.seed = ",".join(found)
- self.metadata = mydata['meta-data']
- self.userdata_raw = mydata['user-data']
- self.vendordata_raw = mydata['vendor-data']
- self._network_config = mydata['network-config']
- self._network_eni = mydata['meta-data'].get('network-interfaces')
- return True
-
- def check_instance_id(self, sys_cfg):
- # quickly check (local only) whether self.instance_id is still valid;
- # we check the kernel command line or files.
- current = self.get_instance_id()
- if not current:
- return None
-
- # LP: #1568150 need getattr in the case that an old class object
- # has been loaded from a pickled file and is now executing new source.
- dirs = getattr(self, 'seed_dirs', [self.seed_dir])
- quick_id = _quick_read_instance_id(dirs=dirs)
- if not quick_id:
- return None
- return quick_id == current
-
- @property
- def network_config(self):
- if self._network_config is None:
- if self._network_eni is not None:
- self._network_config = eni.convert_eni_data(self._network_eni)
- return self._network_config
-
-
-def _quick_read_instance_id(dirs=None):
- if dirs is None:
- dirs = []
-
- iid_key = 'instance-id'
- fill = {}
- if load_cmdline_data(fill) and iid_key in fill:
- return fill[iid_key]
-
- for d in dirs:
- if d is None:
- continue
- try:
- data = util.pathprefix2dict(d, required=['meta-data'])
- md = util.load_yaml(data['meta-data'])
- if iid_key in md:
- return md[iid_key]
- except ValueError:
- pass
-
- return None
-
-
-def load_cmdline_data(fill, cmdline=None):
- pairs = [("ds=nocloud", sources.DSMODE_LOCAL),
- ("ds=nocloud-net", sources.DSMODE_NETWORK)]
- for idstr, dsmode in pairs:
- if parse_cmdline_data(idstr, fill, cmdline):
- # if dsmode was explicitly in the command line, then
- # prefer it to the dsmode based on the command line id
- if 'dsmode' not in fill:
- fill['dsmode'] = dsmode
- return True
- return False
-
-
-# Returns true or false indicating if cmdline indicated
-# that this module should be used. Updates dictionary 'fill'
-# with data that was found.
-# Example cmdline:
-# root=LABEL=uec-rootfs ro ds=nocloud
-def parse_cmdline_data(ds_id, fill, cmdline=None):
- if cmdline is None:
- cmdline = util.get_cmdline()
- cmdline = " %s " % cmdline
-
- if not (" %s " % ds_id in cmdline or " %s;" % ds_id in cmdline):
- return False
-
- argline = ""
- # cmdline can contain:
- # ds=nocloud[;key=val;key=val]
- for tok in cmdline.split():
- if tok.startswith(ds_id):
- argline = tok.split("=", 1)
-
- # argline array is now 'nocloud' followed optionally by
- # a ';' and then key=value pairs also terminated with ';'
- tmp = argline[1].split(";")
- if len(tmp) > 1:
- kvpairs = tmp[1:]
- else:
- kvpairs = ()
-
- # short2long mapping to save cmdline typing
- s2l = {"h": "local-hostname", "i": "instance-id", "s": "seedfrom"}
- for item in kvpairs:
- if item == "":
- continue
- try:
- (k, v) = item.split("=", 1)
- except Exception:
- k = item
- v = None
- if k in s2l:
- k = s2l[k]
- fill[k] = v
-
- return True
-
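-# Worked example (illustrative; the url is hypothetical): for a kernel
-# cmdline of
-# root=LABEL=uec-rootfs ro ds=nocloud;s=http://10.0.0.1/seed/;h=node1
-# parse_cmdline_data('ds=nocloud', fill) returns True and leaves
-# fill == {'seedfrom': 'http://10.0.0.1/seed/', 'local-hostname': 'node1'}.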
-
-def _merge_new_seed(cur, seeded):
- ret = cur.copy()
-
- newmd = seeded.get('meta-data', {})
- if not isinstance(newmd, dict):
- newmd = util.load_yaml(newmd)
- ret['meta-data'] = util.mergemanydict([cur['meta-data'], newmd])
-
- if seeded.get('network-config'):
- ret['network-config'] = util.load_yaml(seeded['network-config'])
-
- if 'user-data' in seeded:
- ret['user-data'] = seeded['user-data']
- if 'vendor-data' in seeded:
- ret['vendor-data'] = seeded['vendor-data']
- return ret
-
-
-class DataSourceNoCloudNet(DataSourceNoCloud):
- def __init__(self, sys_cfg, distro, paths):
- DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)
- self.supported_seed_starts = ("http://", "https://", "ftp://")
-
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )),
- (DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py
deleted file mode 100644
index d1a62b2a..00000000
--- a/cloudinit/sources/DataSourceNone.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import log as logging
-from cloudinit import sources
-
-LOG = logging.getLogger(__name__)
-
-
-class DataSourceNone(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths, ud_proc=None):
- sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
- self.metadata = {}
- self.userdata_raw = ''
-
- def get_data(self):
- # If the datasource config has any provided 'fallback'
- # userdata or metadata, use it...
- if 'userdata_raw' in self.ds_cfg:
- self.userdata_raw = self.ds_cfg['userdata_raw']
- if 'metadata' in self.ds_cfg:
- self.metadata = self.ds_cfg['metadata']
- return True
-
- def get_instance_id(self):
- return 'iid-datasource-none'
-
- @property
- def is_disconnected(self):
- return True
-
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
- (DataSourceNone, []),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
deleted file mode 100644
index 43347cfb..00000000
--- a/cloudinit/sources/DataSourceOVF.py
+++ /dev/null
@@ -1,429 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Hafliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from xml.dom import minidom
-
-import base64
-import os
-import re
-import time
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
-from cloudinit.sources.helpers.vmware.imc.config \
- import Config
-from cloudinit.sources.helpers.vmware.imc.config_file \
- import ConfigFile
-from cloudinit.sources.helpers.vmware.imc.config_nic \
- import NicConfigurator
-from cloudinit.sources.helpers.vmware.imc.guestcust_error \
- import GuestCustErrorEnum
-from cloudinit.sources.helpers.vmware.imc.guestcust_event \
- import GuestCustEventEnum
-from cloudinit.sources.helpers.vmware.imc.guestcust_state \
- import GuestCustStateEnum
-from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
- enable_nics,
- get_nics_to_enable,
- set_customization_status
-)
-
-LOG = logging.getLogger(__name__)
-
-
-class DataSourceOVF(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed = None
- self.seed_dir = os.path.join(paths.seed_dir, 'ovf')
- self.environment = None
- self.cfg = {}
- self.supported_seed_starts = ("/", "file://")
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- return "%s [seed=%s]" % (root, self.seed)
-
- def get_data(self):
- found = []
- md = {}
- ud = ""
- vmwarePlatformFound = False
- vmwareImcConfigFilePath = ''
-
- defaults = {
- "instance-id": "iid-dsovf",
- }
-
- (seedfile, contents) = get_ovf_env(self.paths.seed_dir)
-
- system_type = util.read_dmi_data("system-product-name")
- if system_type is None:
- LOG.debug("No system-product-name found")
-
- if seedfile:
- # Found a seed dir
- seed = os.path.join(self.paths.seed_dir, seedfile)
- (md, ud, cfg) = read_ovf_environment(contents)
- self.environment = contents
- found.append(seed)
- elif system_type and 'vmware' in system_type.lower():
- LOG.debug("VMware Virtualization Platform found")
- if not util.get_cfg_option_bool(
- self.sys_cfg, "disable_vmware_customization", True):
- deployPkgPluginPath = search_file("/usr/lib/vmware-tools",
- "libdeployPkgPlugin.so")
- if not deployPkgPluginPath:
- deployPkgPluginPath = search_file("/usr/lib/open-vm-tools",
- "libdeployPkgPlugin.so")
- if deployPkgPluginPath:
- # When the VM is powered on, the "VMware Tools" daemon
- # copies the customization specification file to
- # /var/run/vmware-imc directory. cloud-init code needs
- # to search for the file in that directory.
- vmwareImcConfigFilePath = util.log_time(
- logfunc=LOG.debug,
- msg="waiting for configuration file",
- func=wait_for_imc_cfg_file,
- args=("/var/run/vmware-imc", "cust.cfg"))
-
- if vmwareImcConfigFilePath:
- LOG.debug("Found VMware DeployPkg Config File at %s" %
- vmwareImcConfigFilePath)
- else:
- LOG.debug("Did not find VMware DeployPkg Config File Path")
- else:
- LOG.debug("Customization for VMware platform is disabled.")
-
- if vmwareImcConfigFilePath:
- nics = ""
- try:
- cf = ConfigFile(vmwareImcConfigFilePath)
- conf = Config(cf)
- (md, ud, cfg) = read_vmware_imc(conf)
- dirpath = os.path.dirname(vmwareImcConfigFilePath)
- nics = get_nics_to_enable(dirpath)
- except Exception as e:
- LOG.debug("Error parsing the customization Config File")
- LOG.exception(e)
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)
- enable_nics(nics)
- return False
- finally:
- util.del_dir(os.path.dirname(vmwareImcConfigFilePath))
-
- try:
- LOG.debug("Applying the Network customization")
- nicConfigurator = NicConfigurator(conf.nics)
- nicConfigurator.configure()
- except Exception as e:
- LOG.debug("Error applying the Network Configuration")
- LOG.exception(e)
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED)
- enable_nics(nics)
- return False
-
- vmwarePlatformFound = True
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_DONE,
- GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
- enable_nics(nics)
- else:
- np = {'iso': transport_iso9660,
- 'vmware-guestd': transport_vmware_guestd, }
- name = None
- for (name, transfunc) in np.items():
- (contents, _dev, _fname) = transfunc()
- if contents:
- break
- if contents:
- (md, ud, cfg) = read_ovf_environment(contents)
- self.environment = contents
- found.append(name)
-
- # No OVF transports were found
- if len(found) == 0 and not vmwarePlatformFound:
- return False
-
- if 'seedfrom' in md and md['seedfrom']:
- seedfrom = md['seedfrom']
- seedfound = False
- for proto in self.supported_seed_starts:
- if seedfrom.startswith(proto):
- seedfound = proto
- break
- if not seedfound:
- LOG.debug("Seed from %s not supported by %s",
- seedfrom, self)
- return False
-
- (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
- LOG.debug("Using seeded cache data from %s", seedfrom)
-
- md = util.mergemanydict([md, md_seed])
- found.append(seedfrom)
-
- # Now that we have exhausted any other places merge in the defaults
- md = util.mergemanydict([md, defaults])
-
- self.seed = ",".join(found)
- self.metadata = md
- self.userdata_raw = ud
- self.cfg = cfg
- return True
-
- def get_public_ssh_keys(self):
- if 'public-keys' not in self.metadata:
- return []
- pks = self.metadata['public-keys']
- if isinstance(pks, (list)):
- return pks
- else:
- return [pks]
-
- # The data sources' config_obj is a cloud-config formatted
- # object that came to it from ways other than cloud-config
- # because cloud-config content would be handled elsewhere
- def get_config_obj(self):
- return self.cfg
-
-
-class DataSourceOVFNet(DataSourceOVF):
- def __init__(self, sys_cfg, distro, paths):
- DataSourceOVF.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'ovf-net')
- self.supported_seed_starts = ("http://", "https://", "ftp://")
-
-
-def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5):
- waited = 0
-
- while waited < maxwait:
- fileFullPath = search_file(dirpath, filename)
- if fileFullPath:
- return fileFullPath
- time.sleep(naplen)
- waited += naplen
- return None
-
-
-# This will return a dict with some content
-# meta-data, user-data, some config
-def read_vmware_imc(config):
- md = {}
- cfg = {}
- ud = ""
- if config.host_name:
- if config.domain_name:
- md['local-hostname'] = config.host_name + "." + config.domain_name
- else:
- md['local-hostname'] = config.host_name
-
- if config.timezone:
- cfg['timezone'] = config.timezone
-
- return (md, ud, cfg)
-
-
-# This will return a dict with some content
-# meta-data, user-data, some config
-def read_ovf_environment(contents):
- props = get_properties(contents)
- md = {}
- cfg = {}
- ud = ""
- cfg_props = ['password']
- md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id']
- for (prop, val) in props.items():
- if prop == 'hostname':
- prop = "local-hostname"
- if prop in md_props:
- md[prop] = val
- elif prop in cfg_props:
- cfg[prop] = val
- elif prop == "user-data":
- try:
- ud = base64.b64decode(val)
- except Exception:
- ud = val
- return (md, ud, cfg)
-
-
- # Returns a tuple of (filename within 'dirname', contents of the file);
-# on "not found", returns 'None' for filename and False for contents
-def get_ovf_env(dirname):
- env_names = ("ovf-env.xml", "ovf_env.xml", "OVF_ENV.XML", "OVF-ENV.XML")
- for fname in env_names:
- full_fn = os.path.join(dirname, fname)
- if os.path.isfile(full_fn):
- try:
- contents = util.load_file(full_fn)
- return (fname, contents)
- except Exception:
- util.logexc(LOG, "Failed loading ovf file %s", full_fn)
- return (None, False)
-
-
-# Transport functions take no input and return
-# a 3 tuple of content, path, filename
-def transport_iso9660(require_iso=True):
-
- # default_regex matches values in
- # /lib/udev/rules.d/60-cdrom_id.rules
- # KERNEL!="sr[0-9]*|hd[a-z]|xvd*", GOTO="cdrom_end"
- envname = "CLOUD_INIT_CDROM_DEV_REGEX"
- default_regex = "^(sr[0-9]+|hd[a-z]|xvd.*)"
-
- devname_regex = os.environ.get(envname, default_regex)
- cdmatch = re.compile(devname_regex)
-
- # Go through mounts to see if it was already mounted
- mounts = util.mounts()
- for (dev, info) in mounts.items():
- fstype = info['fstype']
- if fstype != "iso9660" and require_iso:
- continue
- if cdmatch.match(dev[5:]) is None: # take off '/dev/'
- continue
- mp = info['mountpoint']
- (fname, contents) = get_ovf_env(mp)
- if contents is not False:
- return (contents, dev, fname)
-
- if require_iso:
- mtype = "iso9660"
- else:
- mtype = None
-
- devs = os.listdir("/dev/")
- devs.sort()
- for dev in devs:
- fullp = os.path.join("/dev/", dev)
-
- if (fullp in mounts or
- not cdmatch.match(dev) or os.path.isdir(fullp)):
- continue
-
- try:
- # See if we can read anything at all...??
- util.peek_file(fullp, 512)
- except IOError:
- continue
-
- try:
- (fname, contents) = util.mount_cb(fullp, get_ovf_env, mtype=mtype)
- except util.MountFailedError:
- LOG.debug("%s not mountable as iso9660" % fullp)
- continue
-
- if contents is not False:
- return (contents, fullp, fname)
-
- return (False, None, None)
-
-
-def transport_vmware_guestd():
- # http://blogs.vmware.com/vapp/2009/07/ \
- # selfconfiguration-and-the-ovf-environment.html
- # try:
- # cmd = ['vmware-guestd', '--cmd', 'info-get guestinfo.ovfEnv']
- # (out, err) = subp(cmd)
- # return(out, 'guestinfo.ovfEnv', 'vmware-guestd')
- # except:
- # # would need to error check here and see why this failed
- # # to know if log/error should be raised
- # return(False, None, None)
- return (False, None, None)
-
-
-def find_child(node, filter_func):
- ret = []
- if not node.hasChildNodes():
- return ret
- for child in node.childNodes:
- if filter_func(child):
- ret.append(child)
- return ret
-
-
-def get_properties(contents):
-
- dom = minidom.parseString(contents)
- if dom.documentElement.localName != "Environment":
- raise XmlError("No Environment Node")
-
- if not dom.documentElement.hasChildNodes():
- raise XmlError("No Child Nodes")
-
- envNsURI = "http://schemas.dmtf.org/ovf/environment/1"
-
- # could also check here that elem.namespaceURI ==
- # "http://schemas.dmtf.org/ovf/environment/1"
- propSections = find_child(dom.documentElement,
- lambda n: n.localName == "PropertySection")
-
- if len(propSections) == 0:
- raise XmlError("No 'PropertySection's")
-
- props = {}
- propElems = find_child(propSections[0],
- (lambda n: n.localName == "Property"))
-
- for elem in propElems:
- key = elem.attributes.getNamedItemNS(envNsURI, "key").value
- val = elem.attributes.getNamedItemNS(envNsURI, "value").value
- props[key] = val
-
- return props
-
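-# Illustrative input (hypothetical values): get_properties parses an OVF
-# environment document such as
-# <Environment xmlns:oe="http://schemas.dmtf.org/ovf/environment/1">
-# <PropertySection>
-# <Property oe:key="hostname" oe:value="myhost"/>
-# </PropertySection>
-# </Environment>
-# and would return {'hostname': 'myhost'}.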
-
-def search_file(dirpath, filename):
- if not dirpath or not filename:
- return None
-
- for root, dirs, files in os.walk(dirpath):
- if filename in files:
- return os.path.join(root, filename)
-
- return None
-
-
-class XmlError(Exception):
- pass
-
-
-# Used to match classes to dependencies
-datasources = (
- (DataSourceOVF, (sources.DEP_FILESYSTEM, )),
- (DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-)
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
deleted file mode 100644
index 7b3a76b9..00000000
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ /dev/null
@@ -1,429 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Yahoo! Inc.
-# Copyright (C) 2012-2013 CERIT Scientific Cloud
-# Copyright (C) 2012-2013 OpenNebula.org
-# Copyright (C) 2014 Consejo Superior de Investigaciones Cientificas
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-# Author: Vlastimil Holer <xholer@mail.muni.cz>
-# Author: Javier Fontan <jfontan@opennebula.org>
-# Author: Enol Fernandez <enolfc@ifca.unican.es>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pwd
-import re
-import string
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
-
-LOG = logging.getLogger(__name__)
-
-DEFAULT_IID = "iid-dsopennebula"
-DEFAULT_PARSEUSER = 'nobody'
-CONTEXT_DISK_FILES = ["context.sh"]
-
-
-class DataSourceOpenNebula(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed = None
- self.seed_dir = os.path.join(paths.seed_dir, 'opennebula')
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
-
- def get_data(self):
- defaults = {"instance-id": DEFAULT_IID}
- results = None
- seed = None
-
- # decide parseuser for context.sh shell reader
- parseuser = DEFAULT_PARSEUSER
- if 'parseuser' in self.ds_cfg:
- parseuser = self.ds_cfg.get('parseuser')
-
- candidates = [self.seed_dir]
- candidates.extend(find_candidate_devs())
- for cdev in candidates:
- try:
- if os.path.isdir(self.seed_dir):
- results = read_context_disk_dir(cdev, asuser=parseuser)
- elif cdev.startswith("/dev"):
- results = util.mount_cb(cdev, read_context_disk_dir,
- data=parseuser)
- except NonContextDiskDir:
- continue
- except BrokenContextDiskDir as exc:
- raise exc
- except util.MountFailedError:
- LOG.warn("%s was not mountable" % cdev)
-
- if results:
- seed = cdev
- LOG.debug("found datasource in %s", cdev)
- break
-
- if not seed:
- return False
-
- # merge fetched metadata with datasource defaults
- md = results['metadata']
- md = util.mergemanydict([md, defaults])
-
- # check for valid user specified dsmode
- self.dsmode = self._determine_dsmode(
- [results.get('DSMODE'), self.ds_cfg.get('dsmode')])
-
- if self.dsmode == sources.DSMODE_DISABLED:
- return False
-
- self.seed = seed
- self.network_eni = results.get("network_config")
- self.metadata = md
- self.userdata_raw = results.get('userdata')
- return True
-
- def get_hostname(self, fqdn=False, resolve_ip=None):
- if resolve_ip is None:
- if self.dsmode == sources.DSMODE_NETWORK:
- resolve_ip = True
- else:
- resolve_ip = False
- return sources.DataSource.get_hostname(self, fqdn, resolve_ip)
-
-
-class NonContextDiskDir(Exception):
- pass
-
-
-class BrokenContextDiskDir(Exception):
- pass
-
-
-class OpenNebulaNetwork(object):
- REG_DEV_MAC = re.compile(
- r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?',
- re.MULTILINE | re.DOTALL)
-
- def __init__(self, ip, context):
- self.ip = ip
- self.context = context
- self.ifaces = self.get_ifaces()
-
- def get_ifaces(self):
- return self.REG_DEV_MAC.findall(self.ip)
-
- def mac2ip(self, mac):
- components = mac.split(':')[2:]
- return [str(int(c, 16)) for c in components]
-
- def get_ip(self, dev, components):
- var_name = dev.upper() + '_IP'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return '.'.join(components)
-
- def get_mask(self, dev):
- var_name = dev.upper() + '_MASK'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return '255.255.255.0'
-
- def get_network(self, dev, components):
- var_name = dev.upper() + '_NETWORK'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return '.'.join(components[:-1]) + '.0'
-
- def get_gateway(self, dev):
- var_name = dev.upper() + '_GATEWAY'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return None
-
- def get_dns(self, dev):
- var_name = dev.upper() + '_DNS'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return None
-
- def get_domain(self, dev):
- var_name = dev.upper() + '_DOMAIN'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return None
-
- def gen_conf(self):
- global_dns = []
- if 'DNS' in self.context:
- global_dns.append(self.context['DNS'])
-
- conf = []
- conf.append('auto lo')
- conf.append('iface lo inet loopback')
- conf.append('')
-
- for i in self.ifaces:
- dev = i[0]
- mac = i[1]
- ip_components = self.mac2ip(mac)
-
- conf.append('auto ' + dev)
- conf.append('iface ' + dev + ' inet static')
- conf.append(' address ' + self.get_ip(dev, ip_components))
- conf.append(' network ' + self.get_network(dev, ip_components))
- conf.append(' netmask ' + self.get_mask(dev))
-
- gateway = self.get_gateway(dev)
- if gateway:
- conf.append(' gateway ' + gateway)
-
- domain = self.get_domain(dev)
- if domain:
- conf.append(' dns-search ' + domain)
-
- # add global DNS servers to all interfaces
- dns = self.get_dns(dev)
- if global_dns or dns:
- all_dns = global_dns
- if dns:
- all_dns.append(dns)
- conf.append(' dns-nameservers ' + ' '.join(all_dns))
-
- conf.append('')
-
- return "\n".join(conf)
-
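For illustration: OpenNebula encodes the guest's IPv4 address in the last
four octets of the interface MAC, which mac2ip() above converts back. A
minimal standalone sketch with a made-up MAC:

    mac = '02:00:0a:00:00:05'   # hypothetical; last four octets carry the IP
    components = [str(int(c, 16)) for c in mac.split(':')[2:]]
    print('.'.join(components))              # -> 10.0.0.5
    print('.'.join(components[:-1]) + '.0')  # default *_NETWORK -> 10.0.0.0

gen_conf() then emits an 'iface ethN inet static' stanza from these values,
falling back to 255.255.255.0 when no ETHN_MASK is set in the context.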
-
-def find_candidate_devs():
- """
- Return a list of devices that may contain the context disk.
- """
- combined = []
- for f in ('LABEL=CONTEXT', 'LABEL=CDROM', 'TYPE=iso9660'):
- devs = util.find_devs_with(f)
- devs.sort()
- for d in devs:
- if d not in combined:
- combined.append(d)
-
- return combined
-
-
-def switch_user_cmd(user):
- return ['sudo', '-u', user]
-
-
-def parse_shell_config(content, keylist=None, bash=None, asuser=None,
- switch_user_cb=None):
-
- if isinstance(bash, str):
- bash = [bash]
- elif bash is None:
- bash = ['bash', '-e']
-
- if switch_user_cb is None:
- switch_user_cb = switch_user_cmd
-
- # allvars expands to all existing variables by using '${!x*}' notation
- # where x is lower or upper case letters or '_'
- allvars = ["${!%s*}" % x for x in string.ascii_letters + "_"]
-
- keylist_in = keylist
- if keylist is None:
- keylist = allvars
- keylist_in = []
-
- setup = '\n'.join(('__v="";', '',))
-
- def varprinter(vlist):
-        # output is '_start_\0' followed by 'key=value\0' for each var in vlist
- return '\n'.join((
- 'printf "%s\\0" _start_',
- 'for __v in %s; do' % ' '.join(vlist),
- ' printf "%s=%s\\0" "$__v" "${!__v}";',
- 'done',
- ''
- ))
-
- # the rendered 'bcmd' is bash syntax that does
- # setup: declare variables we use (so they show up in 'all')
- # varprinter(allvars): print all variables known at beginning
- # content: execute the provided content
- # varprinter(keylist): print all variables known after content
- #
- # output is then a null terminated array of:
- # literal '_start_'
- # key=value (for each preset variable)
- # literal '_start_'
- # key=value (for each post set variable)
- bcmd = ('unset IFS\n' +
- setup +
- varprinter(allvars) +
- '{\n%s\n\n:\n} > /dev/null\n' % content +
- 'unset IFS\n' +
- varprinter(keylist) + "\n")
-
- cmd = []
- if asuser is not None:
- cmd = switch_user_cb(asuser)
-
- cmd.extend(bash)
-
- (output, _error) = util.subp(cmd, data=bcmd)
-
- # exclude vars in bash that change on their own or that we used
- excluded = ("RANDOM", "LINENO", "SECONDS", "_", "__v")
- preset = {}
- ret = {}
- target = None
- output = output[0:-1] # remove trailing null
-
-    # go through output. First _start_ is for 'preset', second for 'target'.
-    # Add to target only things that changed and are not in the excluded set.
- for line in output.split("\x00"):
- try:
- (key, val) = line.split("=", 1)
- if target is preset:
- target[key] = val
- elif (key not in excluded and
- (key in keylist_in or preset.get(key) != val)):
- ret[key] = val
- except ValueError:
- if line != "_start_":
- raise
- if target is None:
- target = preset
- elif target is preset:
- target = ret
-
- return ret
-
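The wire format above is easier to see on a concrete stream. A simplified
sketch of the parsing loop, fed a hand-built output with one preset
variable and one variable set by the content:

    output = '_start_\x00PWD=/\x00_start_\x00PWD=/\x00FOO=bar\x00'
    preset, ret, target = {}, {}, None
    for line in output[:-1].split('\x00'):
        if line == '_start_':
            # first marker opens the preset section, second the results
            target = preset if target is None else ret
            continue
        key, val = line.split('=', 1)
        target[key] = val
    # keep only keys that changed relative to the preset snapshot
    print({k: v for k, v in ret.items() if preset.get(k) != v})
    # -> {'FOO': 'bar'}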
-
-def read_context_disk_dir(source_dir, asuser=None):
- """
-    read_context_disk_dir(source_dir):
-    read source_dir and return a dict with 'metadata' and 'userdata'
-    populated. If not a valid dir, raise a NonContextDiskDir
- """
- found = {}
- for af in CONTEXT_DISK_FILES:
- fn = os.path.join(source_dir, af)
- if os.path.isfile(fn):
- found[af] = fn
-
- if not found:
- raise NonContextDiskDir("%s: %s" % (source_dir, "no files found"))
-
- context = {}
- results = {'userdata': None, 'metadata': {}}
-
- if "context.sh" in found:
- if asuser is not None:
- try:
- pwd.getpwnam(asuser)
-            except KeyError:
-                raise BrokenContextDiskDir("configured user '%s' "
-                                           "does not exist" % asuser)
- try:
- path = os.path.join(source_dir, 'context.sh')
- content = util.load_file(path)
- context = parse_shell_config(content, asuser=asuser)
- except util.ProcessExecutionError as e:
- raise BrokenContextDiskDir("Error processing context.sh: %s" % (e))
- except IOError as e:
- raise NonContextDiskDir("Error reading context.sh: %s" % (e))
- else:
- raise NonContextDiskDir("Missing context.sh")
-
- if not context:
- return results
-
- results['metadata'] = context
-
- # process single or multiple SSH keys
- ssh_key_var = None
- if "SSH_KEY" in context:
- ssh_key_var = "SSH_KEY"
- elif "SSH_PUBLIC_KEY" in context:
- ssh_key_var = "SSH_PUBLIC_KEY"
-
- if ssh_key_var:
- lines = context.get(ssh_key_var).splitlines()
- results['metadata']['public-keys'] = [l for l in lines
- if len(l) and not
- l.startswith("#")]
-
-    # custom hostname -- try hostname or let cloud-init
-    # itself create a hostname from the IP address later
- for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
- if k in context:
- results['metadata']['local-hostname'] = context[k]
- break
-
- # raw user data
- if "USER_DATA" in context:
- results['userdata'] = context["USER_DATA"]
- elif "USERDATA" in context:
- results['userdata'] = context["USERDATA"]
-
- # b64decode user data if necessary (default)
- if 'userdata' in results:
- encoding = context.get('USERDATA_ENCODING',
- context.get('USER_DATA_ENCODING'))
- if encoding == "base64":
- try:
- results['userdata'] = util.b64d(results['userdata'])
- except TypeError:
- LOG.warn("Failed base64 decoding of userdata")
-
- # generate static /etc/network/interfaces
- # only if there are any required context variables
- # http://opennebula.org/documentation:rel3.8:cong#network_configuration
- for k in context:
- if re.match(r'^ETH\d+_IP$', k):
- (out, _) = util.subp(['/sbin/ip', 'link'])
- net = OpenNebulaNetwork(out, context)
- results['network-interfaces'] = net.gen_conf()
- break
-
- return results
-
-
-# Legacy: Must be present in case we load an old pkl object
-DataSourceOpenNebulaNet = DataSourceOpenNebula
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceOpenNebula, (sources.DEP_FILESYSTEM, )),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
deleted file mode 100644
index c06d17f3..00000000
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import time
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-
-from cloudinit.sources.helpers import openstack
-
-LOG = logging.getLogger(__name__)
-
-# Various defaults/constants...
-DEF_MD_URL = "http://169.254.169.254"
-DEFAULT_IID = "iid-dsopenstack"
-DEFAULT_METADATA = {
- "instance-id": DEFAULT_IID,
-}
-
-
-class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
- self.metadata_address = None
- self.ssl_details = util.fetch_ssl_details(self.paths)
- self.version = None
- self.files = {}
- self.ec2_metadata = None
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
- return mstr
-
- def _get_url_settings(self):
- # TODO(harlowja): this is shared with ec2 datasource, we should just
- # move it to a shared location instead...
- # Note: the defaults here are different though.
-
- # max_wait < 0 indicates do not wait
- max_wait = -1
- timeout = 10
-
- try:
- max_wait = int(self.ds_cfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
-
- try:
- timeout = max(0, int(self.ds_cfg.get("timeout", timeout)))
- except Exception:
- util.logexc(LOG, "Failed to get timeout, using %s", timeout)
- return (max_wait, timeout)
-
- def wait_for_metadata_service(self):
- urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL])
- filtered = [x for x in urls if util.is_resolvable_url(x)]
- if set(filtered) != set(urls):
- LOG.debug("Removed the following from metadata urls: %s",
- list((set(urls) - set(filtered))))
- if len(filtered):
- urls = filtered
- else:
- LOG.warn("Empty metadata url list! using default list")
- urls = [DEF_MD_URL]
-
- md_urls = []
- url2base = {}
- for url in urls:
- md_url = url_helper.combine_url(url, 'openstack')
- md_urls.append(md_url)
- url2base[md_url] = url
-
- (max_wait, timeout) = self._get_url_settings()
- start_time = time.time()
- avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait,
- timeout=timeout)
- if avail_url:
- LOG.debug("Using metadata source: '%s'", url2base[avail_url])
- else:
- LOG.debug("Giving up on OpenStack md from %s after %s seconds",
- md_urls, int(time.time() - start_time))
-
- self.metadata_address = url2base.get(avail_url)
- return bool(avail_url)
-
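A rough sketch of the URL handling above, with hypothetical addresses
(a plain string join stands in for url_helper.combine_url):

    urls = ['http://169.254.169.254', 'http://10.11.12.13']
    md_urls = [u.rstrip('/') + '/openstack' for u in urls]
    print(md_urls)
    # -> ['http://169.254.169.254/openstack', 'http://10.11.12.13/openstack']

wait_for_url() probes md_urls until one answers or max_wait expires, and
the winner is mapped back to its base address via url2base.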
- def get_data(self, retries=5, timeout=5):
- try:
- if not self.wait_for_metadata_service():
- return False
- except IOError:
- return False
-
- try:
- results = util.log_time(LOG.debug,
- 'Crawl of openstack metadata service',
- read_metadata_service,
- args=[self.metadata_address],
- kwargs={'ssl_details': self.ssl_details,
- 'retries': retries,
- 'timeout': timeout})
- except openstack.NonReadable:
- return False
- except (openstack.BrokenMetadata, IOError):
- util.logexc(LOG, "Broken metadata address %s",
- self.metadata_address)
- return False
-
- self.dsmode = self._determine_dsmode([results.get('dsmode')])
- if self.dsmode == sources.DSMODE_DISABLED:
- return False
-
- md = results.get('metadata', {})
- md = util.mergemanydict([md, DEFAULT_METADATA])
- self.metadata = md
- self.ec2_metadata = results.get('ec2-metadata')
- self.userdata_raw = results.get('userdata')
- self.version = results['version']
- self.files.update(results.get('files', {}))
-
- vd = results.get('vendordata')
- self.vendordata_pure = vd
- try:
- self.vendordata_raw = openstack.convert_vendordata_json(vd)
- except ValueError as e:
- LOG.warn("Invalid content in vendor-data: %s", e)
- self.vendordata_raw = None
-
- return True
-
- def check_instance_id(self, sys_cfg):
-        # quick local-only check whether self.instance_id is still valid
- return sources.instance_id_matches_system_uuid(self.get_instance_id())
-
-
-def read_metadata_service(base_url, ssl_details=None,
- timeout=5, retries=5):
- reader = openstack.MetadataReader(base_url, ssl_details=ssl_details,
- timeout=timeout, retries=retries)
- return reader.read_v2()
-
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceOpenStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
deleted file mode 100644
index ccc86883..00000000
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ /dev/null
@@ -1,781 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2013 Canonical Ltd.
-#
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-#
-# Datasource for provisioning on SmartOS. This works on Joyent
-# and public/private Clouds using SmartOS.
-#
-# SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux guests.
-# The meta-data is transmitted via key/value pairs made by
-# requests on the console. For example, to get the hostname, you
-# would send "GET hostname" on /dev/ttyS1.
-# For Linux Guests running in LX-Brand Zones on SmartOS hosts
-# a socket (/native/.zonecontrol/metadata.sock) is used instead
-# of a serial console.
-#
-# Certain behavior is defined by the DataDictionary
-# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
-# Comments with "@datadictionary" are snippets of the definition
-
-import base64
-import binascii
-import json
-import os
-import random
-import re
-import socket
-
-from cloudinit import log as logging
-from cloudinit import serial
-from cloudinit import sources
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-SMARTOS_ATTRIB_MAP = {
- # Cloud-init Key : (SmartOS Key, Strip line endings)
- 'instance-id': ('sdc:uuid', True),
- 'local-hostname': ('hostname', True),
- 'public-keys': ('root_authorized_keys', True),
- 'user-script': ('user-script', False),
- 'legacy-user-data': ('user-data', False),
- 'user-data': ('cloud-init:user-data', False),
- 'iptables_disable': ('iptables_disable', True),
- 'motd_sys_info': ('motd_sys_info', True),
- 'availability_zone': ('sdc:datacenter_name', True),
- 'vendor-data': ('sdc:vendor-data', False),
- 'operator-script': ('sdc:operator-script', False),
-}
-
-SMARTOS_ATTRIB_JSON = {
-    # Cloud-init Key : (SmartOS Key whose value is known to be JSON)
- 'network-data': 'sdc:nics',
-}
-
-SMARTOS_ENV_LX_BRAND = "lx-brand"
-SMARTOS_ENV_KVM = "kvm"
-
-DS_NAME = 'SmartOS'
-DS_CFG_PATH = ['datasource', DS_NAME]
-NO_BASE64_DECODE = [
- 'iptables_disable',
- 'motd_sys_info',
- 'root_authorized_keys',
- 'sdc:datacenter_name',
-    'sdc:uuid',
- 'user-data',
- 'user-script',
-]
-
-METADATA_SOCKFILE = '/native/.zonecontrol/metadata.sock'
-SERIAL_DEVICE = '/dev/ttyS1'
-SERIAL_TIMEOUT = 60
-
-# BUILT-IN DATASOURCE CONFIGURATION
-# The following is the built-in configuration. If the values
-# are not set via the system configuration, then these defaults
-# will be used:
-# serial_device: which serial device to use for the meta-data
-# serial_timeout: how long to wait on the device
-#    no_base64_decode: keys whose values are used as-is and are never
-#             base64 decoded, even when base64_all is set
-# base64_keys: meta-data keys that are delivered in base64
-# base64_all: with the exclusion of no_base64_decode values,
-# treat all meta-data as base64 encoded
-# disk_setup: describes how to partition the ephemeral drive
-# fs_setup: describes how to format the ephemeral drive
-#
-BUILTIN_DS_CONFIG = {
- 'serial_device': SERIAL_DEVICE,
- 'serial_timeout': SERIAL_TIMEOUT,
- 'metadata_sockfile': METADATA_SOCKFILE,
- 'no_base64_decode': NO_BASE64_DECODE,
- 'base64_keys': [],
- 'base64_all': False,
- 'disk_aliases': {'ephemeral0': '/dev/vdb'},
-}
-
-BUILTIN_CLOUD_CONFIG = {
- 'disk_setup': {
- 'ephemeral0': {'table_type': 'mbr',
- 'layout': False,
- 'overwrite': False}
- },
- 'fs_setup': [{'label': 'ephemeral0',
- 'filesystem': 'ext3',
- 'device': 'ephemeral0'}],
-}
-
-# builtin vendor-data is a boothook that writes a script into
-# /var/lib/cloud/scripts/per-boot. *That* script then handles
-# executing the 'operator-script' and 'user-script' files
-# that cloud-init writes into /var/lib/cloud/instance/data/
-# if they exist.
-#
-# This is all very indirect, but it's done like this so that at
-# some point in the future, perhaps cloud-init wouldn't do it at
-# all; the vendor would instead provide vendor-data that accomplishes
-# the same thing. (That is the point of vendor-data.)
-#
-# cloud-init does cheat a bit and writes the operator-script and user-script
-# itself. It could have the vendor-script do that, but it seems better
-# to not require the image to contain a tool (mdata-get) to read those
-# keys when we have a perfectly good one inside cloud-init.
-BUILTIN_VENDOR_DATA = """\
-#cloud-boothook
-#!/bin/sh
-fname="%(per_boot_d)s/01_smartos_vendor_data.sh"
-mkdir -p "${fname%/*}"
-cat > "$fname" <<"END_SCRIPT"
-#!/bin/sh
-##
-# This file is written as part of the default vendor data for SmartOS.
-# The SmartOS datasource writes the listed file from the listed metadata key
-# sdc:operator-script -> %(operator_script)s
-# user-script -> %(user_script)s
-#
-# You can view content with 'mdata-get <key>'
-#
-for script in "%(operator_script)s" "%(user_script)s"; do
- [ -x "$script" ] || continue
- echo "executing '$script'" 1>&2
- "$script"
-done
-END_SCRIPT
-chmod +x "$fname"
-"""
-
-
-# @datadictionary: this is legacy path for placing files from metadata
-# per the SmartOS location. It is not preferable, but is done for
-# legacy reasons
-LEGACY_USER_D = "/var/db"
-
-
-class DataSourceSmartOS(sources.DataSource):
- _unset = "_unset"
- smartos_type = _unset
- md_client = _unset
-
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.ds_cfg = util.mergemanydict([
- self.ds_cfg,
- util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
- BUILTIN_DS_CONFIG])
-
- self.metadata = {}
- self.network_data = None
- self._network_config = None
-
- self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
-
- self._init()
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- return "%s [client=%s]" % (root, self.md_client)
-
- def _init(self):
- if self.smartos_type == self._unset:
- self.smartos_type = get_smartos_environ()
- if self.smartos_type is None:
- self.md_client = None
-
- if self.md_client == self._unset:
- self.md_client = jmc_client_factory(
- smartos_type=self.smartos_type,
- metadata_sockfile=self.ds_cfg['metadata_sockfile'],
- serial_device=self.ds_cfg['serial_device'],
- serial_timeout=self.ds_cfg['serial_timeout'])
-
- def _set_provisioned(self):
- '''Mark the instance provisioning state as successful.
-
- When run in a zone, the host OS will look for /var/svc/provisioning
- to be renamed as /var/svc/provision_success. This should be done
- after meta-data is successfully retrieved and from this point
- the host considers the provision of the zone to be a success and
- keeps the zone running.
- '''
-
- LOG.debug('Instance provisioning state set as successful')
- svc_path = '/var/svc'
- if os.path.exists('/'.join([svc_path, 'provisioning'])):
- os.rename('/'.join([svc_path, 'provisioning']),
- '/'.join([svc_path, 'provision_success']))
-
- def get_data(self):
- self._init()
-
- md = {}
- ud = ""
-
- if not self.smartos_type:
- LOG.debug("Not running on smartos")
- return False
-
- if not self.md_client.exists():
- LOG.debug("No metadata device '%r' found for SmartOS datasource",
- self.md_client)
- return False
-
- for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
- smartos_noun, strip = attribute
- md[ci_noun] = self.md_client.get(smartos_noun, strip=strip)
-
- for ci_noun, smartos_noun in SMARTOS_ATTRIB_JSON.items():
- md[ci_noun] = self.md_client.get_json(smartos_noun)
-
- # @datadictionary: This key may contain a program that is written
- # to a file in the filesystem of the guest on each boot and then
- # executed. It may be of any format that would be considered
- # executable in the guest instance.
- #
- # We write 'user-script' and 'operator-script' into the
- # instance/data directory. The default vendor-data then handles
- # executing them later.
- data_d = os.path.join(self.paths.get_cpath(), 'instances',
- md['instance-id'], 'data')
- user_script = os.path.join(data_d, 'user-script')
- u_script_l = "%s/user-script" % LEGACY_USER_D
- write_boot_content(md.get('user-script'), content_f=user_script,
- link=u_script_l, shebang=True, mode=0o700)
-
- operator_script = os.path.join(data_d, 'operator-script')
- write_boot_content(md.get('operator-script'),
- content_f=operator_script, shebang=False,
- mode=0o700)
-
- # @datadictionary: This key has no defined format, but its value
- # is written to the file /var/db/mdata-user-data on each boot prior
- # to the phase that runs user-script. This file is not to be executed.
- # This allows a configuration file of some kind to be injected into
- # the machine to be consumed by the user-script when it runs.
- u_data = md.get('legacy-user-data')
- u_data_f = "%s/mdata-user-data" % LEGACY_USER_D
- write_boot_content(u_data, u_data_f)
-
- # Handle the cloud-init regular meta
- if not md['local-hostname']:
- md['local-hostname'] = md['instance-id']
-
- ud = None
- if md['user-data']:
- ud = md['user-data']
-
- if not md['vendor-data']:
- md['vendor-data'] = BUILTIN_VENDOR_DATA % {
- 'user_script': user_script,
- 'operator_script': operator_script,
- 'per_boot_d': os.path.join(self.paths.get_cpath("scripts"),
- 'per-boot'),
- }
-
- self.metadata = util.mergemanydict([md, self.metadata])
- self.userdata_raw = ud
- self.vendordata_raw = md['vendor-data']
- self.network_data = md['network-data']
-
- self._set_provisioned()
- return True
-
- def device_name_to_device(self, name):
- return self.ds_cfg['disk_aliases'].get(name)
-
- def get_config_obj(self):
- if self.smartos_type == SMARTOS_ENV_KVM:
- return BUILTIN_CLOUD_CONFIG
- return {}
-
- def get_instance_id(self):
- return self.metadata['instance-id']
-
- @property
- def network_config(self):
- if self._network_config is None:
- if self.network_data is not None:
- self._network_config = (
- convert_smartos_network_data(self.network_data))
- return self._network_config
-
-
-class JoyentMetadataFetchException(Exception):
- pass
-
-
-class JoyentMetadataClient(object):
- """
- A client implementing v2 of the Joyent Metadata Protocol Specification.
-
- The full specification can be found at
- http://eng.joyent.com/mdata/protocol.html
- """
- line_regex = re.compile(
- r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)'
- r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
- r'( (?P<payload>.+))?)')
-
- def __init__(self, smartos_type=None, fp=None):
- if smartos_type is None:
- smartos_type = get_smartos_environ()
- self.smartos_type = smartos_type
- self.fp = fp
-
- def _checksum(self, body):
- return '{0:08x}'.format(
- binascii.crc32(body.encode('utf-8')) & 0xffffffff)
-
- def _get_value_from_frame(self, expected_request_id, frame):
- frame_data = self.line_regex.match(frame).groupdict()
- if int(frame_data['length']) != len(frame_data['body']):
- raise JoyentMetadataFetchException(
- 'Incorrect frame length given ({0} != {1}).'.format(
- frame_data['length'], len(frame_data['body'])))
- expected_checksum = self._checksum(frame_data['body'])
- if frame_data['checksum'] != expected_checksum:
- raise JoyentMetadataFetchException(
- 'Invalid checksum (expected: {0}; got {1}).'.format(
- expected_checksum, frame_data['checksum']))
- if frame_data['request_id'] != expected_request_id:
- raise JoyentMetadataFetchException(
- 'Request ID mismatch (expected: {0}; got {1}).'.format(
- expected_request_id, frame_data['request_id']))
- if not frame_data.get('payload', None):
- LOG.debug('No value found.')
- return None
- value = util.b64d(frame_data['payload'])
- LOG.debug('Value "%s" found.', value)
- return value
-
- def request(self, rtype, param=None):
- request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
- message_body = ' '.join((request_id, rtype,))
- if param:
- message_body += ' ' + base64.b64encode(param.encode()).decode()
- msg = 'V2 {0} {1} {2}\n'.format(
- len(message_body), self._checksum(message_body), message_body)
- LOG.debug('Writing "%s" to metadata transport.', msg)
-
- need_close = False
- if not self.fp:
- self.open_transport()
- need_close = True
-
- self.fp.write(msg.encode('ascii'))
- self.fp.flush()
-
- response = bytearray()
- response.extend(self.fp.read(1))
- while response[-1:] != b'\n':
- response.extend(self.fp.read(1))
-
- if need_close:
- self.close_transport()
-
- response = response.rstrip().decode('ascii')
- LOG.debug('Read "%s" from metadata transport.', response)
-
- if 'SUCCESS' not in response:
- return None
-
- value = self._get_value_from_frame(request_id, response)
- return value
-
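For reference, the frame request() writes for 'GET hostname' can be
reproduced standalone (request id pinned here for reproducibility; it is
normally random):

    import base64
    import binascii

    request_id = '0000abcd'
    body = ' '.join((request_id, 'GET',
                     base64.b64encode(b'hostname').decode()))
    crc = '{0:08x}'.format(binascii.crc32(body.encode()) & 0xffffffff)
    print('V2 {0} {1} {2}'.format(len(body), crc, body))
    # -> V2 25 <crc32> 0000abcd GET aG9zdG5hbWU=

The response arrives in the same framing and is validated by line_regex
and _checksum() above.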
- def get(self, key, default=None, strip=False):
- result = self.request(rtype='GET', param=key)
- if result is None:
- return default
- if result and strip:
- result = result.strip()
- return result
-
- def get_json(self, key, default=None):
- result = self.get(key, default=default)
- if result is None:
- return default
- return json.loads(result)
-
- def list(self):
- result = self.request(rtype='KEYS')
- if result:
- result = result.split('\n')
- return result
-
- def put(self, key, val):
- param = b' '.join([base64.b64encode(i.encode())
- for i in (key, val)]).decode()
- return self.request(rtype='PUT', param=param)
-
- def delete(self, key):
- return self.request(rtype='DELETE', param=key)
-
- def close_transport(self):
- if self.fp:
- self.fp.close()
- self.fp = None
-
- def __enter__(self):
- if self.fp:
- return self
- self.open_transport()
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- self.close_transport()
- return
-
- def open_transport(self):
- raise NotImplementedError
-
-
-class JoyentMetadataSocketClient(JoyentMetadataClient):
-    def __init__(self, socketpath):
-        super(JoyentMetadataSocketClient, self).__init__()
-        self.socketpath = socketpath
-
- def open_transport(self):
- sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- sock.connect(self.socketpath)
- self.fp = sock.makefile('rwb')
-
- def exists(self):
- return os.path.exists(self.socketpath)
-
- def __repr__(self):
- return "%s(socketpath=%s)" % (self.__class__.__name__, self.socketpath)
-
-
-class JoyentMetadataSerialClient(JoyentMetadataClient):
- def __init__(self, device, timeout=10, smartos_type=None):
- super(JoyentMetadataSerialClient, self).__init__(smartos_type)
- self.device = device
- self.timeout = timeout
-
- def exists(self):
- return os.path.exists(self.device)
-
- def open_transport(self):
- ser = serial.Serial(self.device, timeout=self.timeout)
- if not ser.isOpen():
- raise SystemError("Unable to open %s" % self.device)
- self.fp = ser
-
- def __repr__(self):
- return "%s(device=%s, timeout=%s)" % (
- self.__class__.__name__, self.device, self.timeout)
-
-
-class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
- """V1 of the protocol was not safe for all values.
- Thus, we allowed the user to pass values in as base64 encoded.
- Users may still reasonably expect to be able to send base64 data
- and have it transparently decoded. So even though the V2 format is
- now used, and is safe (using base64 itself), we keep legacy support.
-
- The way for a user to do this was:
- a.) specify 'base64_keys' key whose value is a comma delimited
- list of keys that were base64 encoded.
- b.) base64_all: string interpreted as a boolean that indicates
- if all keys are base64 encoded.
- c.) set a key named b64-<keyname> with a boolean indicating that
- <keyname> is base64 encoded."""
-
- def __init__(self, device, timeout=10, smartos_type=None):
- s = super(JoyentMetadataLegacySerialClient, self)
- s.__init__(device, timeout, smartos_type)
- self.base64_keys = None
- self.base64_all = None
-
- def _init_base64_keys(self, reset=False):
- if reset:
- self.base64_keys = None
- self.base64_all = None
-
- keys = None
- if self.base64_all is None:
- keys = self.list()
- if 'base64_all' in keys:
- self.base64_all = util.is_true(self._get("base64_all"))
- else:
- self.base64_all = False
-
- if self.base64_all:
- # short circuit if base64_all is true
- return
-
- if self.base64_keys is None:
- if keys is None:
- keys = self.list()
- b64_keys = set()
- if 'base64_keys' in keys:
- b64_keys = set(self._get("base64_keys").split(","))
-
- # now add any b64-<keyname> that has a true value
- for key in [k[3:] for k in keys if k.startswith("b64-")]:
- if util.is_true(self._get(key)):
- b64_keys.add(key)
- else:
- if key in b64_keys:
- b64_keys.remove(key)
-
- self.base64_keys = b64_keys
-
- def _get(self, key, default=None, strip=False):
- return (super(JoyentMetadataLegacySerialClient, self).
- get(key, default=default, strip=strip))
-
- def is_b64_encoded(self, key, reset=False):
- if key in NO_BASE64_DECODE:
- return False
-
- self._init_base64_keys(reset=reset)
- if self.base64_all:
- return True
-
- return key in self.base64_keys
-
- def get(self, key, default=None, strip=False):
- mdefault = object()
- val = self._get(key, strip=False, default=mdefault)
- if val is mdefault:
- return default
-
- if self.is_b64_encoded(key):
- try:
- val = base64.b64decode(val.encode()).decode()
- # Bogus input produces different errors in Python 2 and 3
- except (TypeError, binascii.Error):
- LOG.warn("Failed base64 decoding key '%s': %s", key, val)
-
- if strip:
- val = val.strip()
-
- return val
-
-
-def jmc_client_factory(
- smartos_type=None, metadata_sockfile=METADATA_SOCKFILE,
- serial_device=SERIAL_DEVICE, serial_timeout=SERIAL_TIMEOUT,
- uname_version=None):
-
- if smartos_type is None:
- smartos_type = get_smartos_environ(uname_version)
-
- if smartos_type is None:
- return None
- elif smartos_type == SMARTOS_ENV_KVM:
- return JoyentMetadataLegacySerialClient(
- device=serial_device, timeout=serial_timeout,
- smartos_type=smartos_type)
- elif smartos_type == SMARTOS_ENV_LX_BRAND:
- return JoyentMetadataSocketClient(socketpath=metadata_sockfile)
-
- raise ValueError("Unknown value for smartos_type: %s" % smartos_type)
-
-
-def write_boot_content(content, content_f, link=None, shebang=False,
- mode=0o400):
- """
- Write the content to content_f. Under the following rules:
- 1. If no content, remove the file
- 2. Write the content
- 3. If executable and no file magic, add it
- 4. If there is a link, create it
-
- @param content: what to write
- @param content_f: the file name
- @param link: if defined, location to create a symlink to
- @param shebang: if no file magic, set shebang
- @param mode: file mode
-
-    Because of the way that cloud-init executes scripts (no shell),
-    a script will fail to execute if it does not have a magic bit
-    (shebang) set. If shebang=True and the content has none, a
-    '#!/bin/bash' line (the SmartOS default) is prepended to plain text.
- """
-
- if not content and os.path.exists(content_f):
- os.unlink(content_f)
- if link and os.path.islink(link):
- os.unlink(link)
- if not content:
- return
-
- util.write_file(content_f, content, mode=mode)
-
- if shebang and not content.startswith("#!"):
- try:
- cmd = ["file", "--brief", "--mime-type", content_f]
- (f_type, _err) = util.subp(cmd)
- LOG.debug("script %s mime type is %s", content_f, f_type)
- if f_type.strip() == "text/plain":
- new_content = "\n".join(["#!/bin/bash", content])
- util.write_file(content_f, new_content, mode=mode)
- LOG.debug("added shebang to file %s", content_f)
-
-        except Exception:
-            util.logexc(LOG, "Failed to identify script type for %s",
-                        content_f)
-
- if link:
- try:
- if os.path.islink(link):
- os.unlink(link)
- if content and os.path.exists(content_f):
- util.ensure_dir(os.path.dirname(link))
- os.symlink(content_f, link)
- except IOError as e:
- util.logexc(LOG, "failed establishing content link: %s", e)
-
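A minimal sketch of the shebang rule from the docstring (the file-magic
probe via the 'file' utility is omitted; plain-text content is assumed):

    content = 'echo hello'
    if not content.startswith('#!'):
        # SmartOS default: assume bash for plain-text scripts
        content = '\n'.join(['#!/bin/bash', content])
    print(content.splitlines()[0])  # -> #!/bin/bash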
-
-def get_smartos_environ(uname_version=None, product_name=None,
- uname_arch=None):
- uname = os.uname()
- if uname_arch is None:
- uname_arch = uname[4]
-
- if uname_arch.startswith("arm") or uname_arch == "aarch64":
- return None
-
- # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
- # report 'BrandZ virtual linux' as the kernel version
- if uname_version is None:
- uname_version = uname[3]
- if uname_version.lower() == 'brandz virtual linux':
- return SMARTOS_ENV_LX_BRAND
-
- if product_name is None:
- system_type = util.read_dmi_data("system-product-name")
- else:
- system_type = product_name
-
- if system_type and 'smartdc' in system_type.lower():
- return SMARTOS_ENV_KVM
-
- return None
-
-
-# Convert SmartOS 'sdc:nics' data to network_config yaml
-def convert_smartos_network_data(network_data=None):
- """Return a dictionary of network_config by parsing provided
- SMARTOS sdc:nics configuration data
-
- sdc:nics data is a dictionary of properties of a nic and the ip
- configuration desired. Additional nic dictionaries are appended
- to the list.
-
- Converting the format is straightforward though it does include
- duplicate information as well as data which appears to be relevant
-    to the host OS rather than the guest.
-
- For each entry in the nics list returned from query sdc:nics, we
- create a type: physical entry, and extract the interface properties:
- 'mac' -> 'mac_address', 'mtu', 'interface' -> 'name'. The remaining
- keys are related to ip configuration. For each ip in the 'ips' list
-    we create a subnet entry under 'subnets', pairing the ip with the
-    corresponding entry in the 'gateways' list.
- """
-
- valid_keys = {
- 'physical': [
- 'mac_address',
- 'mtu',
- 'name',
- 'params',
- 'subnets',
- 'type',
- ],
- 'subnet': [
- 'address',
- 'broadcast',
- 'dns_nameservers',
- 'dns_search',
- 'gateway',
- 'metric',
- 'netmask',
- 'pointopoint',
- 'routes',
- 'scope',
- 'type',
- ],
- }
-
- config = []
- for nic in network_data:
- cfg = dict((k, v) for k, v in nic.items()
- if k in valid_keys['physical'])
- cfg.update({
- 'type': 'physical',
- 'name': nic['interface']})
- if 'mac' in nic:
- cfg.update({'mac_address': nic['mac']})
-
- subnets = []
- for ip, gw in zip(nic['ips'], nic['gateways']):
- subnet = dict((k, v) for k, v in nic.items()
- if k in valid_keys['subnet'])
- subnet.update({
- 'type': 'static',
- 'address': ip,
- 'gateway': gw,
- })
- subnets.append(subnet)
- cfg.update({'subnets': subnets})
- config.append(cfg)
-
- return {'version': 1, 'config': config}
-
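A worked example of the conversion with a made-up sdc:nics entry (assumes
convert_smartos_network_data from this module is in scope):

    nics = [{'interface': 'net0', 'mac': '02:00:0a:00:00:05', 'mtu': 1500,
             'netmask': '255.255.255.0',
             'ips': ['10.0.0.5/24'], 'gateways': ['10.0.0.1']}]
    print(convert_smartos_network_data(nics))
    # -> {'version': 1, 'config': [
    #      {'type': 'physical', 'name': 'net0', 'mtu': 1500,
    #       'mac_address': '02:00:0a:00:00:05',
    #       'subnets': [{'type': 'static', 'address': '10.0.0.5/24',
    #                    'gateway': '10.0.0.1',
    #                    'netmask': '255.255.255.0'}]}]}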
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceSmartOS, (sources.DEP_FILESYSTEM, )),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
-
-
-if __name__ == "__main__":
- import sys
- jmc = jmc_client_factory()
- if jmc is None:
- print("Do not appear to be on smartos.")
- sys.exit(1)
- if len(sys.argv) == 1:
- keys = (list(SMARTOS_ATTRIB_JSON.keys()) +
- list(SMARTOS_ATTRIB_MAP.keys()))
- else:
- keys = sys.argv[1:]
-
- data = {}
- for key in keys:
- if key in SMARTOS_ATTRIB_JSON:
- keyname = SMARTOS_ATTRIB_JSON[key]
- data[key] = jmc.get_json(keyname)
- else:
- if key in SMARTOS_ATTRIB_MAP:
- keyname, strip = SMARTOS_ATTRIB_MAP[key]
- else:
- keyname, strip = (key, False)
-            data[key] = jmc.get(keyname, strip=strip)
-
- print(json.dumps(data, indent=1))
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
deleted file mode 100644
index 87b8e524..00000000
--- a/cloudinit/sources/__init__.py
+++ /dev/null
@@ -1,371 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import abc
-import os
-
-import six
-
-from cloudinit import importer
-from cloudinit import log as logging
-from cloudinit import type_utils
-from cloudinit import user_data as ud
-from cloudinit import util
-
-from cloudinit.filters import launch_index
-from cloudinit.reporting import events
-
-DSMODE_DISABLED = "disabled"
-DSMODE_LOCAL = "local"
-DSMODE_NETWORK = "net"
-DSMODE_PASS = "pass"
-
-VALID_DSMODES = [DSMODE_DISABLED, DSMODE_LOCAL, DSMODE_NETWORK]
-
-DEP_FILESYSTEM = "FILESYSTEM"
-DEP_NETWORK = "NETWORK"
-DS_PREFIX = 'DataSource'
-
-LOG = logging.getLogger(__name__)
-
-
-class DataSourceNotFoundException(Exception):
- pass
-
-
-@six.add_metaclass(abc.ABCMeta)
-class DataSource(object):
-
- dsmode = DSMODE_NETWORK
-
- def __init__(self, sys_cfg, distro, paths, ud_proc=None):
- self.sys_cfg = sys_cfg
- self.distro = distro
- self.paths = paths
- self.userdata = None
- self.metadata = None
- self.userdata_raw = None
- self.vendordata = None
- self.vendordata_raw = None
-
- # find the datasource config name.
- # remove 'DataSource' from classname on front, and remove 'Net' on end.
- # Both Foo and FooNet sources expect config in cfg['sources']['Foo']
- name = type_utils.obj_name(self)
- if name.startswith(DS_PREFIX):
- name = name[len(DS_PREFIX):]
- if name.endswith('Net'):
- name = name[0:-3]
-
- self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
- ("datasource", name), {})
- if not self.ds_cfg:
- self.ds_cfg = {}
-
- if not ud_proc:
- self.ud_proc = ud.UserDataProcessor(self.paths)
- else:
- self.ud_proc = ud_proc
-
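The prefix/suffix stripping above can be sketched standalone; both
spellings of a datasource resolve to the same config path:

    for name in ('DataSourceOpenNebula', 'DataSourceOpenNebulaNet'):
        if name.startswith('DataSource'):
            name = name[len('DataSource'):]
        if name.endswith('Net'):
            name = name[:-3]
        print(name)  # -> OpenNebula (both); read from cfg['datasource'][name]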
- def __str__(self):
- return type_utils.obj_name(self)
-
- def get_userdata(self, apply_filter=False):
- if self.userdata is None:
- self.userdata = self.ud_proc.process(self.get_userdata_raw())
- if apply_filter:
- return self._filter_xdata(self.userdata)
- return self.userdata
-
- def get_vendordata(self):
- if self.vendordata is None:
- self.vendordata = self.ud_proc.process(self.get_vendordata_raw())
- return self.vendordata
-
- @property
- def launch_index(self):
- if not self.metadata:
- return None
- if 'launch-index' in self.metadata:
- return self.metadata['launch-index']
- return None
-
- def _filter_xdata(self, processed_ud):
- filters = [
- launch_index.Filter(util.safe_int(self.launch_index)),
- ]
- new_ud = processed_ud
- for f in filters:
- new_ud = f.apply(new_ud)
- return new_ud
-
- @property
- def is_disconnected(self):
- return False
-
- def get_userdata_raw(self):
- return self.userdata_raw
-
- def get_vendordata_raw(self):
- return self.vendordata_raw
-
-    # a datasource's config_obj is a cloud-config formatted
-    # object that arrived via means other than cloud-config,
-    # because cloud-config content would be handled elsewhere
- def get_config_obj(self):
- return {}
-
- def get_public_ssh_keys(self):
- return normalize_pubkey_data(self.metadata.get('public-keys'))
-
- def _remap_device(self, short_name):
- # LP: #611137
- # the metadata service may believe that devices are named 'sda'
- # when the kernel named them 'vda' or 'xvda'
- # we want to return the correct value for what will actually
- # exist in this instance
- mappings = {"sd": ("vd", "xvd", "vtb")}
- for (nfrom, tlist) in mappings.items():
- if not short_name.startswith(nfrom):
- continue
- for nto in tlist:
- cand = "/dev/%s%s" % (nto, short_name[len(nfrom):])
- if os.path.exists(cand):
- return cand
- return None
-
- def device_name_to_device(self, _name):
- # translate a 'name' to a device
- # the primary function at this point is on ec2
- # to consult metadata service, that has
- # ephemeral0: sdb
- # and return 'sdb' for input 'ephemeral0'
- return None
-
- def get_locale(self):
- return 'en_US.UTF-8'
-
- @property
- def availability_zone(self):
- return self.metadata.get('availability-zone',
- self.metadata.get('availability_zone'))
-
- @property
- def region(self):
- return self.metadata.get('region')
-
- def get_instance_id(self):
- if not self.metadata or 'instance-id' not in self.metadata:
-            # Return a magic "not really an instance id" string
- return "iid-datasource"
- return str(self.metadata['instance-id'])
-
- def get_hostname(self, fqdn=False, resolve_ip=False):
- defdomain = "localdomain"
- defhost = "localhost"
- domain = defdomain
-
- if not self.metadata or 'local-hostname' not in self.metadata:
-            # this is somewhat questionable: the cloud datasource was
-            # asked for a hostname and didn't have one. raising an error
-            # might be more appropriate, but instead fall back to the
-            # system's existing hostname
- toks = []
- hostname = util.get_hostname()
- fqdn = util.get_fqdn_from_hosts(hostname)
- if fqdn and fqdn.find(".") > 0:
- toks = str(fqdn).split(".")
- elif hostname:
- toks = [hostname, defdomain]
- else:
- toks = [defhost, defdomain]
- else:
- # if there is an ipv4 address in 'local-hostname', then
-            # make up a hostname (LP: #475354) in format ip-xx-xx-xx-xx
- lhost = self.metadata['local-hostname']
- if util.is_ipv4(lhost):
- toks = []
- if resolve_ip:
- toks = util.gethostbyaddr(lhost)
-
- if toks:
- toks = str(toks).split('.')
- else:
- toks = ["ip-%s" % lhost.replace(".", "-")]
- else:
- toks = lhost.split(".")
-
- if len(toks) > 1:
- hostname = toks[0]
- domain = '.'.join(toks[1:])
- else:
- hostname = toks[0]
-
- if fqdn:
- return "%s.%s" % (hostname, domain)
- else:
- return hostname
-
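A small sketch of the IPv4 fallback path (LP: #475354) with
resolve_ip=False and a hypothetical address:

    lhost = '10.0.0.5'
    print('ip-%s' % lhost.replace('.', '-'))  # -> ip-10-0-0-5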
- def get_package_mirror_info(self):
- return self.distro.get_package_mirror_info(data_source=self)
-
- def check_instance_id(self, sys_cfg):
-        # quick local-only check whether self.instance_id is still valid
- return False
-
- @staticmethod
- def _determine_dsmode(candidates, default=None, valid=None):
- # return the first candidate that is non None, warn if not valid
- if default is None:
- default = DSMODE_NETWORK
-
- if valid is None:
- valid = VALID_DSMODES
-
- for candidate in candidates:
- if candidate is None:
- continue
- if candidate in valid:
- return candidate
- else:
- LOG.warn("invalid dsmode '%s', using default=%s",
- candidate, default)
- return default
-
- return default
-
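_determine_dsmode effectively takes the first usable candidate, e.g.
(simplified: invalid values are skipped here rather than forcing the
default, and no warning is logged):

    valid = ['disabled', 'local', 'net']
    candidates = [None, 'local', 'net']
    print(next((c for c in candidates if c in valid), 'net'))  # -> 'local'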
- @property
- def network_config(self):
- return None
-
- @property
- def first_instance_boot(self):
- return
-
-
-def normalize_pubkey_data(pubkey_data):
- keys = []
-
- if not pubkey_data:
- return keys
-
- if isinstance(pubkey_data, six.string_types):
- return str(pubkey_data).splitlines()
-
- if isinstance(pubkey_data, (list, set)):
- return list(pubkey_data)
-
- if isinstance(pubkey_data, (dict)):
- for (_keyname, klist) in pubkey_data.items():
- # lp:506332 uec metadata service responds with
- # data that makes boto populate a string for 'klist' rather
- # than a list.
- if isinstance(klist, six.string_types):
- klist = [klist]
- if isinstance(klist, (list, set)):
- for pkey in klist:
- # There is an empty string at
- # the end of the keylist, trim it
- if pkey:
- keys.append(pkey)
-
- return keys
-
-
-def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
- ds_list = list_sources(cfg_list, ds_deps, pkg_list)
- ds_names = [type_utils.obj_name(f) for f in ds_list]
- mode = "network" if DEP_NETWORK in ds_deps else "local"
- LOG.debug("Searching for %s data source in: %s", mode, ds_names)
-
- for name, cls in zip(ds_names, ds_list):
- myrep = events.ReportEventStack(
- name="search-%s" % name.replace("DataSource", ""),
- description="searching for %s data from %s" % (mode, name),
- message="no %s data found from %s" % (mode, name),
- parent=reporter)
- try:
- with myrep:
- LOG.debug("Seeing if we can get any data from %s", cls)
- s = cls(sys_cfg, distro, paths)
- if s.get_data():
- myrep.message = "found %s data from %s" % (mode, name)
- return (s, type_utils.obj_name(cls))
- except Exception:
- util.logexc(LOG, "Getting data from %s failed", cls)
-
- msg = ("Did not find any data source,"
- " searched classes: (%s)") % (", ".join(ds_names))
- raise DataSourceNotFoundException(msg)
-
-
-# Return a list of classes that have the same depends as 'depends'
-# iterate through cfg_list, loading "DataSource*" modules
-# and calling their "get_datasource_list".
-# Return an ordered list of classes that match (if any)
-def list_sources(cfg_list, depends, pkg_list):
- src_list = []
- LOG.debug(("Looking for for data source in: %s,"
- " via packages %s that matches dependencies %s"),
- cfg_list, pkg_list, depends)
- for ds_name in cfg_list:
- if not ds_name.startswith(DS_PREFIX):
- ds_name = '%s%s' % (DS_PREFIX, ds_name)
- m_locs, _looked_locs = importer.find_module(ds_name,
- pkg_list,
- ['get_datasource_list'])
- for m_loc in m_locs:
- mod = importer.import_module(m_loc)
- lister = getattr(mod, "get_datasource_list")
- matches = lister(depends)
- if matches:
- src_list.extend(matches)
- break
- return src_list
-
-
-def instance_id_matches_system_uuid(instance_id, field='system-uuid'):
-    # quick local-only check whether instance_id is still valid:
-    # compare it against the system's DMI data.
- if not instance_id:
- return False
-
- dmi_value = util.read_dmi_data(field)
- if not dmi_value:
- return False
- return instance_id.lower() == dmi_value.lower()
-
-
-# 'depends' is a list of dependencies (DEP_FILESYSTEM)
-# ds_list is a list of 2 item lists
-# ds_list = [
-# ( class, ( depends-that-this-class-needs ) )
-# ]
-# It returns a list of 'class' that matched these deps exactly
-# It is mainly a helper function for DataSourceCollections
-def list_from_depends(depends, ds_list):
- ret_list = []
- depset = set(depends)
- for (cls, deps) in ds_list:
- if depset == set(deps):
- ret_list.append(cls)
- return ret_list
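The exact-set matching is easy to see standalone (strings stand in for
the real datasource classes):

    ds_list = [('A', ('FILESYSTEM',)),
               ('B', ('FILESYSTEM', 'NETWORK'))]
    depends = ['FILESYSTEM', 'NETWORK']
    print([cls for (cls, deps) in ds_list if set(depends) == set(deps)])
    # -> ['B']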
diff --git a/cloudinit/sources/helpers/__init__.py b/cloudinit/sources/helpers/__init__.py
deleted file mode 100644
index 386225d5..00000000
--- a/cloudinit/sources/helpers/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# vi: ts=4 expandtab
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
deleted file mode 100644
index 63ccf10e..00000000
--- a/cloudinit/sources/helpers/azure.py
+++ /dev/null
@@ -1,279 +0,0 @@
-import logging
-import os
-import re
-import socket
-import struct
-import tempfile
-import time
-
-from contextlib import contextmanager
-from xml.etree import ElementTree
-
-from cloudinit import util
-
-
-LOG = logging.getLogger(__name__)
-
-
-@contextmanager
-def cd(newdir):
- prevdir = os.getcwd()
- os.chdir(os.path.expanduser(newdir))
- try:
- yield
- finally:
- os.chdir(prevdir)
-
-
-class AzureEndpointHttpClient(object):
-
- headers = {
- 'x-ms-agent-name': 'WALinuxAgent',
- 'x-ms-version': '2012-11-30',
- }
-
- def __init__(self, certificate):
- self.extra_secure_headers = {
- "x-ms-cipher-name": "DES_EDE3_CBC",
- "x-ms-guest-agent-public-x509-cert": certificate,
- }
-
- def get(self, url, secure=False):
- headers = self.headers
- if secure:
- headers = self.headers.copy()
- headers.update(self.extra_secure_headers)
- return util.read_file_or_url(url, headers=headers)
-
- def post(self, url, data=None, extra_headers=None):
- headers = self.headers
- if extra_headers is not None:
- headers = self.headers.copy()
- headers.update(extra_headers)
- return util.read_file_or_url(url, data=data, headers=headers)
-
-
-class GoalState(object):
-
- def __init__(self, xml, http_client):
- self.http_client = http_client
- self.root = ElementTree.fromstring(xml)
- self._certificates_xml = None
-
- def _text_from_xpath(self, xpath):
- element = self.root.find(xpath)
- if element is not None:
- return element.text
- return None
-
- @property
- def container_id(self):
- return self._text_from_xpath('./Container/ContainerId')
-
- @property
- def incarnation(self):
- return self._text_from_xpath('./Incarnation')
-
- @property
- def instance_id(self):
- return self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance/InstanceId')
-
- @property
- def certificates_xml(self):
- if self._certificates_xml is None:
- url = self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance'
- '/Configuration/Certificates')
- if url is not None:
- self._certificates_xml = self.http_client.get(
- url, secure=True).contents
- return self._certificates_xml
-
-
-class OpenSSLManager(object):
-
- certificate_names = {
- 'private_key': 'TransportPrivate.pem',
- 'certificate': 'TransportCert.pem',
- }
-
- def __init__(self):
- self.tmpdir = tempfile.mkdtemp()
- self.certificate = None
- self.generate_certificate()
-
- def clean_up(self):
- util.del_dir(self.tmpdir)
-
- def generate_certificate(self):
- LOG.debug('Generating certificate for communication with fabric...')
- if self.certificate is not None:
- LOG.debug('Certificate already generated.')
- return
- with cd(self.tmpdir):
- util.subp([
- 'openssl', 'req', '-x509', '-nodes', '-subj',
- '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
- '-keyout', self.certificate_names['private_key'],
- '-out', self.certificate_names['certificate'],
- ])
- certificate = ''
- for line in open(self.certificate_names['certificate']):
- if "CERTIFICATE" not in line:
- certificate += line.rstrip()
- self.certificate = certificate
- LOG.debug('New certificate generated.')
-
- def parse_certificates(self, certificates_xml):
- tag = ElementTree.fromstring(certificates_xml).find(
- './/Data')
- certificates_content = tag.text
- lines = [
- b'MIME-Version: 1.0',
- b'Content-Disposition: attachment; filename="Certificates.p7m"',
- b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"',
- b'Content-Transfer-Encoding: base64',
- b'',
- certificates_content.encode('utf-8'),
- ]
- with cd(self.tmpdir):
- with open('Certificates.p7m', 'wb') as f:
- f.write(b'\n'.join(lines))
- out, _ = util.subp(
- 'openssl cms -decrypt -in Certificates.p7m -inkey'
- ' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
- ' -password pass:'.format(**self.certificate_names),
- shell=True)
- private_keys, certificates = [], []
- current = []
- for line in out.splitlines():
- current.append(line)
- if re.match(r'[-]+END .*?KEY[-]+$', line):
- private_keys.append('\n'.join(current))
- current = []
- elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
- certificates.append('\n'.join(current))
- current = []
- keys = []
- for certificate in certificates:
- with cd(self.tmpdir):
- public_key, _ = util.subp(
- 'openssl x509 -noout -pubkey |'
- 'ssh-keygen -i -m PKCS8 -f /dev/stdin',
- data=certificate,
- shell=True)
- keys.append(public_key)
- return keys
-
-
-class WALinuxAgentShim(object):
-
- REPORT_READY_XML_TEMPLATE = '\n'.join([
- '<?xml version="1.0" encoding="utf-8"?>',
- '<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
- ' xmlns:xsd="http://www.w3.org/2001/XMLSchema">',
- ' <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>',
- ' <Container>',
- ' <ContainerId>{container_id}</ContainerId>',
- ' <RoleInstanceList>',
- ' <Role>',
- ' <InstanceId>{instance_id}</InstanceId>',
- ' <Health>',
- ' <State>Ready</State>',
- ' </Health>',
- ' </Role>',
- ' </RoleInstanceList>',
- ' </Container>',
- '</Health>'])
-
- def __init__(self):
- LOG.debug('WALinuxAgentShim instantiated...')
- self.endpoint = self.find_endpoint()
- self.openssl_manager = None
- self.values = {}
-
- def clean_up(self):
- if self.openssl_manager is not None:
- self.openssl_manager.clean_up()
-
- @staticmethod
- def get_ip_from_lease_value(lease_value):
- unescaped_value = lease_value.replace('\\', '')
- if len(unescaped_value) > 4:
- hex_string = ''
- for hex_pair in unescaped_value.split(':'):
- if len(hex_pair) == 1:
- hex_pair = '0' + hex_pair
- hex_string += hex_pair
- packed_bytes = struct.pack(
- '>L', int(hex_string.replace(':', ''), 16))
- else:
- packed_bytes = unescaped_value.encode('utf-8')
- return socket.inet_ntoa(packed_bytes)
-
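A worked decode of a lease value; the hex pairs below spell out the
well-known Azure wireserver address:

    import socket
    import struct

    value = 'a8:3f:81:10'
    packed = struct.pack('>L', int(value.replace(':', ''), 16))
    print(socket.inet_ntoa(packed))  # -> 168.63.129.16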
- @staticmethod
- def find_endpoint():
- LOG.debug('Finding Azure endpoint...')
- content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
- value = None
- for line in content.splitlines():
- if 'unknown-245' in line:
- value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
- if value is None:
- raise ValueError('No endpoint found in DHCP config.')
- endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
- LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
- return endpoint_ip_address
-
- def register_with_azure_and_fetch_data(self):
- self.openssl_manager = OpenSSLManager()
- http_client = AzureEndpointHttpClient(self.openssl_manager.certificate)
- LOG.info('Registering with Azure...')
- attempts = 0
- while True:
- try:
- response = http_client.get(
- 'http://{0}/machine/?comp=goalstate'.format(self.endpoint))
- except Exception:
- if attempts < 10:
- time.sleep(attempts + 1)
- else:
- raise
- else:
- break
- attempts += 1
- LOG.debug('Successfully fetched GoalState XML.')
- goal_state = GoalState(response.contents, http_client)
- public_keys = []
- if goal_state.certificates_xml is not None:
- LOG.debug('Certificate XML found; parsing out public keys.')
- public_keys = self.openssl_manager.parse_certificates(
- goal_state.certificates_xml)
- data = {
- 'public-keys': public_keys,
- }
- self._report_ready(goal_state, http_client)
- return data
-
- def _report_ready(self, goal_state, http_client):
- LOG.debug('Reporting ready to Azure fabric.')
- document = self.REPORT_READY_XML_TEMPLATE.format(
- incarnation=goal_state.incarnation,
- container_id=goal_state.container_id,
- instance_id=goal_state.instance_id,
- )
- http_client.post(
- "http://{0}/machine?comp=health".format(self.endpoint),
- data=document,
- extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
- )
- LOG.info('Reported ready to Azure fabric.')
-
-
-def get_metadata_from_fabric():
- shim = WALinuxAgentShim()
- try:
- return shim.register_with_azure_and_fetch_data()
- finally:
- shim.clean_up()
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
deleted file mode 100644
index 2e7a1d47..00000000
--- a/cloudinit/sources/helpers/openstack.py
+++ /dev/null
@@ -1,648 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import abc
-import base64
-import copy
-import functools
-import os
-
-import six
-
-from cloudinit import ec2_utils
-from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-
-# For reference: http://tinyurl.com/laora4c
-
-LOG = logging.getLogger(__name__)
-
-FILES_V1 = {
- # Path <-> (metadata key name, translator function, default value)
- 'etc/network/interfaces': ('network_config', lambda x: x, ''),
- 'meta.js': ('meta_js', util.load_json, {}),
- "root/.ssh/authorized_keys": ('authorized_keys', lambda x: x, ''),
-}
-KEY_COPIES = (
- # Cloud-init metadata names <-> (metadata key, is required)
- ('local-hostname', 'hostname', False),
- ('instance-id', 'uuid', True),
-)
-OS_LATEST = 'latest'
-OS_FOLSOM = '2012-08-10'
-OS_GRIZZLY = '2013-04-04'
-OS_HAVANA = '2013-10-17'
-OS_LIBERTY = '2015-10-15'
-# keep this in chronological order. new supported versions go at the end.
-OS_VERSIONS = (
- OS_FOLSOM,
- OS_GRIZZLY,
- OS_HAVANA,
- OS_LIBERTY,
-)
-
-
-class NonReadable(IOError):
- pass
-
-
-class BrokenMetadata(IOError):
- pass
-
-
-class SourceMixin(object):
- def _ec2_name_to_device(self, name):
- if not self.ec2_metadata:
- return None
- bdm = self.ec2_metadata.get('block-device-mapping', {})
- for (ent_name, device) in bdm.items():
- if name == ent_name:
- return device
- return None
-
- def get_public_ssh_keys(self):
- name = "public_keys"
- if self.version == 1:
- name = "public-keys"
- return sources.normalize_pubkey_data(self.metadata.get(name))
-
- def _os_name_to_device(self, name):
- device = None
- try:
- criteria = 'LABEL=%s' % (name)
- if name == 'swap':
- criteria = 'TYPE=%s' % (name)
- dev_entries = util.find_devs_with(criteria)
- if dev_entries:
- device = dev_entries[0]
- except util.ProcessExecutionError:
- pass
- return device
-
- def _validate_device_name(self, device):
- if not device:
- return None
- if not device.startswith("/"):
- device = "/dev/%s" % device
- if os.path.exists(device):
- return device
- # Durn, try adjusting the mapping
- remapped = self._remap_device(os.path.basename(device))
- if remapped:
- LOG.debug("Remapped device name %s => %s", device, remapped)
- return remapped
- return None
-
- def device_name_to_device(self, name):
- # Translate a 'name' to a 'physical' device
- if not name:
- return None
- # Try the ec2 mapping first
- names = [name]
- if name == 'root':
- names.insert(0, 'ami')
- if name == 'ami':
- names.append('root')
- device = None
- LOG.debug("Using ec2 style lookup to find device %s", names)
- for n in names:
- device = self._ec2_name_to_device(n)
- device = self._validate_device_name(device)
- if device:
- break
- # Try the openstack way second
- if not device:
- LOG.debug("Using openstack style lookup to find device %s", names)
- for n in names:
- device = self._os_name_to_device(n)
- device = self._validate_device_name(device)
- if device:
- break
- # Ok give up...
- if not device:
- return None
- else:
- LOG.debug("Mapped %s to device %s", name, device)
- return device
-
-
-@six.add_metaclass(abc.ABCMeta)
-class BaseReader(object):
-
- def __init__(self, base_path):
- self.base_path = base_path
-
- @abc.abstractmethod
- def _path_join(self, base, *add_ons):
- pass
-
- @abc.abstractmethod
- def _path_read(self, path, decode=False):
- pass
-
- @abc.abstractmethod
- def _fetch_available_versions(self):
- pass
-
- @abc.abstractmethod
- def _read_ec2_metadata(self):
- pass
-
- def _find_working_version(self):
- try:
- versions_available = self._fetch_available_versions()
- except Exception as e:
- LOG.debug("Unable to read openstack versions from %s due to: %s",
- self.base_path, e)
- versions_available = []
-
- # openstack.OS_VERSIONS is stored in chronological order, so
- # reverse it to check newest first.
-        supported = list(reversed(OS_VERSIONS))
- selected_version = OS_LATEST
-
- for potential_version in supported:
- if potential_version not in versions_available:
- continue
- selected_version = potential_version
- break
-
- LOG.debug("Selected version '%s' from %s", selected_version,
- versions_available)
- return selected_version
-
- def _read_content_path(self, item, decode=False):
- path = item.get('content_path', '').lstrip("/")
- path_pieces = path.split("/")
- valid_pieces = [p for p in path_pieces if len(p)]
- if not valid_pieces:
- raise BrokenMetadata("Item %s has no valid content path" % (item))
- path = self._path_join(self.base_path, "openstack", *path_pieces)
- return self._path_read(path, decode=decode)
-
- def read_v2(self):
- """Reads a version 2 formatted location.
-
- Return a dict with metadata, userdata, ec2-metadata, dsmode,
- network_config, files and version (2).
-
- If not a valid location, raise a NonReadable exception.
- """
-
- load_json_anytype = functools.partial(
- util.load_json, root_types=(dict, list) + six.string_types)
-
- def datafiles(version):
- files = {}
- files['metadata'] = (
- # File path to read
- self._path_join("openstack", version, 'meta_data.json'),
- # Is it required?
- True,
- # Translator function (applied after loading)
- util.load_json,
- )
- files['userdata'] = (
- self._path_join("openstack", version, 'user_data'),
- False,
- lambda x: x,
- )
- files['vendordata'] = (
- self._path_join("openstack", version, 'vendor_data.json'),
- False,
- load_json_anytype,
- )
- files['networkdata'] = (
- self._path_join("openstack", version, 'network_data.json'),
- False,
- load_json_anytype,
- )
- return files
-
- results = {
- 'userdata': '',
- 'version': 2,
- }
- data = datafiles(self._find_working_version())
- for (name, (path, required, translator)) in data.items():
- path = self._path_join(self.base_path, path)
- data = None
- found = False
- try:
- data = self._path_read(path)
- except IOError as e:
- if not required:
- LOG.debug("Failed reading optional path %s due"
- " to: %s", path, e)
- else:
- LOG.debug("Failed reading mandatory path %s due"
- " to: %s", path, e)
- else:
- found = True
- if required and not found:
- raise NonReadable("Missing mandatory path: %s" % path)
- if found and translator:
- try:
- data = translator(data)
- except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
- if found:
- results[name] = data
-
- metadata = results['metadata']
- if 'random_seed' in metadata:
- random_seed = metadata['random_seed']
- try:
- metadata['random_seed'] = base64.b64decode(random_seed)
- except (ValueError, TypeError) as e:
- raise BrokenMetadata("Badly formatted metadata"
- " random_seed entry: %s" % e)
-
- # load any files that were provided
- files = {}
- metadata_files = metadata.get('files', [])
- for item in metadata_files:
- if 'path' not in item:
- continue
- path = item['path']
- try:
- files[path] = self._read_content_path(item)
- except Exception as e:
- raise BrokenMetadata("Failed to read provided "
- "file %s: %s" % (path, e))
- results['files'] = files
-
- # The 'network_config' item in metadata is a content pointer
-        # to the network config that should be applied. It is just an
-        # Ubuntu/Debian '/etc/network/interfaces' file.
- net_item = metadata.get("network_config", None)
- if net_item:
- try:
- content = self._read_content_path(net_item, decode=True)
- results['network_config'] = content
- except IOError as e:
- raise BrokenMetadata("Failed to read network"
- " configuration: %s" % (e))
-
-        # In OpenStack, a user can specify meta ('nova boot --meta=key=value')
-        # and those values will appear under metadata['meta'].
-        # If they specify 'dsmode' they are indicating the mode that they
-        # intend for this datasource to operate in.
- try:
- results['dsmode'] = metadata['meta']['dsmode']
- except KeyError:
- pass
-
- # Read any ec2-metadata (if applicable)
- results['ec2-metadata'] = self._read_ec2_metadata()
-
- # Perform some misc. metadata key renames...
- for (target_key, source_key, is_required) in KEY_COPIES:
- if is_required and source_key not in metadata:
- raise BrokenMetadata("No '%s' entry in metadata" % source_key)
- if source_key in metadata:
- metadata[target_key] = metadata.get(source_key)
- return results
-
-
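(A minimal sketch of the KEY_COPIES rename pass that read_v2() finishes with, using hypothetical metadata: required source keys must exist, and present ones are mirrored under the cloud-init names.)

KEY_COPIES = (('local-hostname', 'hostname', False),
              ('instance-id', 'uuid', True))

metadata = {'uuid': 'i-abc123', 'hostname': 'vm01'}  # hypothetical input
for target_key, source_key, is_required in KEY_COPIES:
    if is_required and source_key not in metadata:
        raise ValueError("No '%s' entry in metadata" % source_key)
    if source_key in metadata:
        metadata[target_key] = metadata[source_key]

print(metadata['instance-id'], metadata['local-hostname'])  # i-abc123 vm01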
-class ConfigDriveReader(BaseReader):
- def __init__(self, base_path):
- super(ConfigDriveReader, self).__init__(base_path)
- self._versions = None
-
- def _path_join(self, base, *add_ons):
- components = [base] + list(add_ons)
- return os.path.join(*components)
-
- def _path_read(self, path, decode=False):
- return util.load_file(path, decode=decode)
-
- def _fetch_available_versions(self):
- if self._versions is None:
- path = self._path_join(self.base_path, 'openstack')
- found = [d for d in os.listdir(path)
-                     if os.path.isdir(os.path.join(path, d))]
- self._versions = sorted(found)
- return self._versions
-
- def _read_ec2_metadata(self):
- path = self._path_join(self.base_path,
- 'ec2', 'latest', 'meta-data.json')
- if not os.path.exists(path):
- return {}
- else:
- try:
- return util.load_json(self._path_read(path))
- except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
-
- def read_v1(self):
- """Reads a version 1 formatted location.
-
- Return a dict with metadata, userdata, dsmode, files and version (1).
-
- If not a valid path, raise a NonReadable exception.
- """
-
- found = {}
- for name in FILES_V1.keys():
- path = self._path_join(self.base_path, name)
- if os.path.exists(path):
- found[name] = path
- if len(found) == 0:
- raise NonReadable("%s: no files found" % (self.base_path))
-
- md = {}
- for (name, (key, translator, default)) in FILES_V1.items():
- if name in found:
- path = found[name]
- try:
- contents = self._path_read(path)
- except IOError:
- raise BrokenMetadata("Failed to read: %s" % path)
- try:
- md[key] = translator(contents)
- except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
- else:
- md[key] = copy.deepcopy(default)
-
- keydata = md['authorized_keys']
- meta_js = md['meta_js']
-
- # keydata in meta_js is preferred over "injected"
- keydata = meta_js.get('public-keys', keydata)
- if keydata:
- lines = keydata.splitlines()
- md['public-keys'] = [l for l in lines
- if len(l) and not l.startswith("#")]
-
- # config-drive-v1 has no way for openstack to provide the instance-id
- # so we copy that into metadata from the user input
- if 'instance-id' in meta_js:
- md['instance-id'] = meta_js['instance-id']
-
- results = {
- 'version': 1,
- 'metadata': md,
- }
-
- # allow the user to specify 'dsmode' in a meta tag
- if 'dsmode' in meta_js:
- results['dsmode'] = meta_js['dsmode']
-
- # config-drive-v1 has no way of specifying user-data, so the user has
- # to cheat and stuff it in a meta tag also.
- results['userdata'] = meta_js.get('user-data', '')
-
- # this implementation does not support files other than
- # network/interfaces and authorized_keys...
- results['files'] = {}
-
- return results
-
-
-class MetadataReader(BaseReader):
- def __init__(self, base_url, ssl_details=None, timeout=5, retries=5):
- super(MetadataReader, self).__init__(base_url)
- self.ssl_details = ssl_details
- self.timeout = float(timeout)
- self.retries = int(retries)
- self._versions = None
-
- def _fetch_available_versions(self):
- # <baseurl>/openstack/ returns a newline separated list of versions
- if self._versions is not None:
- return self._versions
- found = []
- version_path = self._path_join(self.base_path, "openstack")
- content = self._path_read(version_path)
- for line in content.splitlines():
- line = line.strip()
- if not line:
- continue
- found.append(line)
- self._versions = found
- return self._versions
-
- def _path_read(self, path, decode=False):
-
- def should_retry_cb(_request_args, cause):
- try:
- code = int(cause.code)
- if code >= 400:
- return False
- except (TypeError, ValueError):
- # Older versions of requests didn't have a code.
- pass
- return True
-
- response = url_helper.readurl(path,
- retries=self.retries,
- ssl_details=self.ssl_details,
- timeout=self.timeout,
- exception_cb=should_retry_cb)
- if decode:
- return response.contents.decode()
- else:
- return response.contents
-
- def _path_join(self, base, *add_ons):
- return url_helper.combine_url(base, *add_ons)
-
- def _read_ec2_metadata(self):
- return ec2_utils.get_instance_metadata(ssl_details=self.ssl_details,
- timeout=self.timeout,
- retries=self.retries)
-
-
-# Convert OpenStack ConfigDrive NetworkData json to network_config yaml
-def convert_net_json(network_json=None, known_macs=None):
- """Return a dictionary of network_config by parsing provided
- OpenStack ConfigDrive NetworkData json format
-
-    OpenStack network_data.json provides a 3-element dictionary:
- - "links" (links are network devices, physical or virtual)
- - "networks" (networks are ip network configurations for one or more
- links)
- - services (non-ip services, like dns)
-
- networks and links are combined via network items referencing specific
-    links via a 'link_id' which maps to a link's 'id' field.
-
- To convert this format to network_config yaml, we first iterate over the
- links and then walk the network list to determine if any of the networks
- utilize the current link; if so we generate a subnet entry for the device
-
- We also need to map network_data.json fields to network_config fields. For
- example, the network_data links 'id' field is equivalent to network_config
- 'name' field for devices. We apply more of this mapping to the various
- link types that we encounter.
-
- There are additional fields that are populated in the network_data.json
- from OpenStack that are not relevant to network_config yaml, so we
- enumerate a dictionary of valid keys for network_yaml and apply filtering
-    to drop these superfluous keys from the network_config yaml.
- """
- if network_json is None:
- return None
-
- # dict of network_config key for filtering network_json
- valid_keys = {
- 'physical': [
- 'name',
- 'type',
- 'mac_address',
- 'subnets',
- 'params',
- 'mtu',
- ],
- 'subnet': [
- 'type',
- 'address',
- 'netmask',
- 'broadcast',
- 'metric',
- 'gateway',
- 'pointopoint',
- 'scope',
- 'dns_nameservers',
- 'dns_search',
- 'routes',
- ],
- }
-
- links = network_json.get('links', [])
- networks = network_json.get('networks', [])
- services = network_json.get('services', [])
-
- config = []
- for link in links:
- subnets = []
- cfg = dict((k, v) for k, v in link.items()
- if k in valid_keys['physical'])
- # 'name' is not in openstack spec yet, but we will support it if it is
- # present. The 'id' in the spec is currently implemented as the host
- # nic's name, meaning something like 'tap-adfasdffd'. We do not want
- # to name guest devices with such ugly names.
- if 'name' in link:
- cfg['name'] = link['name']
-
- for network in [n for n in networks
- if n['link'] == link['id']]:
- subnet = dict((k, v) for k, v in network.items()
- if k in valid_keys['subnet'])
- if 'dhcp' in network['type']:
- t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4'
- subnet.update({
- 'type': t,
- })
- else:
- subnet.update({
- 'type': 'static',
- 'address': network.get('ip_address'),
- })
- if network['type'] == 'ipv4':
- subnet['ipv4'] = True
- if network['type'] == 'ipv6':
- subnet['ipv6'] = True
- subnets.append(subnet)
- cfg.update({'subnets': subnets})
- if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge']:
- cfg.update({
- 'type': 'physical',
- 'mac_address': link['ethernet_mac_address']})
- elif link['type'] in ['bond']:
- params = {}
- for k, v in link.items():
- if k == 'bond_links':
- continue
- elif k.startswith('bond'):
- params.update({k: v})
- cfg.update({
- 'bond_interfaces': copy.deepcopy(link['bond_links']),
- 'params': params,
- })
- elif link['type'] in ['vlan']:
- cfg.update({
- 'name': "%s.%s" % (link['vlan_link'],
- link['vlan_id']),
- 'vlan_link': link['vlan_link'],
- 'vlan_id': link['vlan_id'],
- 'mac_address': link['vlan_mac_address'],
- })
- else:
- raise ValueError(
- 'Unknown network_data link type: %s' % link['type'])
-
- config.append(cfg)
-
- need_names = [d for d in config
- if d.get('type') == 'physical' and 'name' not in d]
-
- if need_names:
- if known_macs is None:
- known_macs = net.get_interfaces_by_mac()
-
- for d in need_names:
- mac = d.get('mac_address')
- if not mac:
- raise ValueError("No mac_address or name entry for %s" % d)
- if mac not in known_macs:
- raise ValueError("Unable to find a system nic for %s" % d)
- d['name'] = known_macs[mac]
-
- for service in services:
- cfg = service
- cfg.update({'type': 'nameserver'})
- config.append(cfg)
-
- return {'version': 1, 'config': config}
-
-
-def convert_vendordata_json(data, recurse=True):
- """data: a loaded json *object* (strings, arrays, dicts).
- return something suitable for cloudinit vendordata_raw.
-
- if data is:
- None: return None
- string: return string
- list: return data
- the list is then processed in UserDataProcessor
- dict: return convert_vendordata_json(data.get('cloud-init'))
- """
- if not data:
- return None
- if isinstance(data, six.string_types):
- return data
- if isinstance(data, list):
- return copy.deepcopy(data)
- if isinstance(data, dict):
- if recurse is True:
- return convert_vendordata_json(data.get('cloud-init'),
- recurse=False)
- raise ValueError("vendordata['cloud-init'] cannot be dict")
- raise ValueError("Unknown data type for vendordata: %s" % type(data))
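(For reference, a hypothetical input/output pair for the convert_net_json translation above; names, MACs and addresses are invented, and dict key order is illustrative only.)

network_json = {
    'links': [{'id': 'tap-adf01', 'name': 'eth0', 'type': 'phy',
               'ethernet_mac_address': 'fa:16:3e:00:00:01', 'mtu': 1500}],
    'networks': [{'link': 'tap-adf01', 'type': 'ipv4',
                  'ip_address': '10.0.0.5', 'netmask': '255.255.255.0'}],
    'services': [{'type': 'dns', 'address': '8.8.8.8'}],
}

# convert_net_json(network_json) would yield roughly:
expected = {
    'version': 1,
    'config': [
        {'type': 'physical', 'name': 'eth0', 'mtu': 1500,
         'mac_address': 'fa:16:3e:00:00:01',
         'subnets': [{'type': 'static', 'address': '10.0.0.5',
                      'netmask': '255.255.255.0', 'ipv4': True}]},
        {'type': 'nameserver', 'address': '8.8.8.8'},
    ],
}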
diff --git a/cloudinit/sources/helpers/vmware/__init__.py b/cloudinit/sources/helpers/vmware/__init__.py
deleted file mode 100644
index 386225d5..00000000
--- a/cloudinit/sources/helpers/vmware/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# vi: ts=4 expandtab
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/sources/helpers/vmware/imc/__init__.py b/cloudinit/sources/helpers/vmware/imc/__init__.py
deleted file mode 100644
index 386225d5..00000000
--- a/cloudinit/sources/helpers/vmware/imc/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# vi: ts=4 expandtab
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
deleted file mode 100644
index fb53ec1d..00000000
--- a/cloudinit/sources/helpers/vmware/imc/boot_proto.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-class BootProtoEnum(object):
- """Specifies the NIC Boot Settings."""
-
- DHCP = 'dhcp'
- STATIC = 'static'
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
deleted file mode 100644
index d645c497..00000000
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from .nic import Nic
-
-
-class Config(object):
- """
-    Stores the contents specified in the customization
-    specification file.
- """
-
- DNS = 'DNS|NAMESERVER|'
- SUFFIX = 'DNS|SUFFIX|'
- PASS = 'PASSWORD|-PASS'
- TIMEZONE = 'DATETIME|TIMEZONE'
- UTC = 'DATETIME|UTC'
- HOSTNAME = 'NETWORK|HOSTNAME'
- DOMAINNAME = 'NETWORK|DOMAINNAME'
-
- def __init__(self, configFile):
- self._configFile = configFile
-
- @property
- def host_name(self):
- """Return the hostname."""
- return self._configFile.get(Config.HOSTNAME, None)
-
- @property
- def domain_name(self):
- """Return the domain name."""
- return self._configFile.get(Config.DOMAINNAME, None)
-
- @property
- def timezone(self):
- """Return the timezone."""
- return self._configFile.get(Config.TIMEZONE, None)
-
- @property
- def utc(self):
- """Retrieves whether to set time to UTC or Local."""
- return self._configFile.get(Config.UTC, None)
-
- @property
- def admin_password(self):
- """Return the root password to be set."""
- return self._configFile.get(Config.PASS, None)
-
- @property
- def name_servers(self):
- """Return the list of DNS servers."""
- res = []
- cnt = self._configFile.get_count_with_prefix(Config.DNS)
- for i in range(1, cnt + 1):
- key = Config.DNS + str(i)
- res.append(self._configFile[key])
-
- return res
-
- @property
- def dns_suffixes(self):
- """Return the list of DNS Suffixes."""
- res = []
- cnt = self._configFile.get_count_with_prefix(Config.SUFFIX)
- for i in range(1, cnt + 1):
- key = Config.SUFFIX + str(i)
- res.append(self._configFile[key])
-
- return res
-
- @property
- def nics(self):
- """Return the list of associated NICs."""
- res = []
- nics = self._configFile['NIC-CONFIG|NICS']
- for nic in nics.split(','):
- res.append(Nic(nic, self._configFile))
-
- return res
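(A minimal sketch of the indexed-key convention the Config properties above read: list-valued settings are stored under numbered keys such as 'DNS|NAMESERVER|1'. The values are hypothetical.)

cust_spec = {
    'NETWORK|HOSTNAME': 'vm01',
    'DNS|NAMESERVER|1': '10.0.0.2',
    'DNS|NAMESERVER|2': '10.0.0.3',
}

def values_with_prefix(cfg, prefix):
    count = len([k for k in cfg if k.startswith(prefix)])
    return [cfg[prefix + str(i)] for i in range(1, count + 1)]

print(values_with_prefix(cust_spec, 'DNS|NAMESERVER|'))  # ['10.0.0.2', '10.0.0.3']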
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
deleted file mode 100644
index bb9fb7dc..00000000
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-
-try:
- import configparser
-except ImportError:
- import ConfigParser as configparser
-
-from .config_source import ConfigSource
-
-logger = logging.getLogger(__name__)
-
-
-class ConfigFile(ConfigSource, dict):
- """ConfigFile module to load the content from a specified source."""
-
- def __init__(self, filename):
- self._loadConfigFile(filename)
-
- def _insertKey(self, key, val):
- """
- Inserts a Key Value pair.
-
- Keyword arguments:
- key -- The key to insert
- val -- The value to insert for the key
-
- """
- key = key.strip()
- val = val.strip()
-
- if key.startswith('-') or '|-' in key:
- canLog = False
- else:
- canLog = True
-
- # "sensitive" settings shall not be logged
- if canLog:
- logger.debug("ADDED KEY-VAL :: '%s' = '%s'" % (key, val))
- else:
- logger.debug("ADDED KEY-VAL :: '%s' = '*****************'" % key)
-
- self[key] = val
-
- def _loadConfigFile(self, filename):
- """
- Parses properties from the specified config file.
-
- Any previously available properties will be removed.
-        Sensitive data will not be logged if the key starts
-        with '-'.
-
- Keyword arguments:
- filename - The full path to the config file.
- """
- logger.info('Parsing the config file %s.' % filename)
-
- config = configparser.ConfigParser()
- config.optionxform = str
- config.read(filename)
-
- self.clear()
-
- for category in config.sections():
- logger.debug("FOUND CATEGORY = '%s'" % category)
-
- for (key, value) in config.items(category):
- self._insertKey(category + '|' + key, value)
-
- def should_keep_current_value(self, key):
- """
- Determines whether a value for a property must be kept.
-
-        If the property is missing, it is treated as if it should not be
-        changed by the engine.
-
- Keyword arguments:
- key -- The key to search for.
- """
- # helps to distinguish from "empty" value which is used to indicate
- # "removal"
- return key not in self
-
- def should_remove_current_value(self, key):
- """
- Determines whether a value for the property must be removed.
-
-        If the specified key is empty, it is treated as if it should be
-        removed by the engine.
-
- Return true if the value can be removed, false otherwise.
-
- Keyword arguments:
- key -- The key to search for.
- """
- # helps to distinguish from "missing" value which is used to indicate
- # "keeping unchanged"
- if key in self:
- return not bool(self[key])
- else:
- return False
-
- def get_count_with_prefix(self, prefix):
- """
- Return the total count of keys that start with the specified prefix.
-
- Keyword arguments:
- prefix -- prefix of the key
- """
- return len([key for key in self if key.startswith(prefix)])
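(A minimal sketch of the flattening ConfigFile performs, assuming Python 3's configparser: every "[SECTION] key = value" INI entry becomes a "SECTION|key" entry in the dict.)

import configparser

ini = "[NETWORK]\nHOSTNAME = vm01\n\n[DATETIME]\nUTC = yes\n"
parser = configparser.ConfigParser()
parser.optionxform = str  # keep keys case-sensitive, as the loader above does
parser.read_string(ini)

flat = {'%s|%s' % (section, key): value
        for section in parser.sections()
        for key, value in parser.items(section)}
print(flat)  # {'NETWORK|HOSTNAME': 'vm01', 'DATETIME|UTC': 'yes'}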
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
deleted file mode 100644
index b28830f5..00000000
--- a/cloudinit/sources/helpers/vmware/imc/config_namespace.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from .config_source import ConfigSource
-
-
-class ConfigNamespace(ConfigSource):
- """Specifies the Config Namespace."""
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
deleted file mode 100644
index 511cc918..00000000
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2016 VMware INC.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-import os
-import re
-
-from cloudinit import util
-
-logger = logging.getLogger(__name__)
-
-
-class NicConfigurator(object):
- def __init__(self, nics):
- """
- Initialize the Nic Configurator
- @param nics (list) an array of nics to configure
- """
- self.nics = nics
- self.mac2Name = {}
- self.ipv4PrimaryGateway = None
- self.ipv6PrimaryGateway = None
- self.find_devices()
- self._primaryNic = self.get_primary_nic()
-
- def get_primary_nic(self):
- """
- Retrieve the primary nic if it exists
-        @return (NicBase): the primary nic if it exists, None otherwise
- """
- primary_nics = [nic for nic in self.nics if nic.primary]
- if not primary_nics:
- return None
- elif len(primary_nics) > 1:
- raise Exception('There can only be one primary nic',
- [nic.mac for nic in primary_nics])
- else:
- return primary_nics[0]
-
- def find_devices(self):
- """
- Create the mac2Name dictionary
-        The mac addresses are stored in lower case
- """
- cmd = ['ip', 'addr', 'show']
- (output, err) = util.subp(cmd)
- sections = re.split(r'\n\d+: ', '\n' + output)[1:]
-
- macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
- for section in sections:
- match = re.search(macPat, section)
- if not match: # Only keep info about nics
- continue
- mac = match.group(1).lower()
- name = section.split(':', 1)[0]
- self.mac2Name[mac] = name
-
- def gen_one_nic(self, nic):
- """
- Return the lines needed to configure a nic
- @return (str list): the string list to configure the nic
- @param nic (NicBase): the nic to configure
- """
- lines = []
- name = self.mac2Name.get(nic.mac.lower())
- if not name:
- raise ValueError('No known device has MACADDR: %s' % nic.mac)
-
- if nic.onboot:
- lines.append('auto %s' % name)
-
- # Customize IPv4
- lines.extend(self.gen_ipv4(name, nic))
-
- # Customize IPv6
- lines.extend(self.gen_ipv6(name, nic))
-
- lines.append('')
-
- return lines
-
- def gen_ipv4(self, name, nic):
- """
- Return the lines needed to configure the IPv4 setting of a nic
- @return (str list): the string list to configure the gateways
- @param name (str): name of the nic
- @param nic (NicBase): the nic to configure
- """
- lines = []
-
- bootproto = nic.bootProto.lower()
- if nic.ipv4_mode.lower() == 'disabled':
- bootproto = 'manual'
- lines.append('iface %s inet %s' % (name, bootproto))
-
- if bootproto != 'static':
- return lines
-
- # Static Ipv4
- v4 = nic.staticIpv4
- if v4.ip:
- lines.append(' address %s' % v4.ip)
- if v4.netmask:
- lines.append(' netmask %s' % v4.netmask)
-
- # Add the primary gateway
- if nic.primary and v4.gateways:
- self.ipv4PrimaryGateway = v4.gateways[0]
- lines.append(' gateway %s metric 0' % self.ipv4PrimaryGateway)
- return lines
-
- # Add routes if there is no primary nic
- if not self._primaryNic:
- lines.extend(self.gen_ipv4_route(nic, v4.gateways))
-
- return lines
-
- def gen_ipv4_route(self, nic, gateways):
- """
- Return the lines needed to configure additional Ipv4 route
- @return (str list): the string list to configure the gateways
- @param nic (NicBase): the nic to configure
- @param gateways (str list): the list of gateways
- """
- lines = []
-
- for gateway in gateways:
- lines.append(' up route add default gw %s metric 10000' %
- gateway)
-
- return lines
-
- def gen_ipv6(self, name, nic):
- """
- Return the lines needed to configure the gateways for a nic
- @return (str list): the string list to configure the gateways
- @param name (str): name of the nic
- @param nic (NicBase): the nic to configure
- """
- lines = []
-
- if not nic.staticIpv6:
- return lines
-
- # Static Ipv6
- addrs = nic.staticIpv6
- lines.append('iface %s inet6 static' % name)
- lines.append(' address %s' % addrs[0].ip)
- lines.append(' netmask %s' % addrs[0].netmask)
-
- for addr in addrs[1:]:
- lines.append(' up ifconfig %s inet6 add %s/%s' % (name, addr.ip,
- addr.netmask))
- # Add the primary gateway
- if nic.primary:
- for addr in addrs:
- if addr.gateway:
- self.ipv6PrimaryGateway = addr.gateway
- lines.append(' gateway %s' % self.ipv6PrimaryGateway)
- return lines
-
- # Add routes if there is no primary nic
- if not self._primaryNic:
- lines.extend(self._genIpv6Route(name, nic, addrs))
-
- return lines
-
- def _genIpv6Route(self, name, nic, addrs):
- lines = []
-
- for addr in addrs:
- lines.append(' up route -A inet6 add default gw '
- '%s metric 10000' % addr.gateway)
-
- return lines
-
- def generate(self):
- """Return the lines that is needed to configure the nics"""
- lines = []
- lines.append('iface lo inet loopback')
- lines.append('auto lo')
- lines.append('')
-
- for nic in self.nics:
- lines.extend(self.gen_one_nic(nic))
-
- return lines
-
- def clear_dhcp(self):
- logger.info('Clearing DHCP leases')
-
- # Ignore the return code 1.
- util.subp(["pkill", "dhclient"], rcs=[0, 1])
- util.subp(["rm", "-f", "/var/lib/dhcp/*"])
-
- def if_down_up(self):
- names = []
- for nic in self.nics:
- name = self.mac2Name.get(nic.mac.lower())
- names.append(name)
-
- for name in names:
-            logger.info('Bringing down interface %s' % name)
- util.subp(["ifdown", "%s" % name])
-
- self.clear_dhcp()
-
- for name in names:
-            logger.info('Bringing up interface %s' % name)
- util.subp(["ifup", "%s" % name])
-
- def configure(self):
- """
-        Configure the /etc/network/interfaces
- Make a back up of the original
- """
- containingDir = '/etc/network'
-
- interfaceFile = os.path.join(containingDir, 'interfaces')
- originalFile = os.path.join(containingDir,
- 'interfaces.before_vmware_customization')
-
- if not os.path.exists(originalFile) and os.path.exists(interfaceFile):
- os.rename(interfaceFile, originalFile)
-
- lines = self.generate()
- with open(interfaceFile, 'w') as fp:
- for line in lines:
- fp.write('%s\n' % line)
-
- self.if_down_up()
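(For reference, the shape of the Debian-style stanza the generator above emits for a hypothetical primary NIC with a static address; names and addresses are invented, and exact indentation in the real output may differ.)

auto eth0
iface eth0 inet static
    address 10.0.0.5
    netmask 255.255.255.0
    gateway 10.0.0.1 metric 0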
diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py
deleted file mode 100644
index 28ef306a..00000000
--- a/cloudinit/sources/helpers/vmware/imc/config_source.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-class ConfigSource(object):
- """Specifies a source for the Config Content."""
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
deleted file mode 100644
index d1546852..00000000
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2016 Canonical Ltd.
-# Copyright (C) 2016 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-class GuestCustErrorEnum(object):
- """Specifies different errors of Guest Customization engine"""
-
- GUESTCUST_ERROR_SUCCESS = 0
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
deleted file mode 100644
index ce90c898..00000000
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2016 Canonical Ltd.
-# Copyright (C) 2016 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-class GuestCustEventEnum(object):
- """Specifies different types of Guest Customization Events"""
-
- GUESTCUST_EVENT_CUSTOMIZE_FAILED = 100
- GUESTCUST_EVENT_NETWORK_SETUP_FAILED = 101
- GUESTCUST_EVENT_ENABLE_NICS = 103
- GUESTCUST_EVENT_QUERY_NICS = 104
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
deleted file mode 100644
index 422a096d..00000000
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2016 Canonical Ltd.
-# Copyright (C) 2016 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-class GuestCustStateEnum(object):
- """Specifies different states of Guest Customization engine"""
-
- GUESTCUST_STATE_RUNNING = 4
- GUESTCUST_STATE_DONE = 5
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
deleted file mode 100644
index c07c5949..00000000
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2016 Canonical Ltd.
-# Copyright (C) 2016 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-import os
-import time
-
-from cloudinit import util
-
-from .guestcust_event import GuestCustEventEnum
-from .guestcust_state import GuestCustStateEnum
-
-logger = logging.getLogger(__name__)
-
-
-CLOUDINIT_LOG_FILE = "/var/log/cloud-init.log"
-QUERY_NICS_SUPPORTED = "queryNicsSupported"
-NICS_STATUS_CONNECTED = "connected"
-
-
-# This will send an RPC command to the underlying
-# VMware Virtualization Platform.
-def send_rpc(rpc):
- if not rpc:
- return None
-
- out = ""
- err = "Error sending the RPC command"
-
- try:
- logger.debug("Sending RPC command: %s", rpc)
- (out, err) = util.subp(["vmware-rpctool", rpc], rcs=[0])
- # Remove the trailing newline in the output.
- if out:
- out = out.rstrip()
- except Exception as e:
- logger.debug("Failed to send RPC command")
- logger.exception(e)
-
- return (out, err)
-
-
-# This will send the customization status to the
-# underlying VMware Virtualization Platform.
-def set_customization_status(custstate, custerror, errormessage=None):
- message = ""
-
- if errormessage:
- message = CLOUDINIT_LOG_FILE + "@" + errormessage
- else:
- message = CLOUDINIT_LOG_FILE
-
- rpc = "deployPkg.update.state %d %d %s" % (custstate, custerror, message)
- (out, err) = send_rpc(rpc)
- return (out, err)
-
-
-# This will read the file nics.txt in the specified directory
-# and return the content
-def get_nics_to_enable(dirpath):
- if not dirpath:
- return None
-
- NICS_SIZE = 1024
- nicsfilepath = os.path.join(dirpath, "nics.txt")
- if not os.path.exists(nicsfilepath):
- return None
-
- with open(nicsfilepath, 'r') as fp:
- nics = fp.read(NICS_SIZE)
-
- return nics
-
-
-# This will send an RPC command to the underlying VMware Virtualization platform
-# and enable nics.
-def enable_nics(nics):
- if not nics:
- logger.warning("No Nics found")
- return
-
- enableNicsWaitRetries = 5
- enableNicsWaitCount = 5
- enableNicsWaitSeconds = 1
-
- for attempt in range(0, enableNicsWaitRetries):
- logger.debug("Trying to connect interfaces, attempt %d", attempt)
- (out, err) = set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS,
- nics)
- if not out:
- time.sleep(enableNicsWaitCount * enableNicsWaitSeconds)
- continue
-
- if out != QUERY_NICS_SUPPORTED:
- logger.warning("NICS connection status query is not supported")
- return
-
- for count in range(0, enableNicsWaitCount):
- (out, err) = set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS,
- nics)
- if out and out == NICS_STATUS_CONNECTED:
- logger.info("NICS are connected on %d second", count)
- return
-
- time.sleep(enableNicsWaitSeconds)
-
- logger.warning("Can't connect network interfaces after %d attempts",
- enableNicsWaitRetries)
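(A minimal sketch of the RPC string format that set_customization_status builds above; state 4 is GUESTCUST_STATE_RUNNING and event 103 is GUESTCUST_EVENT_ENABLE_NICS.)

CLOUDINIT_LOG_FILE = "/var/log/cloud-init.log"

def build_rpc(custstate, custerror, errormessage=None):
    message = CLOUDINIT_LOG_FILE
    if errormessage:
        message += "@" + errormessage
    return "deployPkg.update.state %d %d %s" % (custstate, custerror, message)

print(build_rpc(4, 103))  # deployPkg.update.state 4 103 /var/log/cloud-init.log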
diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
deleted file mode 100644
index 873ddc3b..00000000
--- a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-class Ipv4ModeEnum(object):
- """
- The IPv4 configuration mode which directly represents the user's goal.
-
- This mode effectively acts as a contract of the in-guest customization
- engine. It must be set based on what the user has requested and should
- not be changed by those layers. It's up to the in-guest engine to
- interpret and materialize the user's request.
- """
-
- # The legacy mode which only allows dhcp/static based on whether IPv4
- # addresses list is empty or not
- IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE'
-
- # IPv4 must use static address. Reserved for future use
- IPV4_MODE_STATIC = 'STATIC'
-
- # IPv4 must use DHCPv4. Reserved for future use
- IPV4_MODE_DHCP = 'DHCP'
-
- # IPv4 must be disabled
- IPV4_MODE_DISABLED = 'DISABLED'
-
- # IPv4 settings should be left untouched. Reserved for future use
- IPV4_MODE_AS_IS = 'AS_IS'
diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py
deleted file mode 100644
index b5d704ea..00000000
--- a/cloudinit/sources/helpers/vmware/imc/nic.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from .boot_proto import BootProtoEnum
-from .nic_base import NicBase, StaticIpv4Base, StaticIpv6Base
-
-
-class Nic(NicBase):
- """
- Holds the information about each NIC specified
- in the customization specification file
- """
-
- def __init__(self, name, configFile):
- self._name = name
- self._configFile = configFile
-
- def _get(self, what):
- return self._configFile.get(self.name + '|' + what, None)
-
- def _get_count_with_prefix(self, prefix):
- return self._configFile.get_count_with_prefix(self.name + prefix)
-
- @property
- def name(self):
- return self._name
-
- @property
- def mac(self):
- return self._get('MACADDR').lower()
-
- @property
- def primary(self):
- value = self._get('PRIMARY')
- if value:
- value = value.lower()
- return value == 'yes' or value == 'true'
- else:
- return False
-
- @property
- def onboot(self):
- value = self._get('ONBOOT')
- if value:
- value = value.lower()
- return value == 'yes' or value == 'true'
- else:
- return False
-
- @property
- def bootProto(self):
- value = self._get('BOOTPROTO')
- if value:
- return value.lower()
- else:
- return ""
-
- @property
- def ipv4_mode(self):
- value = self._get('IPv4_MODE')
- if value:
- return value.lower()
- else:
- return ""
-
- @property
- def staticIpv4(self):
- """
-        Checks the BOOTPROTO property and returns a StaticIpv4Addr
-        configuration object if a STATIC configuration is set.
- """
- if self.bootProto == BootProtoEnum.STATIC:
- return [StaticIpv4Addr(self)]
- else:
- return None
-
- @property
- def staticIpv6(self):
- cnt = self._get_count_with_prefix('|IPv6ADDR|')
-
- if not cnt:
- return None
-
- result = []
- for index in range(1, cnt + 1):
- result.append(StaticIpv6Addr(self, index))
-
- return result
-
-
-class StaticIpv4Addr(StaticIpv4Base):
- """Static IPV4 Setting."""
-
- def __init__(self, nic):
- self._nic = nic
-
- @property
- def ip(self):
- return self._nic._get('IPADDR')
-
- @property
- def netmask(self):
- return self._nic._get('NETMASK')
-
- @property
- def gateways(self):
- value = self._nic._get('GATEWAY')
- if value:
- return [x.strip() for x in value.split(',')]
- else:
- return None
-
-
-class StaticIpv6Addr(StaticIpv6Base):
- """Static IPV6 Address."""
-
- def __init__(self, nic, index):
- self._nic = nic
- self._index = index
-
- @property
- def ip(self):
- return self._nic._get('IPv6ADDR|' + str(self._index))
-
- @property
- def netmask(self):
- return self._nic._get('IPv6NETMASK|' + str(self._index))
-
- @property
- def gateway(self):
- return self._nic._get('IPv6GATEWAY|' + str(self._index))
diff --git a/cloudinit/sources/helpers/vmware/imc/nic_base.py b/cloudinit/sources/helpers/vmware/imc/nic_base.py
deleted file mode 100644
index 3c892db0..00000000
--- a/cloudinit/sources/helpers/vmware/imc/nic_base.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-class NicBase(object):
- """
-    Define what is expected of each nic.
- The following properties should be provided in an implementation class.
- """
-
- @property
- def mac(self):
- """
- Retrieves the mac address of the nic
- @return (str) : the MACADDR setting
- """
- raise NotImplementedError('MACADDR')
-
- @property
- def primary(self):
- """
- Retrieves whether the nic is the primary nic
-        Indicates whether the NIC will be used to define the default gateway.
-        If none of the NICs is configured to be primary, the default gateway
-        won't be set.
- @return (bool): the PRIMARY setting
- """
- raise NotImplementedError('PRIMARY')
-
- @property
- def onboot(self):
- """
- Retrieves whether the nic should be up at the boot time
- @return (bool) : the ONBOOT setting
- """
- raise NotImplementedError('ONBOOT')
-
- @property
- def bootProto(self):
- """
- Retrieves the boot protocol of the nic
- @return (str): the BOOTPROTO setting, valid values: dhcp and static.
- """
- raise NotImplementedError('BOOTPROTO')
-
- @property
- def ipv4_mode(self):
- """
- Retrieves the IPv4_MODE
- @return (str): the IPv4_MODE setting, valid values:
- backwards_compatible, static, dhcp, disabled, as_is
- """
- raise NotImplementedError('IPv4_MODE')
-
- @property
- def staticIpv4(self):
- """
- Retrieves the static IPv4 configuration of the nic
- @return (StaticIpv4Base list): the static ipv4 setting
- """
- raise NotImplementedError('Static IPv4')
-
- @property
- def staticIpv6(self):
- """
- Retrieves the IPv6 configuration of the nic
- @return (StaticIpv6Base list): the static ipv6 setting
- """
- raise NotImplementedError('Static Ipv6')
-
- def validate(self):
- """
- Validate the object
- For example, the staticIpv4 property is required and should not be
- empty when ipv4Mode is STATIC
- """
- raise NotImplementedError('Check constraints on properties')
-
-
-class StaticIpv4Base(object):
- """
-    Define what is expected of a static IPv4 setting
- The following properties should be provided in an implementation class.
- """
-
- @property
- def ip(self):
- """
- Retrieves the Ipv4 address
- @return (str): the IPADDR setting
- """
- raise NotImplementedError('Ipv4 Address')
-
- @property
- def netmask(self):
- """
- Retrieves the Ipv4 NETMASK setting
- @return (str): the NETMASK setting
- """
- raise NotImplementedError('Ipv4 NETMASK')
-
- @property
- def gateways(self):
- """
- Retrieves the gateways on this Ipv4 subnet
- @return (str list): the GATEWAY setting
- """
- raise NotImplementedError('Ipv4 GATEWAY')
-
-
-class StaticIpv6Base(object):
- """Define what are expected of a static IPv6 setting
- The following properties should be provided in an implementation class.
- """
-
- @property
- def ip(self):
- """
- Retrieves the Ipv6 address
- @return (str): the IPv6ADDR setting
- """
- raise NotImplementedError('Ipv6 Address')
-
- @property
- def netmask(self):
- """
- Retrieves the Ipv6 NETMASK setting
- @return (str): the IPv6NETMASK setting
- """
- raise NotImplementedError('Ipv6 NETMASK')
-
- @property
- def gateway(self):
- """
- Retrieves the Ipv6 GATEWAY setting
- @return (str): the IPv6GATEWAY setting
- """
- raise NotImplementedError('Ipv6 GATEWAY')
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
deleted file mode 100644
index c74a7ae2..00000000
--- a/cloudinit/ssh_util.py
+++ /dev/null
@@ -1,314 +0,0 @@
-#!/usr/bin/python
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pwd
-
-from cloudinit import log as logging
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-# See: man sshd_config
-DEF_SSHD_CFG = "/etc/ssh/sshd_config"
-
-# taken from openssh source key.c/key_type_from_name
-VALID_KEY_TYPES = (
- "rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa",
- "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com",
- "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com",
- "ssh-rsa-cert-v01@openssh.com", "ssh-dss-cert-v01@openssh.com",
- "ecdsa-sha2-nistp256-cert-v01@openssh.com",
- "ecdsa-sha2-nistp384-cert-v01@openssh.com",
- "ecdsa-sha2-nistp521-cert-v01@openssh.com")
-
-
-class AuthKeyLine(object):
- def __init__(self, source, keytype=None, base64=None,
- comment=None, options=None):
- self.base64 = base64
- self.comment = comment
- self.options = options
- self.keytype = keytype
- self.source = source
-
- def valid(self):
- return (self.base64 and self.keytype)
-
- def __str__(self):
- toks = []
- if self.options:
- toks.append(self.options)
- if self.keytype:
- toks.append(self.keytype)
- if self.base64:
- toks.append(self.base64)
- if self.comment:
- toks.append(self.comment)
- if not toks:
- return self.source
- else:
- return ' '.join(toks)
-
-
-class AuthKeyLineParser(object):
- """
- AUTHORIZED_KEYS FILE FORMAT
- AuthorizedKeysFile specifies the file containing public keys for public
- key authentication; if none is specified, the default is
-    ~/.ssh/authorized_keys.  Each line of the file contains one key (empty
-    lines and lines starting with a '#' are ignored as comments).  Note that
-    lines in this file are usually several hundred bytes long (because of the
-    size of the public key encoding) up to a limit of 8 kilobytes, which
-    permits DSA keys up to 8 kilobits and RSA keys up to 16 kilobits.  You
-    don't want to type them in; instead, copy the identity.pub, id_dsa.pub,
-    or the id_rsa.pub file and edit it.
-
- sshd enforces a minimum RSA key modulus size for protocol 1 and protocol
- 2 keys of 768 bits.
-
-    The options (if present) consist of comma-separated option
-    specifications.  No spaces are permitted, except within double quotes.
-    The following option specifications are supported (note that option
-    keywords are case-insensitive):
- """
-
- def _extract_options(self, ent):
- """
-        The options (if present) consist of comma-separated option
-        specifications.  No spaces are permitted, except within double quotes.
- Note that option keywords are case-insensitive.
- """
- quoted = False
- i = 0
- while (i < len(ent) and
- ((quoted) or (ent[i] not in (" ", "\t")))):
- curc = ent[i]
- if i + 1 >= len(ent):
- i = i + 1
- break
- nextc = ent[i + 1]
- if curc == "\\" and nextc == '"':
- i = i + 1
- elif curc == '"':
- quoted = not quoted
- i = i + 1
-
- options = ent[0:i]
-
- # Return the rest of the string in 'remain'
- remain = ent[i:].lstrip()
- return (options, remain)
-
- def parse(self, src_line, options=None):
-        # modeled after openssh's auth2-pubkey.c:user_key_allowed2
- line = src_line.rstrip("\r\n")
- if line.startswith("#") or line.strip() == '':
- return AuthKeyLine(src_line)
-
- def parse_ssh_key(ent):
- # return keytype, key, [comment]
- toks = ent.split(None, 2)
- if len(toks) < 2:
- raise TypeError("To few fields: %s" % len(toks))
- if toks[0] not in VALID_KEY_TYPES:
- raise TypeError("Invalid keytype %s" % toks[0])
-
- # valid key type and 2 or 3 fields:
- if len(toks) == 2:
- # no comment in line
- toks.append("")
-
- return toks
-
- ent = line.strip()
- try:
- (keytype, base64, comment) = parse_ssh_key(ent)
- except TypeError:
- (keyopts, remain) = self._extract_options(ent)
- if options is None:
- options = keyopts
-
- try:
- (keytype, base64, comment) = parse_ssh_key(remain)
- except TypeError:
- return AuthKeyLine(src_line)
-
- return AuthKeyLine(src_line, keytype=keytype, base64=base64,
- comment=comment, options=options)
-
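-# Illustrative sketch (not part of the original module; values hypothetical):
-#   parser = AuthKeyLineParser()
-#   key = parser.parse('no-pty ssh-rsa AAAAB3Nza... user@host')
-#   (key.options, key.keytype) -> ('no-pty', 'ssh-rsa')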
-
-def parse_authorized_keys(fname):
- lines = []
- try:
- if os.path.isfile(fname):
- lines = util.load_file(fname).splitlines()
- except (IOError, OSError):
- util.logexc(LOG, "Error reading lines from %s", fname)
- lines = []
-
- parser = AuthKeyLineParser()
- contents = []
- for line in lines:
- contents.append(parser.parse(line))
- return contents
-
-
-def update_authorized_keys(old_entries, keys):
- to_add = list(keys)
-
- for i in range(0, len(old_entries)):
- ent = old_entries[i]
- if not ent.valid():
- continue
- # Replace those with the same base64
- for k in keys:
- if not k.valid():
- continue
- if k.base64 == ent.base64:
- # Replace it with our better one
- ent = k
- # Don't add it later
- if k in to_add:
- to_add.remove(k)
- old_entries[i] = ent
-
- # Now append any entries we did not match above
- for key in to_add:
- old_entries.append(key)
-
- # Now format them back to strings...
- lines = [str(b) for b in old_entries]
-
- # Ensure it ends with a newline
- lines.append('')
- return '\n'.join(lines)
-
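-# Illustrative (not in the original): given old entries [A, B] and new keys
-# [B2, C] where B2.base64 == B.base64, the result replaces B with B2 in
-# place and appends C, producing "A\nB2\nC\n".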
-
-def users_ssh_info(username):
- pw_ent = pwd.getpwnam(username)
- if not pw_ent or not pw_ent.pw_dir:
- raise RuntimeError("Unable to get ssh info for user %r" % (username))
- return (os.path.join(pw_ent.pw_dir, '.ssh'), pw_ent)
-
-
-def extract_authorized_keys(username):
- (ssh_dir, pw_ent) = users_ssh_info(username)
- auth_key_fn = None
- with util.SeLinuxGuard(ssh_dir, recursive=True):
- try:
- # The 'AuthorizedKeysFile' may contain tokens
- # of the form %T which are substituted during connection set-up.
- # The following tokens are defined: %% is replaced by a literal
- # '%', %h is replaced by the home directory of the user being
- # authenticated and %u is replaced by the username of that user.
- ssh_cfg = parse_ssh_config_map(DEF_SSHD_CFG)
- auth_key_fn = ssh_cfg.get("authorizedkeysfile", '').strip()
- if not auth_key_fn:
- auth_key_fn = "%h/.ssh/authorized_keys"
- auth_key_fn = auth_key_fn.replace("%h", pw_ent.pw_dir)
- auth_key_fn = auth_key_fn.replace("%u", username)
- auth_key_fn = auth_key_fn.replace("%%", '%')
- if not auth_key_fn.startswith('/'):
- auth_key_fn = os.path.join(pw_ent.pw_dir, auth_key_fn)
- except (IOError, OSError):
- # Give up and use a default key filename
- auth_key_fn = os.path.join(ssh_dir, 'authorized_keys')
- util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in ssh "
- "config from %r, using 'AuthorizedKeysFile' file "
- "%r instead", DEF_SSHD_CFG, auth_key_fn)
- return (auth_key_fn, parse_authorized_keys(auth_key_fn))
-
-
-def setup_user_keys(keys, username, options=None):
- # Make sure the users .ssh dir is setup accordingly
- (ssh_dir, pwent) = users_ssh_info(username)
- if not os.path.isdir(ssh_dir):
- util.ensure_dir(ssh_dir, mode=0o700)
- util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
-
- # Turn the 'update' keys given into actual entries
- parser = AuthKeyLineParser()
- key_entries = []
- for k in keys:
- key_entries.append(parser.parse(str(k), options=options))
-
- # Extract the old and make the new
- (auth_key_fn, auth_key_entries) = extract_authorized_keys(username)
- with util.SeLinuxGuard(ssh_dir, recursive=True):
- content = update_authorized_keys(auth_key_entries, key_entries)
- util.ensure_dir(os.path.dirname(auth_key_fn), mode=0o700)
- util.write_file(auth_key_fn, content, mode=0o600)
- util.chownbyid(auth_key_fn, pwent.pw_uid, pwent.pw_gid)
-
-
-class SshdConfigLine(object):
- def __init__(self, line, k=None, v=None):
- self.line = line
- self._key = k
- self.value = v
-
- @property
- def key(self):
- if self._key is None:
- return None
- # Keywords are case-insensitive
- return self._key.lower()
-
- def __str__(self):
- if self._key is None:
- return str(self.line)
- else:
- v = str(self._key)
- if self.value:
- v += " " + str(self.value)
- return v
-
-
-def parse_ssh_config(fname):
- # See: man sshd_config
- # The file contains keyword-argument pairs, one per line.
- # Lines starting with '#' and empty lines are interpreted as comments.
- # Note: keywords are case-insensitive and arguments are case-sensitive
- lines = []
- if not os.path.isfile(fname):
- return lines
- for line in util.load_file(fname).splitlines():
- line = line.strip()
- if not line or line.startswith("#"):
- lines.append(SshdConfigLine(line))
- continue
- try:
- key, val = line.split(None, 1)
- except ValueError:
- key, val = line.split('=', 1)
- lines.append(SshdConfigLine(line, key, val))
- return lines
-
-
-def parse_ssh_config_map(fname):
- lines = parse_ssh_config(fname)
- if not lines:
- return {}
- ret = {}
- for line in lines:
- if not line.key:
- continue
- ret[line.key] = line.value
- return ret
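-
-
-# Illustrative sketch (hypothetical file contents):
-#   parse_ssh_config_map('/etc/ssh/sshd_config')
-#   -> {'port': '22', 'authorizedkeysfile': '.ssh/authorized_keys', ...}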
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
deleted file mode 100644
index 47deac6e..00000000
--- a/cloudinit/stages.py
+++ /dev/null
@@ -1,890 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import copy
-import os
-import sys
-
-import six
-from six.moves import cPickle as pickle
-
-from cloudinit.settings import (PER_INSTANCE, FREQUENCIES, CLOUD_CONFIG)
-
-from cloudinit import handlers
-
-# Default handlers (used if not overridden)
-from cloudinit.handlers import boot_hook as bh_part
-from cloudinit.handlers import cloud_config as cc_part
-from cloudinit.handlers import shell_script as ss_part
-from cloudinit.handlers import upstart_job as up_part
-
-from cloudinit import cloud
-from cloudinit import config
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import importer
-from cloudinit import log as logging
-from cloudinit import net
-from cloudinit.net import cmdline
-from cloudinit.reporting import events
-from cloudinit import sources
-from cloudinit import type_utils
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-NULL_DATA_SOURCE = None
-NO_PREVIOUS_INSTANCE_ID = "NO_PREVIOUS_INSTANCE_ID"
-
-
-class Init(object):
- def __init__(self, ds_deps=None, reporter=None):
- if ds_deps is not None:
- self.ds_deps = ds_deps
- else:
- self.ds_deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
- # Created on first use
- self._cfg = None
- self._paths = None
- self._distro = None
- # Changed only when a fetch occurs
- self.datasource = NULL_DATA_SOURCE
- self.ds_restored = False
- self._previous_iid = None
-
- if reporter is None:
- reporter = events.ReportEventStack(
- name="init-reporter", description="init-desc",
- reporting_enabled=False)
- self.reporter = reporter
-
- def _reset(self, reset_ds=False):
- # Recreated on access
- self._cfg = None
- self._paths = None
- self._distro = None
- if reset_ds:
- self.datasource = NULL_DATA_SOURCE
- self.ds_restored = False
-
- @property
- def distro(self):
- if not self._distro:
- # Try to find the right class to use
- system_config = self._extract_cfg('system')
- distro_name = system_config.pop('distro', 'ubuntu')
- distro_cls = distros.fetch(distro_name)
- LOG.debug("Using distro class %s", distro_cls)
- self._distro = distro_cls(distro_name, system_config, self.paths)
- # If we have an active datasource we need to adjust
- # said datasource and move its distro/system config
- # from whatever it was to a new set...
- if self.datasource is not NULL_DATA_SOURCE:
- self.datasource.distro = self._distro
- self.datasource.sys_cfg = system_config
- return self._distro
-
- @property
- def cfg(self):
- return self._extract_cfg('restricted')
-
- def _extract_cfg(self, restriction):
- # Ensure actually read
- self.read_cfg()
- # Nobody gets the real config
- ocfg = copy.deepcopy(self._cfg)
- if restriction == 'restricted':
- ocfg.pop('system_info', None)
- elif restriction == 'system':
- ocfg = util.get_cfg_by_path(ocfg, ('system_info',), {})
- elif restriction == 'paths':
- ocfg = util.get_cfg_by_path(ocfg, ('system_info', 'paths'), {})
- if not isinstance(ocfg, (dict)):
- ocfg = {}
- return ocfg
-
- @property
- def paths(self):
- if not self._paths:
- path_info = self._extract_cfg('paths')
- self._paths = helpers.Paths(path_info, self.datasource)
- return self._paths
-
- def _initial_subdirs(self):
- c_dir = self.paths.cloud_dir
- initial_dirs = [
- c_dir,
- os.path.join(c_dir, 'scripts'),
- os.path.join(c_dir, 'scripts', 'per-instance'),
- os.path.join(c_dir, 'scripts', 'per-once'),
- os.path.join(c_dir, 'scripts', 'per-boot'),
- os.path.join(c_dir, 'scripts', 'vendor'),
- os.path.join(c_dir, 'seed'),
- os.path.join(c_dir, 'instances'),
- os.path.join(c_dir, 'handlers'),
- os.path.join(c_dir, 'sem'),
- os.path.join(c_dir, 'data'),
- ]
- return initial_dirs
-
- def purge_cache(self, rm_instance_lnk=False):
- rm_list = []
- rm_list.append(self.paths.boot_finished)
- if rm_instance_lnk:
- rm_list.append(self.paths.instance_link)
- for f in rm_list:
- util.del_file(f)
- return len(rm_list)
-
- def initialize(self):
- self._initialize_filesystem()
-
- def _initialize_filesystem(self):
- util.ensure_dirs(self._initial_subdirs())
- log_file = util.get_cfg_option_str(self.cfg, 'def_log_file')
- if log_file:
- util.ensure_file(log_file)
- perms = self.cfg.get('syslog_fix_perms')
- if not perms:
- perms = {}
- if not isinstance(perms, list):
- perms = [perms]
-
- error = None
- for perm in perms:
- u, g = util.extract_usergroup(perm)
- try:
- util.chownbyname(log_file, u, g)
- return
- except OSError as e:
- error = e
-
- LOG.warn("Failed changing perms on '%s'. tried: %s. %s",
- log_file, ','.join(perms), error)
-
- def read_cfg(self, extra_fns=None):
- # None check so that we don't keep on re-loading if empty
- if self._cfg is None:
- self._cfg = self._read_cfg(extra_fns)
- # LOG.debug("Loaded 'init' config %s", self._cfg)
-
- def _read_cfg(self, extra_fns):
- no_cfg_paths = helpers.Paths({}, self.datasource)
- merger = helpers.ConfigMerger(paths=no_cfg_paths,
- datasource=self.datasource,
- additional_fns=extra_fns,
- base_cfg=fetch_base_config())
- return merger.cfg
-
- def _restore_from_cache(self):
- # We try to restore from a current link and static path
- # by using the instance link; if purge_cache was called
- # the file won't exist.
- return _pkl_load(self.paths.get_ipath_cur('obj_pkl'))
-
- def _write_to_cache(self):
- if self.datasource is NULL_DATA_SOURCE:
- return False
- return _pkl_store(self.datasource, self.paths.get_ipath_cur("obj_pkl"))
-
- def _get_datasources(self):
- # Any config provided???
- pkg_list = self.cfg.get('datasource_pkg_list') or []
- # Add the defaults at the end
- for n in ['', type_utils.obj_name(sources)]:
- if n not in pkg_list:
- pkg_list.append(n)
- cfg_list = self.cfg.get('datasource_list') or []
- return (cfg_list, pkg_list)
-
- def _restore_from_checked_cache(self, existing):
- if existing not in ("check", "trust"):
- raise ValueError("Unexpected value for existing: %s" % existing)
-
- ds = self._restore_from_cache()
- if not ds:
- return (None, "no cache found")
-
- run_iid_fn = self.paths.get_runpath('instance_id')
- if os.path.exists(run_iid_fn):
- run_iid = util.load_file(run_iid_fn).strip()
- else:
- run_iid = None
-
- if run_iid == ds.get_instance_id():
- return (ds, "restored from cache with run check: %s" % ds)
- elif existing == "trust":
- return (ds, "restored from cache: %s" % ds)
- else:
- if (hasattr(ds, 'check_instance_id') and
- ds.check_instance_id(self.cfg)):
- return (ds, "restored from checked cache: %s" % ds)
- else:
- return (None, "cache invalid in datasource: %s" % ds)
-
- def _get_data_source(self, existing):
- if self.datasource is not NULL_DATA_SOURCE:
- return self.datasource
-
- with events.ReportEventStack(
- name="check-cache",
- description="attempting to read from cache [%s]" % existing,
- parent=self.reporter) as myrep:
-
- ds, desc = self._restore_from_checked_cache(existing)
- myrep.description = desc
- self.ds_restored = bool(ds)
- LOG.debug(myrep.description)
-
- if not ds:
- util.del_file(self.paths.instance_link)
- (cfg_list, pkg_list) = self._get_datasources()
- # Deep copy so that user-data handlers cannot modify it
- # (modifications would affect handlers down the line...)
- (ds, dsname) = sources.find_source(self.cfg,
- self.distro,
- self.paths,
- copy.deepcopy(self.ds_deps),
- cfg_list,
- pkg_list, self.reporter)
- LOG.info("Loaded datasource %s - %s", dsname, ds)
- self.datasource = ds
- # Ensure we adjust our path members datasource
- # now that we have one (thus allowing ipath to be used)
- self._reset()
- return ds
-
- def _get_instance_subdirs(self):
- return ['handlers', 'scripts', 'sem']
-
- def _get_ipath(self, subname=None):
- # Force a check to see if anything
- # actually comes back, if not
- # then a datasource has not been assigned...
- instance_dir = self.paths.get_ipath(subname)
- if not instance_dir:
- raise RuntimeError(("No instance directory is available."
- " Has a datasource been fetched??"))
- return instance_dir
-
- def _reflect_cur_instance(self):
- # Remove the old symlink and attach a new one so
- # that further reads/writes connect into the right location
- idir = self._get_ipath()
- util.del_file(self.paths.instance_link)
- util.sym_link(idir, self.paths.instance_link)
-
- # Ensures these dirs exist
- dir_list = []
- for d in self._get_instance_subdirs():
- dir_list.append(os.path.join(idir, d))
- util.ensure_dirs(dir_list)
-
- # Write out information on what is being used for the current instance
- # and what may have been used for a previous instance...
- dp = self.paths.get_cpath('data')
-
- # Write what the datasource was and is..
- ds = "%s: %s" % (type_utils.obj_name(self.datasource), self.datasource)
- previous_ds = None
- ds_fn = os.path.join(idir, 'datasource')
- try:
- previous_ds = util.load_file(ds_fn).strip()
- except Exception:
- pass
- if not previous_ds:
- previous_ds = ds
- util.write_file(ds_fn, "%s\n" % ds)
- util.write_file(os.path.join(dp, 'previous-datasource'),
- "%s\n" % (previous_ds))
-
- # What the instance id was and is...
- iid = self.datasource.get_instance_id()
- iid_fn = os.path.join(dp, 'instance-id')
-
- previous_iid = self.previous_iid()
- util.write_file(iid_fn, "%s\n" % iid)
- util.write_file(self.paths.get_runpath('instance_id'), "%s\n" % iid)
- util.write_file(os.path.join(dp, 'previous-instance-id'),
- "%s\n" % (previous_iid))
-
- self._write_to_cache()
- # Ensure needed components are regenerated
- # after change of instance which may cause
- # change of configuration
- self._reset()
- return iid
-
- def previous_iid(self):
- if self._previous_iid is not None:
- return self._previous_iid
-
- dp = self.paths.get_cpath('data')
- iid_fn = os.path.join(dp, 'instance-id')
- try:
- self._previous_iid = util.load_file(iid_fn).strip()
- except Exception:
- self._previous_iid = NO_PREVIOUS_INSTANCE_ID
-
- LOG.debug("previous iid found to be %s", self._previous_iid)
- return self._previous_iid
-
- def is_new_instance(self):
- previous = self.previous_iid()
- ret = (previous == NO_PREVIOUS_INSTANCE_ID or
- previous != self.datasource.get_instance_id())
- return ret
-
- def fetch(self, existing="check"):
- return self._get_data_source(existing=existing)
-
- def instancify(self):
- return self._reflect_cur_instance()
-
- def cloudify(self):
- # Form the needed options to cloudify our members
- return cloud.Cloud(self.datasource,
- self.paths, self.cfg,
- self.distro, helpers.Runners(self.paths),
- reporter=self.reporter)
-
- def update(self):
- self._store_userdata()
- self._store_vendordata()
-
- def _store_userdata(self):
- raw_ud = self.datasource.get_userdata_raw()
- if raw_ud is None:
- raw_ud = b''
- util.write_file(self._get_ipath('userdata_raw'), raw_ud, 0o600)
- # processed userdata is a Mime message, so write it as string.
- processed_ud = self.datasource.get_userdata()
- if processed_ud is None:
- processed_ud = ''
- util.write_file(self._get_ipath('userdata'), str(processed_ud), 0o600)
-
- def _store_vendordata(self):
- raw_vd = self.datasource.get_vendordata_raw()
- if raw_vd is None:
- raw_vd = b''
- util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0o600)
- # processed vendor data is a Mime message, so write it as string.
- processed_vd = self.datasource.get_vendordata()
- if processed_vd is None:
- processed_vd = ''
- util.write_file(self._get_ipath('vendordata'), str(processed_vd),
- 0o600)
-
- def _default_handlers(self, opts=None):
- if opts is None:
- opts = {}
-
- opts.update({
- 'paths': self.paths,
- 'datasource': self.datasource,
- })
- # TODO(harlowja) Hmmm, should we dynamically import these??
- def_handlers = [
- cc_part.CloudConfigPartHandler(**opts),
- ss_part.ShellScriptPartHandler(**opts),
- bh_part.BootHookPartHandler(**opts),
- up_part.UpstartJobPartHandler(**opts),
- ]
- return def_handlers
-
- def _default_userdata_handlers(self):
- return self._default_handlers()
-
- def _default_vendordata_handlers(self):
- return self._default_handlers(
- opts={'script_path': 'vendor_scripts',
- 'cloud_config_path': 'vendor_cloud_config'})
-
- def _do_handlers(self, data_msg, c_handlers_list, frequency,
- excluded=None):
- """
- Generalized handlers suitable for use with either vendordata
- or userdata
- """
- if excluded is None:
- excluded = []
-
- cdir = self.paths.get_cpath("handlers")
- idir = self._get_ipath("handlers")
-
- # Add the path to the plugins dir to the top of our list for importing
- # new handlers.
- #
- # Note(harlowja): instance dir should be read before cloud-dir
- for d in [cdir, idir]:
- if d and d not in sys.path:
- sys.path.insert(0, d)
-
- def register_handlers_in_dir(path):
- # Attempts to register any handler modules under the given path.
- if not path or not os.path.isdir(path):
- return
- potential_handlers = util.find_modules(path)
- for (fname, mod_name) in potential_handlers.items():
- try:
- mod_locs, looked_locs = importer.find_module(
- mod_name, [''], ['list_types', 'handle_part'])
- if not mod_locs:
- LOG.warn("Could not find a valid user-data handler"
- " named %s in file %s (searched %s)",
- mod_name, fname, looked_locs)
- continue
- mod = importer.import_module(mod_locs[0])
- mod = handlers.fixup_handler(mod)
- types = c_handlers.register(mod)
- if types:
- LOG.debug("Added custom handler for %s [%s] from %s",
- types, mod, fname)
- except Exception:
- util.logexc(LOG, "Failed to register handler from %s",
- fname)
-
- # This keeps track of all the active handlers
- c_handlers = helpers.ContentHandlers()
-
- # Add any handlers in the cloud-dir
- register_handlers_in_dir(cdir)
-
- # Register any other handlers that come from the default set. This
- # is done after the cloud-dir handlers so that the cdir modules can
- # take over the default user-data handler content-types.
- for mod in c_handlers_list:
- types = c_handlers.register(mod, overwrite=False)
- if types:
- LOG.debug("Added default handler for %s from %s", types, mod)
-
- # Form our cloud interface
- data = self.cloudify()
-
- def init_handlers():
- # Init the handlers first
- for (_ctype, mod) in c_handlers.items():
- if mod in c_handlers.initialized:
- # Avoid initing the same module twice (if said module
- # is registered to more than one content-type).
- continue
- handlers.call_begin(mod, data, frequency)
- c_handlers.initialized.append(mod)
-
- def walk_handlers(excluded):
- # Walk the user data
- part_data = {
- 'handlers': c_handlers,
- # Any new handlers that are encountered get written here
- 'handlerdir': idir,
- 'data': data,
- # The default frequency if handlers don't have one
- 'frequency': frequency,
- # This will be used when new handlers are found
- # to help write their contents to files with numbered
- # names...
- 'handlercount': 0,
- 'excluded': excluded,
- }
- handlers.walk(data_msg, handlers.walker_callback, data=part_data)
-
- def finalize_handlers():
- # Give callbacks opportunity to finalize
- for (_ctype, mod) in c_handlers.items():
- if mod not in c_handlers.initialized:
- # Said module was never inited in the first place, so let's
- # not attempt to finalize those that never got called.
- continue
- c_handlers.initialized.remove(mod)
- try:
- handlers.call_end(mod, data, frequency)
- except Exception:
- util.logexc(LOG, "Failed to finalize handler: %s", mod)
-
- try:
- init_handlers()
- walk_handlers(excluded)
- finally:
- finalize_handlers()
-
- def consume_data(self, frequency=PER_INSTANCE):
- # Consume the userdata first, because we want to let the part
- # handlers run first (for merging stuff)
- with events.ReportEventStack("consume-user-data",
- "reading and applying user-data",
- parent=self.reporter):
- self._consume_userdata(frequency)
- with events.ReportEventStack("consume-vendor-data",
- "reading and applying vendor-data",
- parent=self.reporter):
- self._consume_vendordata(frequency)
-
- # Perform post-consumption adjustments so that
- # modules that run during the init stage reflect
- # this consumed set.
- #
- # They will be recreated on future access...
- self._reset()
- # Note(harlowja): the 'active' datasource will have
- # references to the previous config, distro, paths
- # objects before the load of the userdata happened,
- # this is expected.
-
- def _consume_vendordata(self, frequency=PER_INSTANCE):
- """
- Consume the vendordata and run the part handlers on it
- """
- # User-data should have been consumed first.
- # So we merge the other available cloud-configs (everything except
- # vendor provided), and check whether or not we should consume
- # vendor data at all. That gives the user or system a chance to override.
- if not self.datasource.get_vendordata_raw():
- LOG.debug("no vendordata from datasource")
- return
-
- _cc_merger = helpers.ConfigMerger(paths=self._paths,
- datasource=self.datasource,
- additional_fns=[],
- base_cfg=self.cfg,
- include_vendor=False)
- vdcfg = _cc_merger.cfg.get('vendor_data', {})
-
- if not isinstance(vdcfg, dict):
- vdcfg = {'enabled': False}
- LOG.warn("invalid 'vendor_data' setting. resetting to: %s", vdcfg)
-
- enabled = vdcfg.get('enabled')
- no_handlers = vdcfg.get('disabled_handlers', None)
-
- if not util.is_true(enabled):
- LOG.debug("vendordata consumption is disabled.")
- return
-
- LOG.debug("vendor data will be consumed. disabled_handlers=%s",
- no_handlers)
-
- # Ensure vendordata source fetched before activation (just in case)
- vendor_data_msg = self.datasource.get_vendordata()
-
- # This keeps track of all the active handlers, while excluding what
- # the user doesn't want run, i.e. boot_hook, cloud_config, shell_script
- c_handlers_list = self._default_vendordata_handlers()
-
- # Run the handlers
- self._do_handlers(vendor_data_msg, c_handlers_list, frequency,
- excluded=no_handlers)
-
- def _consume_userdata(self, frequency=PER_INSTANCE):
- """
- Consume the userdata and run the part handlers
- """
-
- # Ensure datasource fetched before activation (just in case)
- user_data_msg = self.datasource.get_userdata(True)
-
- # This keeps track of all the active handlers
- c_handlers_list = self._default_handlers()
-
- # Run the handlers
- self._do_handlers(user_data_msg, c_handlers_list, frequency)
-
- def _find_networking_config(self):
- disable_file = os.path.join(
- self.paths.get_cpath('data'), 'upgraded-network')
- if os.path.exists(disable_file):
- return (None, disable_file)
-
- cmdline_cfg = ('cmdline', cmdline.read_kernel_cmdline_config())
- dscfg = ('ds', None)
- if self.datasource and hasattr(self.datasource, 'network_config'):
- dscfg = ('ds', self.datasource.network_config)
- sys_cfg = ('system_cfg', self.cfg.get('network'))
-
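- # Precedence (first match wins): kernel cmdline, then system config,
- # then the datasource; otherwise fall back to a generated config.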
- for loc, ncfg in (cmdline_cfg, sys_cfg, dscfg):
- if net.is_disabled_cfg(ncfg):
- LOG.debug("network config disabled by %s", loc)
- return (None, loc)
- if ncfg:
- return (ncfg, loc)
- return (net.generate_fallback_config(), "fallback")
-
- def apply_network_config(self, bring_up):
- netcfg, src = self._find_networking_config()
- if netcfg is None:
- LOG.info("network config is disabled by %s", src)
- return
-
- try:
- LOG.debug("applying net config names for %s" % netcfg)
- self.distro.apply_network_config_names(netcfg)
- except Exception as e:
- LOG.warn("Failed to rename devices: %s", e)
-
- if (self.datasource is not NULL_DATA_SOURCE and
- not self.is_new_instance()):
- LOG.debug("not a new instance. network config is not applied.")
- return
-
- LOG.info("Applying network configuration from %s bringup=%s: %s",
- src, bring_up, netcfg)
- try:
- return self.distro.apply_network_config(netcfg, bring_up=bring_up)
- except NotImplementedError:
- LOG.warn("distro '%s' does not implement apply_network_config. "
- "networking may not be configured properly." %
- self.distro)
- return
-
-
-class Modules(object):
- def __init__(self, init, cfg_files=None, reporter=None):
- self.init = init
- self.cfg_files = cfg_files
- # Created on first use
- self._cached_cfg = None
- if reporter is None:
- reporter = events.ReportEventStack(
- name="module-reporter", description="module-desc",
- reporting_enabled=False)
- self.reporter = reporter
-
- @property
- def cfg(self):
- # None check to avoid empty case causing re-reading
- if self._cached_cfg is None:
- merger = helpers.ConfigMerger(paths=self.init.paths,
- datasource=self.init.datasource,
- additional_fns=self.cfg_files,
- base_cfg=self.init.cfg)
- self._cached_cfg = merger.cfg
- # LOG.debug("Loading 'module' config %s", self._cached_cfg)
- # Only give out a copy so that others can't modify this...
- return copy.deepcopy(self._cached_cfg)
-
- def _read_modules(self, name):
- module_list = []
- if name not in self.cfg:
- return module_list
- cfg_mods = self.cfg[name]
- # Create 'module_list', a list of dicts
- # where dict['mod'] = module name
- # dict['freq'] = frequency
- # dict['args'] = arguments
- for item in cfg_mods:
- if not item:
- continue
- if isinstance(item, six.string_types):
- module_list.append({
- 'mod': item.strip(),
- })
- elif isinstance(item, (list)):
- contents = {}
- # Meant to fall through...
- if len(item) >= 1:
- contents['mod'] = item[0].strip()
- if len(item) >= 2:
- contents['freq'] = item[1].strip()
- if len(item) >= 3:
- contents['args'] = item[2:]
- if contents:
- module_list.append(contents)
- elif isinstance(item, (dict)):
- contents = {}
- valid = False
- if 'name' in item:
- contents['mod'] = item['name'].strip()
- valid = True
- if 'frequency' in item:
- contents['freq'] = item['frequency'].strip()
- if 'args' in item:
- contents['args'] = item['args'] or []
- if contents and valid:
- module_list.append(contents)
- else:
- raise TypeError(("Failed to read '%s' item in config,"
- " unknown type %s") %
- (item, type_utils.obj_name(item)))
- return module_list
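-
- # Illustrative (not in the original) cloud-config shapes accepted by
- # _read_modules, e.g. under 'cloud_config_modules':
- #   - ssh                                      # plain name
- #   - [mounts, always]                         # name + frequency (+ args)
- #   - {name: runcmd, frequency: once-per-instance}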
-
- def _fixup_modules(self, raw_mods):
- mostly_mods = []
- for raw_mod in raw_mods:
- raw_name = raw_mod['mod']
- freq = raw_mod.get('freq')
- run_args = raw_mod.get('args') or []
- mod_name = config.form_module_name(raw_name)
- if not mod_name:
- continue
- if freq and freq not in FREQUENCIES:
- LOG.warn(("Config specified module %s"
- " has an unknown frequency %s"), raw_name, freq)
- # Reset it so that when run it will get set to a known value
- freq = None
- mod_locs, looked_locs = importer.find_module(
- mod_name, ['', type_utils.obj_name(config)], ['handle'])
- if not mod_locs:
- LOG.warn("Could not find module named %s (searched %s)",
- mod_name, looked_locs)
- continue
- mod = config.fixup_module(importer.import_module(mod_locs[0]))
- mostly_mods.append([mod, raw_name, freq, run_args])
- return mostly_mods
-
- def _run_modules(self, mostly_mods):
- cc = self.init.cloudify()
- # Return which ones ran
- # and which ones failed + the exception of why it failed
- failures = []
- which_ran = []
- for (mod, name, freq, args) in mostly_mods:
- try:
- # Try the module's frequency, otherwise fall back to a known one
- if not freq:
- freq = mod.frequency
- if freq not in FREQUENCIES:
- freq = PER_INSTANCE
- LOG.debug("Running module %s (%s) with frequency %s",
- name, mod, freq)
-
- # Use the config's logger and not our own
- # TODO(harlowja): possibly check the module
- # for having a LOG attr and just give it back
- # its own logger?
- func_args = [name, self.cfg,
- cc, config.LOG, args]
- # Mark it as having started running
- which_ran.append(name)
- # This name will affect the semaphore name created
- run_name = "config-%s" % (name)
-
- desc = "running %s with frequency %s" % (run_name, freq)
- myrep = events.ReportEventStack(
- name=run_name, description=desc, parent=self.reporter)
-
- with myrep:
- ran, _r = cc.run(run_name, mod.handle, func_args,
- freq=freq)
- if ran:
- myrep.message = "%s ran successfully" % run_name
- else:
- myrep.message = "%s previously ran" % run_name
-
- except Exception as e:
- util.logexc(LOG, "Running module %s (%s) failed", name, mod)
- failures.append((name, e))
- return (which_ran, failures)
-
- def run_single(self, mod_name, args=None, freq=None):
- # Form the user's module 'specs'
- mod_to_be = {
- 'mod': mod_name,
- 'args': args,
- 'freq': freq,
- }
- # Now resume doing the normal fixups and running
- raw_mods = [mod_to_be]
- mostly_mods = self._fixup_modules(raw_mods)
- return self._run_modules(mostly_mods)
-
- def run_section(self, section_name):
- raw_mods = self._read_modules(section_name)
- mostly_mods = self._fixup_modules(raw_mods)
- d_name = self.init.distro.name
-
- skipped = []
- forced = []
- overridden = self.cfg.get('unverified_modules', [])
- for (mod, name, _freq, _args) in mostly_mods:
- worked_distros = set(mod.distros)
- worked_distros.update(
- distros.Distro.expand_osfamily(mod.osfamilies))
-
- # module does not declare 'distros' or lists this distro
- if not worked_distros or d_name in worked_distros:
- continue
-
- if name in overridden:
- forced.append(name)
- else:
- skipped.append(name)
-
- if skipped:
- LOG.info("Skipping modules %s because they are not verified "
- "on distro '%s'. To run anyway, add them to "
- "'unverified_modules' in config.", skipped, d_name)
- if forced:
- LOG.info("running unverified_modules: %s", forced)
-
- return self._run_modules(mostly_mods)
-
-
-def fetch_base_config():
- base_cfgs = []
- default_cfg = util.get_builtin_cfg()
-
- # Anything in your conf.d location??
- # or the 'default' cloud.cfg location???
- base_cfgs.append(util.read_conf_with_confd(CLOUD_CONFIG))
-
- # Kernel/cmdline parameters override system config
- kern_contents = util.read_cc_from_cmdline()
- if kern_contents:
- base_cfgs.append(util.load_yaml(kern_contents, default={}))
-
- # And finally the default gets to play
- if default_cfg:
- base_cfgs.append(default_cfg)
-
- return util.mergemanydict(base_cfgs)
-
-
-def _pkl_store(obj, fname):
- try:
- pk_contents = pickle.dumps(obj)
- except Exception:
- util.logexc(LOG, "Failed pickling datasource %s", obj)
- return False
- try:
- util.write_file(fname, pk_contents, omode="wb", mode=0o400)
- except Exception:
- util.logexc(LOG, "Failed pickling datasource to %s", fname)
- return False
- return True
-
-
-def _pkl_load(fname):
- pickle_contents = None
- try:
- pickle_contents = util.load_file(fname, decode=False)
- except Exception as e:
- if os.path.isfile(fname):
- LOG.warn("failed loading pickle in %s: %s" % (fname, e))
- pass
-
- # This is allowed so just return nothing successfully loaded...
- if not pickle_contents:
- return None
- try:
- return pickle.loads(pickle_contents)
- except Exception:
- util.logexc(LOG, "Failed loading pickled blob from %s", fname)
- return None
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
deleted file mode 100644
index 41ef27e3..00000000
--- a/cloudinit/templater.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-# Copyright (C) 2016 Amazon.com, Inc. or its affiliates.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-# Author: Andrew Jorgensen <ajorgens@amazon.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import collections
-import re
-
-try:
- from Cheetah.Template import Template as CTemplate
- CHEETAH_AVAILABLE = True
-except (ImportError, AttributeError):
- CHEETAH_AVAILABLE = False
-
-try:
- import jinja2
- from jinja2 import Template as JTemplate
- JINJA_AVAILABLE = True
-except (ImportError, AttributeError):
- JINJA_AVAILABLE = False
-
-from cloudinit import log as logging
-from cloudinit import type_utils as tu
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-TYPE_MATCHER = re.compile(r"##\s*template:(.*)", re.I)
-BASIC_MATCHER = re.compile(r'\$\{([A-Za-z0-9_.]+)\}|\$([A-Za-z0-9_.]+)')
-
-
-def basic_render(content, params):
- """This does simple replacement of bash variable like templates.
-
- It identifies patterns like ${a} or $a and can also identify patterns like
- ${a.b} or $a.b which will look for a key 'b' in the dictionary rooted
- by key 'a'.
- """
-
- def replacer(match):
- # Only 1 of the 2 groups will actually have a valid entry.
- name = match.group(1)
- if name is None:
- name = match.group(2)
- if name is None:
- raise RuntimeError("Match encountered but no valid group present")
- path = collections.deque(name.split("."))
- selected_params = params
- while len(path) > 1:
- key = path.popleft()
- if not isinstance(selected_params, dict):
- raise TypeError("Can not traverse into"
- " non-dictionary '%s' of type %s while"
- " looking for subkey '%s'"
- % (selected_params,
- tu.obj_name(selected_params),
- key))
- selected_params = selected_params[key]
- key = path.popleft()
- if not isinstance(selected_params, dict):
- raise TypeError("Can not extract key '%s' from non-dictionary"
- " '%s' of type %s"
- % (key, selected_params,
- tu.obj_name(selected_params)))
- return str(selected_params[key])
-
- return BASIC_MATCHER.sub(replacer, content)
-
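-# Illustrative sketch (hypothetical params):
-#   basic_render("hi ${name.first}", {'name': {'first': 'world'}})
-#   -> 'hi world'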
-
-def detect_template(text):
-
- def cheetah_render(content, params):
- return CTemplate(content, searchList=[params]).respond()
-
- def jinja_render(content, params):
- # keep_trailing_newline is in jinja2 2.7+, not 2.6
- add = "\n" if content.endswith("\n") else ""
- return JTemplate(content,
- undefined=jinja2.StrictUndefined,
- trim_blocks=True).render(**params) + add
-
- if text.find("\n") != -1:
- ident, rest = text.split("\n", 1)
- else:
- ident = text
- rest = ''
- type_match = TYPE_MATCHER.match(ident)
- if not type_match:
- if CHEETAH_AVAILABLE:
- LOG.debug("Using Cheetah as the renderer for unknown template.")
- return ('cheetah', cheetah_render, text)
- else:
- return ('basic', basic_render, text)
- else:
- template_type = type_match.group(1).lower().strip()
- if template_type not in ('jinja', 'cheetah', 'basic'):
- raise ValueError("Unknown template rendering type '%s' requested"
- % template_type)
- if template_type == 'jinja' and not JINJA_AVAILABLE:
- LOG.warn("Jinja not available as the selected renderer for"
- " desired template, reverting to the basic renderer.")
- return ('basic', basic_render, rest)
- elif template_type == 'jinja' and JINJA_AVAILABLE:
- return ('jinja', jinja_render, rest)
- if template_type == 'cheetah' and not CHEETAH_AVAILABLE:
- LOG.warn("Cheetah not available as the selected renderer for"
- " desired template, reverting to the basic renderer.")
- return ('basic', basic_render, rest)
- elif template_type == 'cheetah' and CHEETAH_AVAILABLE:
- return ('cheetah', cheetah_render, rest)
- # Only thing left over is the basic renderer (it is always available).
- return ('basic', basic_render, rest)
-
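-# Illustrative (not in the original): a template selects its renderer via a
-# first-line header, e.g.
-#   ## template: jinja
-#   hostname: {{ hostname }}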
-
-def render_from_file(fn, params):
- if not params:
- params = {}
- template_type, renderer, content = detect_template(util.load_file(fn))
- LOG.debug("Rendering content of '%s' using renderer %s", fn, template_type)
- return renderer(content, params)
-
-
-def render_to_file(fn, outfn, params, mode=0o644):
- contents = render_from_file(fn, params)
- util.write_file(outfn, contents, mode=mode)
-
-
-def render_string_to_file(content, outfn, params, mode=0o644):
- contents = render_string(content, params)
- util.write_file(outfn, contents, mode=mode)
-
-
-def render_string(content, params):
- if not params:
- params = {}
- template_type, renderer, content = detect_template(content)
- return renderer(content, params)
diff --git a/cloudinit/type_utils.py b/cloudinit/type_utils.py
deleted file mode 100644
index b93efd6a..00000000
--- a/cloudinit/type_utils.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import types
-
-import six
-
-
-if six.PY3:
- _NAME_TYPES = (
- types.ModuleType,
- types.FunctionType,
- types.LambdaType,
- type,
- )
-else:
- _NAME_TYPES = (
- types.TypeType,
- types.ModuleType,
- types.FunctionType,
- types.LambdaType,
- types.ClassType,
- )
-
-
-def obj_name(obj):
- if isinstance(obj, _NAME_TYPES):
- return six.text_type(obj.__name__)
- else:
- if not hasattr(obj, '__class__'):
- return repr(obj)
- else:
- return obj_name(obj.__class__)
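-
-
-# Illustrative sketch: obj_name(obj_name) -> 'obj_name';
-# obj_name(42) -> 'int' (falls through to the object's class).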
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
deleted file mode 100644
index c05e9d90..00000000
--- a/cloudinit/url_helper.py
+++ /dev/null
@@ -1,509 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import json
-import os
-import requests
-import six
-import time
-
-from email.utils import parsedate
-from functools import partial
-
-import oauthlib.oauth1 as oauth1
-from requests import exceptions
-
-from six.moves.urllib.parse import (
- urlparse, urlunparse,
- quote as urlquote)
-
-from cloudinit import log as logging
-from cloudinit import version
-
-LOG = logging.getLogger(__name__)
-
-if six.PY2:
- import httplib
- NOT_FOUND = httplib.NOT_FOUND
-else:
- import http.client
- NOT_FOUND = http.client.NOT_FOUND
-
-
-# Check if requests has ssl support (added in requests >= 0.8.8)
-SSL_ENABLED = False
-CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0)
-_REQ_VER = None
-try:
- from distutils.version import LooseVersion
- import pkg_resources
- _REQ = pkg_resources.get_distribution('requests')
- _REQ_VER = LooseVersion(_REQ.version)
- if _REQ_VER >= LooseVersion('0.8.8'):
- SSL_ENABLED = True
- if _REQ_VER >= LooseVersion('0.7.0') and _REQ_VER < LooseVersion('1.0.0'):
- CONFIG_ENABLED = True
-except ImportError:
- pass
-
-
-def _cleanurl(url):
- parsed_url = list(urlparse(url, scheme='http'))
- if not parsed_url[1] and parsed_url[2]:
- # Swap these since this seems to be a common
- # occurrence when given urls like 'www.google.com'
- parsed_url[1] = parsed_url[2]
- parsed_url[2] = ''
- return urlunparse(parsed_url)
-
-
-def combine_url(base, *add_ons):
-
- def combine_single(url, add_on):
- url_parsed = list(urlparse(url))
- path = url_parsed[2]
- if path and not path.endswith("/"):
- path += "/"
- path += urlquote(str(add_on), safe="/:")
- url_parsed[2] = path
- return urlunparse(url_parsed)
-
- url = base
- for add_on in add_ons:
- url = combine_single(url, add_on)
- return url
-
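-# Illustrative: combine_url('http://169.254.169.254', 'latest', 'meta-data/')
-#   -> 'http://169.254.169.254/latest/meta-data/'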
-
-# Made to have same accessors as UrlResponse so that the
-# read_file_or_url can return this or that object and the
-# 'user' of those objects will not need to know the difference.
-class StringResponse(object):
- def __init__(self, contents, code=200):
- self.code = code
- self.headers = {}
- self.contents = contents
- self.url = None
-
- def ok(self, *args, **kwargs):
- return self.code == 200
-
- def __str__(self):
- return self.contents
-
-
-class FileResponse(StringResponse):
- def __init__(self, path, contents, code=200):
- StringResponse.__init__(self, contents, code=code)
- self.url = path
-
-
-class UrlResponse(object):
- def __init__(self, response):
- self._response = response
-
- @property
- def contents(self):
- return self._response.content
-
- @property
- def url(self):
- return self._response.url
-
- def ok(self, redirects_ok=False):
- upper = 300
- if redirects_ok:
- upper = 400
- return 200 <= self.code < upper
-
- @property
- def headers(self):
- return self._response.headers
-
- @property
- def code(self):
- return self._response.status_code
-
- def __str__(self):
- return self._response.text
-
-
-class UrlError(IOError):
- def __init__(self, cause, code=None, headers=None, url=None):
- IOError.__init__(self, str(cause))
- self.cause = cause
- self.code = code
- self.headers = headers
- if self.headers is None:
- self.headers = {}
- self.url = url
-
-
-def _get_ssl_args(url, ssl_details):
- ssl_args = {}
- scheme = urlparse(url).scheme
- if scheme == 'https' and ssl_details:
- if not SSL_ENABLED:
- LOG.warn("SSL is not supported in requests v%s, "
- "cert. verification can not occur!", _REQ_VER)
- else:
- if 'ca_certs' in ssl_details and ssl_details['ca_certs']:
- ssl_args['verify'] = ssl_details['ca_certs']
- else:
- ssl_args['verify'] = True
- if 'cert_file' in ssl_details and 'key_file' in ssl_details:
- ssl_args['cert'] = [ssl_details['cert_file'],
- ssl_details['key_file']]
- elif 'cert_file' in ssl_details:
- ssl_args['cert'] = str(ssl_details['cert_file'])
- return ssl_args
-
-
-def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
- headers=None, headers_cb=None, ssl_details=None,
- check_status=True, allow_redirects=True, exception_cb=None):
- url = _cleanurl(url)
- req_args = {
- 'url': url,
- }
- req_args.update(_get_ssl_args(url, ssl_details))
- req_args['allow_redirects'] = allow_redirects
- req_args['method'] = 'GET'
- if timeout is not None:
- req_args['timeout'] = max(float(timeout), 0)
- if data:
- req_args['method'] = 'POST'
- # The 'config' kwarg only exists in some requests versions (see
- # CONFIG_ENABLED above), so we do the retries manually below when it
- # is not available...
- if CONFIG_ENABLED:
- req_config = {
- 'store_cookies': False,
- }
- # Don't use the retry support built-in
- # since it doesn't allow for 'sleep_times'
- # in between tries....
- # if retries:
- # req_config['max_retries'] = max(int(retries), 0)
- req_args['config'] = req_config
- manual_tries = 1
- if retries:
- manual_tries = max(int(retries) + 1, 1)
-
- def_headers = {
- 'User-Agent': 'Cloud-Init/%s' % (version.version_string()),
- }
- if headers:
- def_headers.update(headers)
- headers = def_headers
-
- if not headers_cb:
- def _cb(url):
- return headers
- headers_cb = _cb
- if data:
- req_args['data'] = data
- if sec_between is None:
- sec_between = -1
-
- excps = []
- # Handle retrying ourselves since the built-in support
- # doesn't handle sleeping between tries...
- for i in range(0, manual_tries):
- req_args['headers'] = headers_cb(url)
- filtered_req_args = {}
- for (k, v) in req_args.items():
- if k == 'data':
- continue
- filtered_req_args[k] = v
- try:
- LOG.debug("[%s/%s] open '%s' with %s configuration", i,
- manual_tries, url, filtered_req_args)
-
- r = requests.request(**req_args)
- if check_status:
- r.raise_for_status()
- LOG.debug("Read from %s (%s, %sb) after %s attempts", url,
- r.status_code, len(r.content), (i + 1))
- # Doesn't seem like we can make it use a different
- # subclass for responses, so add our own backward-compat
- # attrs
- return UrlResponse(r)
- except exceptions.RequestException as e:
- if (isinstance(e, (exceptions.HTTPError)) and
- hasattr(e, 'response') and # This appeared in v 0.10.8
- hasattr(e.response, 'status_code')):
- excps.append(UrlError(e, code=e.response.status_code,
- headers=e.response.headers,
- url=url))
- else:
- excps.append(UrlError(e, url=url))
- if SSL_ENABLED and isinstance(e, exceptions.SSLError):
- # ssl exceptions are not going to get fixed by waiting a
- # few seconds
- break
- if exception_cb and exception_cb(req_args.copy(), excps[-1]):
- # if an exception callback was given it should return None
- # a true-ish value means to break and re-raise the exception
- break
- if i + 1 < manual_tries and sec_between > 0:
- LOG.debug("Please wait %s seconds while we wait to try again",
- sec_between)
- time.sleep(sec_between)
- if excps:
- raise excps[-1]
- return None # Should throw before this...
-
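-# Illustrative usage (hypothetical URL):
-#   resp = readurl('http://169.254.169.254/latest/meta-data/',
-#                  retries=3, sec_between=2)
-#   if resp.ok():
-#       print(resp.contents)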
-
-def wait_for_url(urls, max_wait=None, timeout=None,
- status_cb=None, headers_cb=None, sleep_time=1,
- exception_cb=None):
- """
- urls: a list of urls to try
- max_wait: roughly the maximum time to wait before giving up
- The max time is *actually* len(urls)*timeout as each url will
- be tried once and given the timeout provided.
- a number <= 0 will always result in only one try
- timeout: the timeout provided to urlopen
- status_cb: call method with string message when a url is not available
- headers_cb: call method with single argument of url to get headers
- for request.
- exception_cb: call method with 2 arguments 'msg' (per status_cb) and
- 'exception', the exception that occurred.
-
- The idea of this routine is to wait for the EC2 metadata service to
- come up. On both Eucalyptus and EC2 we have seen the case where
- the instance hit the MD before the MD service was up. EC2 seems
- to have permanently fixed this, though.
-
- In openstack, the metadata service might be painfully slow, and
- unable to avoid hitting a timeout of even up to 10 seconds or more
- (LP: #894279) for a simple GET.
-
- Offset those needs with the need to not hang forever (and block boot)
- on a system where cloud-init is configured to look for the EC2 metadata
- service but is not going to find one. It is possible that the instance
- data host (169.254.169.254) may be firewalled off entirely for a system,
- meaning that the connection will block forever unless a timeout is set.
- """
- start_time = time.time()
-
- def log_status_cb(msg, exc=None):
- LOG.debug(msg)
-
- if status_cb is None:
- status_cb = log_status_cb
-
- def timeup(max_wait, start_time):
- return ((max_wait <= 0 or max_wait is None) or
- (time.time() - start_time > max_wait))
-
- loop_n = 0
- while True:
- sleep_time = int(loop_n / 5) + 1
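- # This overrides the sleep_time argument so the delay grows slowly:
- # 1 second for the first five loops, 2 for the next five, and so on.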
- for url in urls:
- now = time.time()
- if loop_n != 0:
- if timeup(max_wait, start_time):
- break
- if timeout and (now + timeout > (start_time + max_wait)):
- # shorten timeout to not run way over max_time
- timeout = int((start_time + max_wait) - now)
-
- reason = ""
- url_exc = None
- try:
- if headers_cb is not None:
- headers = headers_cb(url)
- else:
- headers = {}
-
- response = readurl(url, headers=headers, timeout=timeout,
- check_status=False)
- if not response.contents:
- reason = "empty response [%s]" % (response.code)
- url_exc = UrlError(ValueError(reason), code=response.code,
- headers=response.headers, url=url)
- elif not response.ok():
- reason = "bad status code [%s]" % (response.code)
- url_exc = UrlError(ValueError(reason), code=response.code,
- headers=response.headers, url=url)
- else:
- return url
- except UrlError as e:
- reason = "request error [%s]" % e
- url_exc = e
- except Exception as e:
- reason = "unexpected error [%s]" % e
- url_exc = e
-
- time_taken = int(time.time() - start_time)
- status_msg = "Calling '%s' failed [%s/%ss]: %s" % (url,
- time_taken,
- max_wait,
- reason)
- status_cb(status_msg)
- if exception_cb:
- # This can be used to alter the headers that will be sent
- # in the future, for example this is what the MAAS datasource
- # does.
- exception_cb(msg=status_msg, exception=url_exc)
-
- if timeup(max_wait, start_time):
- break
-
- loop_n = loop_n + 1
- LOG.debug("Please wait %s seconds while we wait to try again",
- sleep_time)
- time.sleep(sleep_time)
-
- return False
-
-
-class OauthUrlHelper(object):
- def __init__(self, consumer_key=None, token_key=None,
- token_secret=None, consumer_secret=None,
- skew_data_file="/run/oauth_skew.json"):
- self.consumer_key = consumer_key
- self.consumer_secret = consumer_secret or ""
- self.token_key = token_key
- self.token_secret = token_secret
- self.skew_data_file = skew_data_file
- self._do_oauth = True
- self.skew_change_limit = 5
- required = (self.token_key, self.token_secret, self.consumer_key)
- if not any(required):
- self._do_oauth = False
- elif not all(required):
- raise ValueError("all or none of token_key, token_secret, or "
- "consumer_key can be set")
-
- old = self.read_skew_file()
- self.skew_data = old or {}
-
- def read_skew_file(self):
- if self.skew_data_file and os.path.isfile(self.skew_data_file):
- with open(self.skew_data_file, mode="r") as fp:
- return json.load(fp)
- return None
-
- def update_skew_file(self, host, value):
- # this is not atomic
- if not self.skew_data_file:
- return
- cur = self.read_skew_file()
- if cur is None:
- cur = {}
- cur[host] = value
- with open(self.skew_data_file, mode="w") as fp:
- fp.write(json.dumps(cur))
-
- def exception_cb(self, msg, exception):
- if not (isinstance(exception, UrlError) and
- (exception.code == 403 or exception.code == 401)):
- return
-
- if 'date' not in exception.headers:
- LOG.warn("Missing header 'date' in %s response", exception.code)
- return
-
- date = exception.headers['date']
- try:
- remote_time = time.mktime(parsedate(date))
- except Exception as e:
- LOG.warn("Failed to convert datetime '%s': %s", date, e)
- return
-
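- # The gap between the server's Date header and local time is the
- # clock skew; it is persisted per-host and later added to oauth
- # timestamps (see headers_cb) so signatures stay valid on hosts
- # with drifted clocks.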
- skew = int(remote_time - time.time())
- host = urlparse(exception.url).netloc
- old_skew = self.skew_data.get(host, 0)
- if abs(old_skew - skew) > self.skew_change_limit:
- self.update_skew_file(host, skew)
- LOG.warn("Setting oauth clockskew for %s to %d", host, skew)
- self.skew_data[host] = skew
-
- return
-
- def headers_cb(self, url):
- if not self._do_oauth:
- return {}
-
- timestamp = None
- host = urlparse(url).netloc
- if self.skew_data and host in self.skew_data:
- timestamp = int(time.time()) + self.skew_data[host]
-
- return oauth_headers(
- url=url, consumer_key=self.consumer_key,
- token_key=self.token_key, token_secret=self.token_secret,
- consumer_secret=self.consumer_secret, timestamp=timestamp)
-
- def _wrapped(self, wrapped_func, args, kwargs):
- kwargs['headers_cb'] = partial(
- self._headers_cb, kwargs.get('headers_cb'))
- kwargs['exception_cb'] = partial(
- self._exception_cb, kwargs.get('exception_cb'))
- return wrapped_func(*args, **kwargs)
-
- def wait_for_url(self, *args, **kwargs):
- return self._wrapped(wait_for_url, args, kwargs)
-
- def readurl(self, *args, **kwargs):
- return self._wrapped(readurl, args, kwargs)
-
- def _exception_cb(self, extra_exception_cb, msg, exception):
- ret = None
- try:
- if extra_exception_cb:
- ret = extra_exception_cb(msg, exception)
- finally:
- self.exception_cb(msg, exception)
- return ret
-
- def _headers_cb(self, extra_headers_cb, url):
- headers = {}
- if extra_headers_cb:
- headers = extra_headers_cb(url)
- headers.update(self.headers_cb(url))
- return headers
-
-
-def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
- timestamp=None):
- timestamp = str(timestamp) if timestamp else None
-
- client = oauth1.Client(
- consumer_key,
- client_secret=consumer_secret,
- resource_owner_key=token_key,
- resource_owner_secret=token_secret,
- signature_method=oauth1.SIGNATURE_PLAINTEXT,
- timestamp=timestamp)
- uri, signed_headers, body = client.sign(url)
- return signed_headers
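-
-
-# Illustrative usage (hypothetical credentials):
-#   hdrs = oauth_headers('http://maas.example/api', consumer_key='ck',
-#                        token_key='tk', token_secret='ts',
-#                        consumer_secret='')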
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
deleted file mode 100644
index 393bf0bb..00000000
--- a/cloudinit/user_data.py
+++ /dev/null
@@ -1,356 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from email.mime.base import MIMEBase
-from email.mime.multipart import MIMEMultipart
-from email.mime.nonmultipart import MIMENonMultipart
-from email.mime.text import MIMEText
-
-import six
-
-from cloudinit import handlers
-from cloudinit import log as logging
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-# Constants copied in from the handler module
-NOT_MULTIPART_TYPE = handlers.NOT_MULTIPART_TYPE
-PART_FN_TPL = handlers.PART_FN_TPL
-OCTET_TYPE = handlers.OCTET_TYPE
-
-# Saves typing errors
-CONTENT_TYPE = 'Content-Type'
-
-# Various special content types that cause special actions
-TYPE_NEEDED = ["text/plain", "text/x-not-multipart"]
-INCLUDE_TYPES = ['text/x-include-url', 'text/x-include-once-url']
-ARCHIVE_TYPES = ["text/cloud-config-archive"]
-UNDEF_TYPE = "text/plain"
-ARCHIVE_UNDEF_TYPE = "text/cloud-config"
-ARCHIVE_UNDEF_BINARY_TYPE = "application/octet-stream"
-
-# This seems to hit most of the possible gzip content types.
-DECOMP_TYPES = [
- 'application/gzip',
- 'application/gzip-compressed',
- 'application/gzipped',
- 'application/x-compress',
- 'application/x-compressed',
- 'application/x-gunzip',
- 'application/x-gzip',
- 'application/x-gzip-compressed',
-]
-
-# Msg header used to track attachments
-ATTACHMENT_FIELD = 'Number-Attachments'
-
-# Only the following content types can have their launch index examined
-# in their payload; every other content type can still provide a header
-EXAMINE_FOR_LAUNCH_INDEX = ["text/cloud-config"]
-
-
-def _replace_header(msg, key, value):
- del msg[key]
- msg[key] = value
-
-
-def _set_filename(msg, filename):
- del msg['Content-Disposition']
- msg.add_header('Content-Disposition',
- 'attachment', filename=str(filename))
-
-
-class UserDataProcessor(object):
- def __init__(self, paths):
- self.paths = paths
- self.ssl_details = util.fetch_ssl_details(paths)
-
- def process(self, blob):
- accumulating_msg = MIMEMultipart()
- if isinstance(blob, list):
- for b in blob:
- self._process_msg(convert_string(b), accumulating_msg)
- else:
- self._process_msg(convert_string(blob), accumulating_msg)
- return accumulating_msg
-
- def _process_msg(self, base_msg, append_msg):
-
- def find_ctype(payload):
- return handlers.type_from_starts_with(payload)
-
- for part in base_msg.walk():
- if is_skippable(part):
- continue
-
- ctype = None
- ctype_orig = part.get_content_type()
- payload = util.fully_decoded_payload(part)
- was_compressed = False
-
-            # When the message states it is of a gzipped content type, ensure
-            # that we attempt to decompress the payload so that the
-            # decompressed data can be examined (instead of the compressed
-            # data).
- if ctype_orig in DECOMP_TYPES:
- try:
- payload = util.decomp_gzip(payload, quiet=False)
- # At this point we don't know what the content-type is
- # since we just decompressed it.
- ctype_orig = None
- was_compressed = True
- except util.DecompressionError as e:
- LOG.warn("Failed decompressing payload from %s of length"
- " %s due to: %s", ctype_orig, len(payload), e)
- continue
-
- # Attempt to figure out the payloads content-type
- if not ctype_orig:
- ctype_orig = UNDEF_TYPE
- if ctype_orig in TYPE_NEEDED:
- ctype = find_ctype(payload)
- if ctype is None:
- ctype = ctype_orig
-
- # In the case where the data was compressed, we want to make sure
- # that we create a new message that contains the found content
- # type with the uncompressed content since later traversals of the
- # messages will expect a part not compressed.
- if was_compressed:
- maintype, subtype = ctype.split("/", 1)
- n_part = MIMENonMultipart(maintype, subtype)
- n_part.set_payload(payload)
- # Copy various headers from the old part to the new one,
- # but don't include all the headers since some are not useful
- # after decoding and decompression.
- if part.get_filename():
- _set_filename(n_part, part.get_filename())
- for h in ('Launch-Index',):
- if h in part:
- _replace_header(n_part, h, str(part[h]))
- part = n_part
-
- if ctype != ctype_orig:
- _replace_header(part, CONTENT_TYPE, ctype)
-
- if ctype in INCLUDE_TYPES:
- self._do_include(payload, append_msg)
- continue
-
- if ctype in ARCHIVE_TYPES:
- self._explode_archive(payload, append_msg)
- continue
-
- # TODO(harlowja): Should this be happening, shouldn't
- # the part header be modified and not the base?
- _replace_header(base_msg, CONTENT_TYPE, ctype)
-
- self._attach_part(append_msg, part)
-
- def _attach_launch_index(self, msg):
- header_idx = msg.get('Launch-Index', None)
- payload_idx = None
- if msg.get_content_type() in EXAMINE_FOR_LAUNCH_INDEX:
- try:
- # See if it has a launch-index field
- # that might affect the final header
- payload = util.load_yaml(msg.get_payload(decode=True))
- if payload:
- payload_idx = payload.get('launch-index')
- except Exception:
- pass
- # Header overrides contents, for now (?) or the other way around?
- if header_idx is not None:
- payload_idx = header_idx
- # Nothing found in payload, use header (if anything there)
- if payload_idx is None:
- payload_idx = header_idx
- if payload_idx is not None:
- try:
- msg.add_header('Launch-Index', str(int(payload_idx)))
- except (ValueError, TypeError):
- pass
-
- def _get_include_once_filename(self, entry):
- entry_fn = util.hash_blob(entry, 'md5', 64)
- return os.path.join(self.paths.get_ipath_cur('data'),
- 'urlcache', entry_fn)
-
- def _process_before_attach(self, msg, attached_id):
- if not msg.get_filename():
- _set_filename(msg, PART_FN_TPL % (attached_id))
- self._attach_launch_index(msg)
-
- def _do_include(self, content, append_msg):
- # Include a list of urls, one per line
- # also support '#include <url here>'
- # or #include-once '<url here>'
- include_once_on = False
- for line in content.splitlines():
- lc_line = line.lower()
- if lc_line.startswith("#include-once"):
- line = line[len("#include-once"):].lstrip()
-                # Every following include will now
-                # not be refetched... but will instead be
-                # re-read from a local urlcache (if it worked)
- include_once_on = True
- elif lc_line.startswith("#include"):
- line = line[len("#include"):].lstrip()
- # Disable the include once if it was on
- # if it wasn't, then this has no effect.
- include_once_on = False
- if line.startswith("#"):
- continue
- include_url = line.strip()
- if not include_url:
- continue
-
- include_once_fn = None
- content = None
- if include_once_on:
- include_once_fn = self._get_include_once_filename(include_url)
- if include_once_on and os.path.isfile(include_once_fn):
- content = util.load_file(include_once_fn)
- else:
- resp = util.read_file_or_url(include_url,
- ssl_details=self.ssl_details)
- if include_once_on and resp.ok():
- util.write_file(include_once_fn, resp.contents, mode=0o600)
- if resp.ok():
- content = resp.contents
- else:
- LOG.warn(("Fetching from %s resulted in"
-                             " an invalid http code of %s"),
- include_url, resp.code)
-
- if content is not None:
- new_msg = convert_string(content)
- self._process_msg(new_msg, append_msg)
-
- def _explode_archive(self, archive, append_msg):
- entries = util.load_yaml(archive, default=[], allowed=(list, set))
- for ent in entries:
- # ent can be one of:
- # dict { 'filename' : 'value', 'content' :
- # 'value', 'type' : 'value' }
-            #    filename and type may not be present
- # or
- # scalar(payload)
- if isinstance(ent, six.string_types):
- ent = {'content': ent}
- if not isinstance(ent, (dict)):
- # TODO(harlowja) raise?
- continue
-
- content = ent.get('content', '')
- mtype = ent.get('type')
- if not mtype:
- default = ARCHIVE_UNDEF_TYPE
- if isinstance(content, six.binary_type):
- default = ARCHIVE_UNDEF_BINARY_TYPE
- mtype = handlers.type_from_starts_with(content, default)
-
- maintype, subtype = mtype.split('/', 1)
- if maintype == "text":
- if isinstance(content, six.binary_type):
- content = content.decode()
- msg = MIMEText(content, _subtype=subtype)
- else:
- msg = MIMEBase(maintype, subtype)
- msg.set_payload(content)
-
- if 'filename' in ent:
- _set_filename(msg, ent['filename'])
- if 'launch-index' in ent:
- msg.add_header('Launch-Index', str(ent['launch-index']))
-
- for header in list(ent.keys()):
- if header.lower() in ('content', 'filename', 'type',
- 'launch-index', 'content-disposition',
- ATTACHMENT_FIELD.lower(),
- CONTENT_TYPE.lower()):
- continue
- msg.add_header(header, ent[header])
-
- self._attach_part(append_msg, msg)
-
- def _multi_part_count(self, outer_msg, new_count=None):
- """
- Return the number of attachments to this MIMEMultipart by looking
- at its 'Number-Attachments' header.
- """
- if ATTACHMENT_FIELD not in outer_msg:
- outer_msg[ATTACHMENT_FIELD] = '0'
-
- if new_count is not None:
- _replace_header(outer_msg, ATTACHMENT_FIELD, str(new_count))
-
- fetched_count = 0
- try:
- fetched_count = int(outer_msg.get(ATTACHMENT_FIELD))
- except (ValueError, TypeError):
- _replace_header(outer_msg, ATTACHMENT_FIELD, str(fetched_count))
- return fetched_count
-
- def _attach_part(self, outer_msg, part):
- """
-        Attach a message to an outer message. outer_msg must be a MIMEMultipart.
- Modifies a header in the outer message to keep track of number of
- attachments.
- """
- part_count = self._multi_part_count(outer_msg)
- self._process_before_attach(part, part_count + 1)
- outer_msg.attach(part)
- self._multi_part_count(outer_msg, part_count + 1)
-
-
-def is_skippable(part):
- # multipart/* are just containers
- part_maintype = part.get_content_maintype() or ''
- if part_maintype.lower() == 'multipart':
- return True
- return False
-
-
-# Converts a raw string into a MIME message
-def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
- if not raw_data:
- raw_data = ''
-
- def create_binmsg(data, content_type):
- maintype, subtype = content_type.split("/", 1)
- msg = MIMEBase(maintype, subtype)
- msg.set_payload(data)
- return msg
-
- try:
- data = util.decode_binary(util.decomp_gzip(raw_data))
- if "mime-version:" in data[0:4096].lower():
- msg = util.message_from_string(data)
- else:
- msg = create_binmsg(data, content_type)
- except UnicodeDecodeError:
- msg = create_binmsg(raw_data, content_type)
-
- return msg
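-
-
-# Illustrative usage sketch (the input blob here is an assumption, not
-# taken from this module): plain text is wrapped in a single MIME part
-# typed NOT_MULTIPART_TYPE, while data that already carries a
-# "MIME-Version:" header is parsed as a full MIME message.
-#
-#   msg = convert_string('#cloud-config\npackages: [htop]')
-#   msg.get_content_type()   # -> NOT_MULTIPART_TYPE ('text/x-not-multipart')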
diff --git a/cloudinit/util.py b/cloudinit/util.py
deleted file mode 100644
index e5dd61a0..00000000
--- a/cloudinit/util.py
+++ /dev/null
@@ -1,2246 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import contextlib
-import copy as obj_copy
-import ctypes
-import email
-import errno
-import glob
-import grp
-import gzip
-import hashlib
-import json
-import os
-import os.path
-import platform
-import pwd
-import random
-import re
-import shutil
-import socket
-import stat
-import string
-import subprocess
-import sys
-import tempfile
-import time
-
-from base64 import b64decode, b64encode
-from six.moves.urllib import parse as urlparse
-
-import six
-import yaml
-
-from cloudinit import importer
-from cloudinit import log as logging
-from cloudinit import mergers
-from cloudinit import safeyaml
-from cloudinit import type_utils
-from cloudinit import url_helper
-from cloudinit import version
-
-from cloudinit.settings import (CFG_BUILTIN)
-
-
-_DNS_REDIRECT_IP = None
-LOG = logging.getLogger(__name__)
-
-# Helps cleanup filenames to ensure they aren't FS incompatible
-FN_REPLACEMENTS = {
- os.sep: '_',
-}
-FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters)
-
-TRUE_STRINGS = ('true', '1', 'on', 'yes')
-FALSE_STRINGS = ('off', '0', 'no', 'false')
-
-
-# Helper utils to see if running in a container
-CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'],
- ['running-in-container'],
- ['lxc-is-container'])
-
-PROC_CMDLINE = None
-
-
-def decode_binary(blob, encoding='utf-8'):
- # Converts a binary type into a text type using given encoding.
- if isinstance(blob, six.text_type):
- return blob
- return blob.decode(encoding)
-
-
-def encode_text(text, encoding='utf-8'):
- # Converts a text string into a binary type using given encoding.
- if isinstance(text, six.binary_type):
- return text
- return text.encode(encoding)
-
-
-def b64d(source):
- # Base64 decode some data, accepting bytes or unicode/str, and returning
- # str/unicode if the result is utf-8 compatible, otherwise returning bytes.
- decoded = b64decode(source)
- try:
- return decoded.decode('utf-8')
- except UnicodeDecodeError:
- return decoded
-
-
-def b64e(source):
- # Base64 encode some data, accepting bytes or unicode/str, and returning
- # str/unicode if the result is utf-8 compatible, otherwise returning bytes.
- if not isinstance(source, bytes):
- source = source.encode('utf-8')
- return b64encode(source).decode('utf-8')
-
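-# Illustrative round-trips (example values): utf-8 compatible data comes
-# back as text, anything else stays bytes.
-#
-#   b64e(b'hello')            # -> 'aGVsbG8='
-#   b64d('aGVsbG8=')          # -> 'hello'
-#   b64d(b64e(b'\xff\x00'))   # -> b'\xff\x00' (not utf-8 decodable)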
-
-def fully_decoded_payload(part):
- # In Python 3, decoding the payload will ironically hand us a bytes object.
- # 'decode' means to decode according to Content-Transfer-Encoding, not
- # according to any charset in the Content-Type. So, if we end up with
- # bytes, first try to decode to str via CT charset, and failing that, try
- # utf-8 using surrogate escapes.
- cte_payload = part.get_payload(decode=True)
- if (six.PY3 and
- part.get_content_maintype() == 'text' and
- isinstance(cte_payload, bytes)):
- charset = part.get_charset()
- if charset and charset.input_codec:
- encoding = charset.input_codec
- else:
- encoding = 'utf-8'
- return cte_payload.decode(encoding, errors='surrogateescape')
- return cte_payload
-
-
-# Path for DMI Data
-DMI_SYS_PATH = "/sys/class/dmi/id"
-
-# dmidecode and /sys/class/dmi/id/* use different names for the same value;
-# this allows us to refer to them by one canonical name
-DMIDECODE_TO_DMI_SYS_MAPPING = {
- 'baseboard-asset-tag': 'board_asset_tag',
- 'baseboard-manufacturer': 'board_vendor',
- 'baseboard-product-name': 'board_name',
- 'baseboard-serial-number': 'board_serial',
- 'baseboard-version': 'board_version',
- 'bios-release-date': 'bios_date',
- 'bios-vendor': 'bios_vendor',
- 'bios-version': 'bios_version',
- 'chassis-asset-tag': 'chassis_asset_tag',
- 'chassis-manufacturer': 'chassis_vendor',
- 'chassis-serial-number': 'chassis_serial',
- 'chassis-version': 'chassis_version',
- 'system-manufacturer': 'sys_vendor',
- 'system-product-name': 'product_name',
- 'system-serial-number': 'product_serial',
- 'system-uuid': 'product_uuid',
- 'system-version': 'product_version',
-}
-
-
-class ProcessExecutionError(IOError):
-
- MESSAGE_TMPL = ('%(description)s\n'
- 'Command: %(cmd)s\n'
- 'Exit code: %(exit_code)s\n'
- 'Reason: %(reason)s\n'
- 'Stdout: %(stdout)r\n'
- 'Stderr: %(stderr)r')
-
- def __init__(self, stdout=None, stderr=None,
- exit_code=None, cmd=None,
- description=None, reason=None,
- errno=None):
- if not cmd:
- self.cmd = '-'
- else:
- self.cmd = cmd
-
- if not description:
- self.description = 'Unexpected error while running command.'
- else:
- self.description = description
-
- if not isinstance(exit_code, six.integer_types):
- self.exit_code = '-'
- else:
- self.exit_code = exit_code
-
- if not stderr:
- self.stderr = ''
- else:
- self.stderr = stderr
-
- if not stdout:
- self.stdout = ''
- else:
- self.stdout = stdout
-
- if reason:
- self.reason = reason
- else:
- self.reason = '-'
-
- self.errno = errno
- message = self.MESSAGE_TMPL % {
- 'description': self.description,
- 'cmd': self.cmd,
- 'exit_code': self.exit_code,
- 'stdout': self.stdout,
- 'stderr': self.stderr,
- 'reason': self.reason,
- }
- IOError.__init__(self, message)
- # For backward compatibility with Python 2.
- if not hasattr(self, 'message'):
- self.message = message
-
-
-class SeLinuxGuard(object):
- def __init__(self, path, recursive=False):
- # Late import since it might not always
- # be possible to use this
- try:
- self.selinux = importer.import_module('selinux')
- except ImportError:
- self.selinux = None
- self.path = path
- self.recursive = recursive
-
- def __enter__(self):
- if self.selinux and self.selinux.is_selinux_enabled():
- return True
- else:
- return False
-
- def __exit__(self, excp_type, excp_value, excp_traceback):
- if not self.selinux or not self.selinux.is_selinux_enabled():
- return
- if not os.path.lexists(self.path):
- return
-
- path = os.path.realpath(self.path)
- # path should be a string, not unicode
- if six.PY2:
- path = str(path)
- try:
- stats = os.lstat(path)
- self.selinux.matchpathcon(path, stats[stat.ST_MODE])
- except OSError:
- return
-
- LOG.debug("Restoring selinux mode for %s (recursive=%s)",
- path, self.recursive)
- self.selinux.restorecon(path, recursive=self.recursive)
-
-
-class MountFailedError(Exception):
- pass
-
-
-class DecompressionError(Exception):
- pass
-
-
-def ExtendedTemporaryFile(**kwargs):
- fh = tempfile.NamedTemporaryFile(**kwargs)
- # Replace its unlink with a quiet version
- # that does not raise errors when the
- # file to unlink has been unlinked elsewhere..
- LOG.debug("Created temporary file %s", fh.name)
- fh.unlink = del_file
-
- # Add a new method that will unlink
- # right 'now' but still lets the exit
- # method attempt to remove it (which will
- # not throw due to our del file being quiet
- # about files that are not there)
- def unlink_now():
- fh.unlink(fh.name)
-
- setattr(fh, 'unlink_now', unlink_now)
- return fh
-
-
-def fork_cb(child_cb, *args, **kwargs):
- fid = os.fork()
- if fid == 0:
- try:
- child_cb(*args, **kwargs)
- os._exit(0)
- except Exception:
- logexc(LOG, "Failed forking and calling callback %s",
- type_utils.obj_name(child_cb))
- os._exit(1)
- else:
- LOG.debug("Forked child %s who will run callback %s",
- fid, type_utils.obj_name(child_cb))
-
-
-def is_true(val, addons=None):
- if isinstance(val, (bool)):
- return val is True
- check_set = TRUE_STRINGS
- if addons:
- check_set = list(check_set) + addons
- if six.text_type(val).lower().strip() in check_set:
- return True
- return False
-
-
-def is_false(val, addons=None):
- if isinstance(val, (bool)):
- return val is False
- check_set = FALSE_STRINGS
- if addons:
- check_set = list(check_set) + addons
- if six.text_type(val).lower().strip() in check_set:
- return True
- return False
-
-
-def translate_bool(val, addons=None):
- if not val:
- # This handles empty lists and false and
- # other things that python believes are false
- return False
- # If its already a boolean skip
- if isinstance(val, (bool)):
- return val
- return is_true(val, addons)
-
-
-def rand_str(strlen=32, select_from=None):
- if not select_from:
- select_from = string.ascii_letters + string.digits
- return "".join([random.choice(select_from) for _x in range(0, strlen)])
-
-
-def rand_dict_key(dictionary, postfix=None):
- if not postfix:
- postfix = ""
- while True:
- newkey = rand_str(strlen=8) + "_" + postfix
- if newkey not in dictionary:
- break
- return newkey
-
-
-def read_conf(fname):
- try:
- return load_yaml(load_file(fname), default={})
- except IOError as e:
- if e.errno == errno.ENOENT:
- return {}
- else:
- raise
-
-
-# Merges the given lists, and then keeps only the
-# unique entries, but orders them by sort order
-# instead of by the original order
-def uniq_merge_sorted(*lists):
- return sorted(uniq_merge(*lists))
-
-
-# Merges the given lists and then iterates over the result,
-# keeping only the unique items (order preserving), and
-# returns that merged and de-duplicated list as the
-# final result.
-#
-# Note: if any entry is a string it will be
-# split on commas and any empty entries will be
-# discarded before merging.
-def uniq_merge(*lists):
- combined_list = []
- for a_list in lists:
- if isinstance(a_list, six.string_types):
- a_list = a_list.strip().split(",")
- # Kickout the empty ones
- a_list = [a for a in a_list if len(a)]
- combined_list.extend(a_list)
- return uniq_list(combined_list)
-
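-# Illustrative example (values are assumptions): string entries are split
-# on commas, empties dropped, and first-seen order kept.
-#
-#   uniq_merge("a,b", ["b", "c"], "a")   # -> ['a', 'b', 'c']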
-
-def clean_filename(fn):
- for (k, v) in FN_REPLACEMENTS.items():
- fn = fn.replace(k, v)
- removals = []
- for k in fn:
- if k not in FN_ALLOWED:
- removals.append(k)
- for k in removals:
- fn = fn.replace(k, '')
- fn = fn.strip()
- return fn
-
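-# Illustrative example (the filename is an assumption): os.sep becomes
-# '_' and characters outside FN_ALLOWED are dropped.
-#
-#   clean_filename("my file/name?.txt")   # -> 'myfile_name.txt'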
-
-def decomp_gzip(data, quiet=True, decode=True):
- try:
- buf = six.BytesIO(encode_text(data))
- with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh:
- if decode:
- return decode_binary(gh.read())
- else:
- return gh.read()
- except Exception as e:
- if quiet:
- return data
- else:
- raise DecompressionError(six.text_type(e))
-
-
-def extract_usergroup(ug_pair):
- if not ug_pair:
- return (None, None)
- ug_parted = ug_pair.split(':', 1)
- u = ug_parted[0].strip()
- if len(ug_parted) == 2:
- g = ug_parted[1].strip()
- else:
- g = None
- if not u or u == "-1" or u.lower() == "none":
- u = None
- if not g or g == "-1" or g.lower() == "none":
- g = None
- return (u, g)
-
-
-def find_modules(root_dir):
- entries = dict()
- for fname in glob.glob(os.path.join(root_dir, "*.py")):
- if not os.path.isfile(fname):
- continue
- modname = os.path.basename(fname)[0:-3]
- modname = modname.strip()
- if modname and modname.find(".") == -1:
- entries[fname] = modname
- return entries
-
-
-def multi_log(text, console=True, stderr=True,
- log=None, log_level=logging.DEBUG):
- if stderr:
- sys.stderr.write(text)
- if console:
- conpath = "/dev/console"
- if os.path.exists(conpath):
- with open(conpath, 'w') as wfh:
- wfh.write(text)
- wfh.flush()
- else:
- # A container may lack /dev/console (arguably a container bug). If
-            # it does not exist, then write output to stdout. This will result
-            # in duplicate stderr and stdout messages if stderr was True.
-            #
-            # Even though upstart or systemd might have set up output to go to
- # /dev/console, the user may have configured elsewhere via
- # cloud-config 'output'. If there is /dev/console, messages will
- # still get there.
- sys.stdout.write(text)
- if log:
- if text[-1] == "\n":
- log.log(log_level, text[:-1])
- else:
- log.log(log_level, text)
-
-
-def load_json(text, root_types=(dict,)):
- decoded = json.loads(decode_binary(text))
- if not isinstance(decoded, tuple(root_types)):
- expected_types = ", ".join([str(t) for t in root_types])
- raise TypeError("(%s) root types expected, got %s instead"
- % (expected_types, type(decoded)))
- return decoded
-
-
-def is_ipv4(instr):
-    """determine if the input string is an ipv4 address. return boolean."""
- toks = instr.split('.')
- if len(toks) != 4:
- return False
-
- try:
- toks = [x for x in toks if int(x) < 256 and int(x) >= 0]
- except Exception:
- return False
-
- return len(toks) == 4
-
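-# Illustrative examples (addresses are assumptions):
-#   is_ipv4("192.168.0.1")   # -> True
-#   is_ipv4("256.0.0.1")     # -> False (octet out of range)
-#   is_ipv4("10.0.0")        # -> False (too few octets)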
-
-def get_cfg_option_bool(yobj, key, default=False):
- if key not in yobj:
- return default
- return translate_bool(yobj[key])
-
-
-def get_cfg_option_str(yobj, key, default=None):
- if key not in yobj:
- return default
- val = yobj[key]
- if not isinstance(val, six.string_types):
- val = str(val)
- return val
-
-
-def get_cfg_option_int(yobj, key, default=0):
- return int(get_cfg_option_str(yobj, key, default=default))
-
-
-def system_info():
- return {
- 'platform': platform.platform(),
- 'release': platform.release(),
- 'python': platform.python_version(),
- 'uname': platform.uname(),
- 'dist': platform.linux_distribution(),
- }
-
-
-def get_cfg_option_list(yobj, key, default=None):
- """
- Gets the C{key} config option from C{yobj} as a list of strings. If the
- key is present as a single string it will be returned as a list with one
- string arg.
-
- @param yobj: The configuration object.
- @param key: The configuration key to get.
- @param default: The default to return if key is not found.
- @return: The configuration option as a list of strings or default if key
- is not found.
- """
- if key not in yobj:
- return default
- if yobj[key] is None:
- return []
- val = yobj[key]
- if isinstance(val, (list)):
- cval = [v for v in val]
- return cval
- if not isinstance(val, six.string_types):
- val = str(val)
- return [val]
-
-
-# get a cfg entry by its path array
-# for f['a']['b']: get_cfg_by_path(mycfg,('a','b'))
-def get_cfg_by_path(yobj, keyp, default=None):
- cur = yobj
- for tok in keyp:
- if tok not in cur:
- return default
- cur = cur[tok]
- return cur
-
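-# Illustrative example (the config dict is an assumption):
-#   get_cfg_by_path({'a': {'b': 22}}, ('a', 'b'))        # -> 22
-#   get_cfg_by_path({'a': {}}, ('a', 'b'), default=-1)   # -> -1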
-
-def fixup_output(cfg, mode):
- (outfmt, errfmt) = get_output_cfg(cfg, mode)
- redirect_output(outfmt, errfmt)
- return (outfmt, errfmt)
-
-
-# redirect_output(outfmt, errfmt, orig_out, orig_err)
-# replace orig_out and orig_err with filehandles specified in outfmt or errfmt
-# fmt can be:
-# > FILEPATH
-# >> FILEPATH
-# | program [ arg1 [ arg2 [ ... ] ] ]
-#
-# with a '|', arguments are passed to shell, so one level of
-# shell escape is required.
-#
-# if _CLOUD_INIT_SAVE_STDOUT is set in the environment to a non-empty and
-# true value then output will not be redirected (useful for debugging).
-#
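-# Illustrative invocations (log paths are assumptions):
-#   redirect_output(">> /var/log/cloud-init.out", "> /var/log/cloud-init.err")
-#   redirect_output("| logger -t cloud-init", "| logger -t cloud-init")
-#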
-def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
-
- if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDOUT")):
- LOG.debug("Not redirecting output due to _CLOUD_INIT_SAVE_STDOUT")
- return
-
- if not o_out:
- o_out = sys.stdout
- if not o_err:
- o_err = sys.stderr
-
- if outfmt:
- LOG.debug("Redirecting %s to %s", o_out, outfmt)
- (mode, arg) = outfmt.split(" ", 1)
- if mode == ">" or mode == ">>":
- owith = "ab"
- if mode == ">":
- owith = "wb"
- new_fp = open(arg, owith)
- elif mode == "|":
- proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
- new_fp = proc.stdin
- else:
- raise TypeError("Invalid type for output format: %s" % outfmt)
-
- if o_out:
- os.dup2(new_fp.fileno(), o_out.fileno())
-
- if errfmt == outfmt:
- LOG.debug("Redirecting %s to %s", o_err, outfmt)
- os.dup2(new_fp.fileno(), o_err.fileno())
- return
-
- if errfmt:
- LOG.debug("Redirecting %s to %s", o_err, errfmt)
- (mode, arg) = errfmt.split(" ", 1)
- if mode == ">" or mode == ">>":
- owith = "ab"
- if mode == ">":
- owith = "wb"
- new_fp = open(arg, owith)
- elif mode == "|":
- proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
- new_fp = proc.stdin
- else:
- raise TypeError("Invalid type for error format: %s" % errfmt)
-
- if o_err:
- os.dup2(new_fp.fileno(), o_err.fileno())
-
-
-def make_url(scheme, host, port=None,
- path='', params='', query='', fragment=''):
-
- pieces = []
- pieces.append(scheme or '')
-
- netloc = ''
- if host:
- netloc = str(host)
-
- if port is not None:
- netloc += ":" + "%s" % (port)
-
- pieces.append(netloc or '')
- pieces.append(path or '')
- pieces.append(params or '')
- pieces.append(query or '')
- pieces.append(fragment or '')
-
- return urlparse.urlunparse(pieces)
-
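-# Illustrative example (host and path are assumptions):
-#   make_url("http", "example.com", 8080, "/meta-data", query="v=1")
-#   # -> 'http://example.com:8080/meta-data?v=1'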
-
-def mergemanydict(srcs, reverse=False):
- if reverse:
- srcs = reversed(srcs)
- merged_cfg = {}
- for cfg in srcs:
- if cfg:
- # Figure out which mergers to apply...
- mergers_to_apply = mergers.dict_extract_mergers(cfg)
- if not mergers_to_apply:
- mergers_to_apply = mergers.default_mergers()
- merger = mergers.construct(mergers_to_apply)
- merged_cfg = merger.merge(merged_cfg, cfg)
- return merged_cfg
-
-
-@contextlib.contextmanager
-def chdir(ndir):
- curr = os.getcwd()
- try:
- os.chdir(ndir)
- yield ndir
- finally:
- os.chdir(curr)
-
-
-@contextlib.contextmanager
-def umask(n_msk):
- old = os.umask(n_msk)
- try:
- yield old
- finally:
- os.umask(old)
-
-
-@contextlib.contextmanager
-def tempdir(**kwargs):
-    # tempfile.TemporaryDirectory was only added in python 3.2,
-    # so make an equivalent here since it's useful...
- # See: http://bugs.python.org/file12970/tempdir.patch
- tdir = tempfile.mkdtemp(**kwargs)
- try:
- yield tdir
- finally:
- del_dir(tdir)
-
-
-def center(text, fill, max_len):
- return '{0:{fill}{align}{size}}'.format(text, fill=fill,
- align="^", size=max_len)
-
-
-def del_dir(path):
- LOG.debug("Recursively deleting %s", path)
- shutil.rmtree(path)
-
-
-def runparts(dirp, skip_no_exist=True, exe_prefix=None):
- if skip_no_exist and not os.path.isdir(dirp):
- return
-
- failed = []
- attempted = []
-
- if exe_prefix is None:
- prefix = []
- elif isinstance(exe_prefix, str):
- prefix = [str(exe_prefix)]
- elif isinstance(exe_prefix, list):
- prefix = exe_prefix
- else:
- raise TypeError("exe_prefix must be None, str, or list")
-
- for exe_name in sorted(os.listdir(dirp)):
- exe_path = os.path.join(dirp, exe_name)
- if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):
- attempted.append(exe_path)
- try:
- subp(prefix + [exe_path], capture=False)
- except ProcessExecutionError as e:
- logexc(LOG, "Failed running %s [%s]", exe_path, e.exit_code)
- failed.append(e)
-
- if failed and attempted:
- raise RuntimeError('Runparts: %s failures in %s attempted commands'
- % (len(failed), len(attempted)))
-
-
-# read_optional_seed
-# returns boolean indicating success or failure (presence of files)
-# if files are present, populates 'fill' dictionary with 'user-data' and
-# 'meta-data' entries
-def read_optional_seed(fill, base="", ext="", timeout=5):
- try:
- (md, ud) = read_seeded(base, ext, timeout)
- fill['user-data'] = ud
- fill['meta-data'] = md
- return True
- except url_helper.UrlError as e:
- if e.code == url_helper.NOT_FOUND:
- return False
- raise
-
-
-def fetch_ssl_details(paths=None):
- ssl_details = {}
- # Lookup in these locations for ssl key/cert files
- ssl_cert_paths = [
- '/var/lib/cloud/data/ssl',
- '/var/lib/cloud/instance/data/ssl',
- ]
- if paths:
- ssl_cert_paths.extend([
- os.path.join(paths.get_ipath_cur('data'), 'ssl'),
- os.path.join(paths.get_cpath('data'), 'ssl'),
- ])
- ssl_cert_paths = uniq_merge(ssl_cert_paths)
- ssl_cert_paths = [d for d in ssl_cert_paths if d and os.path.isdir(d)]
- cert_file = None
- for d in ssl_cert_paths:
- if os.path.isfile(os.path.join(d, 'cert.pem')):
- cert_file = os.path.join(d, 'cert.pem')
- break
- key_file = None
- for d in ssl_cert_paths:
- if os.path.isfile(os.path.join(d, 'key.pem')):
- key_file = os.path.join(d, 'key.pem')
- break
- if cert_file and key_file:
- ssl_details['cert_file'] = cert_file
- ssl_details['key_file'] = key_file
- elif cert_file:
- ssl_details['cert_file'] = cert_file
- return ssl_details
-
-
-def read_file_or_url(url, timeout=5, retries=10,
- headers=None, data=None, sec_between=1, ssl_details=None,
- headers_cb=None, exception_cb=None):
- url = url.lstrip()
- if url.startswith("/"):
- url = "file://%s" % url
- if url.lower().startswith("file://"):
- if data:
- LOG.warn("Unable to post data to file resource %s", url)
- file_path = url[len("file://"):]
- try:
- contents = load_file(file_path, decode=False)
- except IOError as e:
- code = e.errno
- if e.errno == errno.ENOENT:
- code = url_helper.NOT_FOUND
- raise url_helper.UrlError(cause=e, code=code, headers=None,
- url=url)
- return url_helper.FileResponse(file_path, contents=contents)
- else:
- return url_helper.readurl(url,
- timeout=timeout,
- retries=retries,
- headers=headers,
- headers_cb=headers_cb,
- data=data,
- sec_between=sec_between,
- ssl_details=ssl_details,
- exception_cb=exception_cb)
-
-
-def load_yaml(blob, default=None, allowed=(dict,)):
- loaded = default
- blob = decode_binary(blob)
- try:
- LOG.debug("Attempting to load yaml from string "
- "of length %s with allowed root types %s",
- len(blob), allowed)
- converted = safeyaml.load(blob)
- if not isinstance(converted, allowed):
-            # Yes this will just be caught, but that's ok for now...
- raise TypeError(("Yaml load allows %s root types,"
- " but got %s instead") %
- (allowed, type_utils.obj_name(converted)))
- loaded = converted
- except (yaml.YAMLError, TypeError, ValueError):
- if len(blob) == 0:
- LOG.debug("load_yaml given empty string, returning default")
- else:
- logexc(LOG, "Failed loading yaml blob")
- return loaded
-
-
-def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
- if base.startswith("/"):
- base = "file://%s" % base
-
- # default retries for file is 0. for network is 10
- if base.startswith("file://"):
- retries = file_retries
-
- if base.find("%s") >= 0:
- ud_url = base % ("user-data" + ext)
- md_url = base % ("meta-data" + ext)
- else:
- ud_url = "%s%s%s" % (base, "user-data", ext)
- md_url = "%s%s%s" % (base, "meta-data", ext)
-
-    md_resp = read_file_or_url(md_url, timeout, retries)
- md = None
- if md_resp.ok():
- md = load_yaml(decode_binary(md_resp.contents), default={})
-
-    ud_resp = read_file_or_url(ud_url, timeout, retries)
- ud = None
- if ud_resp.ok():
- ud = ud_resp.contents
-
- return (md, ud)
-
-
-def read_conf_d(confd):
-    # Get a reverse-sorted list (alphabetically later files take precedence)
- confs = sorted(os.listdir(confd), reverse=True)
-
- # Remove anything not ending in '.cfg'
- confs = [f for f in confs if f.endswith(".cfg")]
-
- # Remove anything not a file
- confs = [f for f in confs
- if os.path.isfile(os.path.join(confd, f))]
-
- # Load them all so that they can be merged
- cfgs = []
- for fn in confs:
- cfgs.append(read_conf(os.path.join(confd, fn)))
-
- return mergemanydict(cfgs)
-
-
-def read_conf_with_confd(cfgfile):
- cfg = read_conf(cfgfile)
-
- confd = False
- if "conf_d" in cfg:
- confd = cfg['conf_d']
- if confd:
- if not isinstance(confd, six.string_types):
- raise TypeError(("Config file %s contains 'conf_d' "
- "with non-string type %s") %
- (cfgfile, type_utils.obj_name(confd)))
- else:
- confd = str(confd).strip()
- elif os.path.isdir("%s.d" % cfgfile):
- confd = "%s.d" % cfgfile
-
- if not confd or not os.path.isdir(confd):
- return cfg
-
- # Conf.d settings override input configuration
- confd_cfg = read_conf_d(confd)
- return mergemanydict([confd_cfg, cfg])
-
-
-def read_cc_from_cmdline(cmdline=None):
- # this should support reading cloud-config information from
- # the kernel command line. It is intended to support content of the
- # format:
- # cc: <yaml content here> [end_cc]
- # this would include:
- # cc: ssh_import_id: [smoser, kirkland]\\n
- # cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
- # cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc
- if cmdline is None:
- cmdline = get_cmdline()
-
- tag_begin = "cc:"
- tag_end = "end_cc"
- begin_l = len(tag_begin)
- end_l = len(tag_end)
- clen = len(cmdline)
- tokens = []
- begin = cmdline.find(tag_begin)
- while begin >= 0:
- end = cmdline.find(tag_end, begin + begin_l)
- if end < 0:
- end = clen
- tokens.append(cmdline[begin + begin_l:end].lstrip().replace("\\n",
- "\n"))
-
- begin = cmdline.find(tag_begin, end + end_l)
-
- return '\n'.join(tokens)
-
-
-def dos2unix(contents):
- # find first end of line
- pos = contents.find('\n')
- if pos <= 0 or contents[pos - 1] != '\r':
- return contents
- return contents.replace('\r\n', '\n')
-
-
-def get_hostname_fqdn(cfg, cloud):
- # return the hostname and fqdn from 'cfg'. If not found in cfg,
- # then fall back to data from cloud
- if "fqdn" in cfg:
- # user specified a fqdn. Default hostname then is based off that
- fqdn = cfg['fqdn']
- hostname = get_cfg_option_str(cfg, "hostname", fqdn.split('.')[0])
- else:
- if "hostname" in cfg and cfg['hostname'].find('.') > 0:
- # user specified hostname, and it had '.' in it
- # be nice to them. set fqdn and hostname from that
- fqdn = cfg['hostname']
- hostname = cfg['hostname'][:fqdn.find('.')]
- else:
- # no fqdn set, get fqdn from cloud.
- # get hostname from cfg if available otherwise cloud
- fqdn = cloud.get_hostname(fqdn=True)
- if "hostname" in cfg:
- hostname = cfg['hostname']
- else:
- hostname = cloud.get_hostname()
- return (hostname, fqdn)
-
-
-def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
- """
- For each host a single line should be present with
- the following information:
-
- IP_address canonical_hostname [aliases...]
-
- Fields of the entry are separated by any number of blanks and/or tab
- characters. Text from a "#" character until the end of the line is a
- comment, and is ignored. Host names may contain only alphanumeric
- characters, minus signs ("-"), and periods ("."). They must begin with
- an alphabetic character and end with an alphanumeric character.
- Optional aliases provide for name changes, alternate spellings, shorter
- hostnames, or generic hostnames (for example, localhost).
- """
- fqdn = None
- try:
- for line in load_file(filename).splitlines():
- hashpos = line.find("#")
- if hashpos >= 0:
- line = line[0:hashpos]
- line = line.strip()
- if not line:
- continue
-
-            # If there are fewer than 3 entries
- # (IP_address, canonical_hostname, alias)
- # then ignore this line
- toks = line.split()
- if len(toks) < 3:
- continue
-
- if hostname in toks[2:]:
- fqdn = toks[1]
- break
- except IOError:
- pass
- return fqdn
-
-
-def get_cmdline_url(names=('cloud-config-url', 'url'),
- starts=b"#cloud-config", cmdline=None):
- if cmdline is None:
- cmdline = get_cmdline()
-
- data = keyval_str_to_dict(cmdline)
- url = None
- key = None
- for key in names:
- if key in data:
- url = data[key]
- break
-
- if not url:
- return (None, None, None)
-
- resp = read_file_or_url(url)
- # allow callers to pass starts as text when comparing to bytes contents
- starts = encode_text(starts)
- if resp.ok() and resp.contents.startswith(starts):
- return (key, url, resp.contents)
-
- return (key, url, None)
-
-
-def is_resolvable(name):
-    """determine if a hostname is resolvable, return a boolean
-    This also attempts to be resilient against dns redirection.
-
- Note, that normal nsswitch resolution is used here. So in order
- to avoid any utilization of 'search' entries in /etc/resolv.conf
- we have to append '.'.
-
-    The top level 'invalid' domain is invalid per RFC, and the
-    does-not-exist subdomain of example.com should not exist either. The
-    random entry will be resolved inside the search list.
- """
- global _DNS_REDIRECT_IP
- if _DNS_REDIRECT_IP is None:
- badips = set()
- badnames = ("does-not-exist.example.com.", "example.invalid.",
- rand_str())
- badresults = {}
- for iname in badnames:
- try:
- result = socket.getaddrinfo(iname, None, 0, 0,
- socket.SOCK_STREAM,
- socket.AI_CANONNAME)
- badresults[iname] = []
- for (_fam, _stype, _proto, cname, sockaddr) in result:
- badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
- badips.add(sockaddr[0])
- except (socket.gaierror, socket.error):
- pass
- _DNS_REDIRECT_IP = badips
- if badresults:
- LOG.debug("detected dns redirection: %s", badresults)
-
- try:
- result = socket.getaddrinfo(name, None)
- # check first result's sockaddr field
- addr = result[0][4][0]
- if addr in _DNS_REDIRECT_IP:
- return False
- return True
- except (socket.gaierror, socket.error):
- return False
-
-
-def get_hostname():
- hostname = socket.gethostname()
- return hostname
-
-
-def gethostbyaddr(ip):
- try:
- return socket.gethostbyaddr(ip)[0]
- except socket.herror:
- return None
-
-
-def is_resolvable_url(url):
- """determine if this url is resolvable (existing or ip)."""
- return is_resolvable(urlparse.urlparse(url).hostname)
-
-
-def search_for_mirror(candidates):
- """
- Search through a list of mirror urls for one that works
- This needs to return quickly.
- """
- for cand in candidates:
- try:
- if is_resolvable_url(cand):
- return cand
- except Exception:
- pass
- return None
-
-
-def close_stdin():
- """
- reopen stdin as /dev/null so even subprocesses or other os level things get
- /dev/null as input.
-
- if _CLOUD_INIT_SAVE_STDIN is set in environment to a non empty and true
- value then input will not be closed (useful for debugging).
- """
- if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDIN")):
- return
- with open(os.devnull) as fp:
- os.dup2(fp.fileno(), sys.stdin.fileno())
-
-
-def find_devs_with(criteria=None, oformat='device',
- tag=None, no_cache=False, path=None):
- """
- find devices matching given criteria (via blkid)
- criteria can be *one* of:
- TYPE=<filesystem>
- LABEL=<label>
- UUID=<uuid>
- """
- blk_id_cmd = ['blkid']
- options = []
- if criteria:
- # Search for block devices with tokens named NAME that
- # have the value 'value' and display any devices which are found.
- # Common values for NAME include TYPE, LABEL, and UUID.
- # If there are no devices specified on the command line,
- # all block devices will be searched; otherwise,
- # only search the devices specified by the user.
- options.append("-t%s" % (criteria))
- if tag:
- # For each (specified) device, show only the tags that match tag.
- options.append("-s%s" % (tag))
- if no_cache:
- # If you want to start with a clean cache
- # (i.e. don't report devices previously scanned
- # but not necessarily available at this time), specify /dev/null.
- options.extend(["-c", "/dev/null"])
- if oformat:
- # Display blkid's output using the specified format.
- # The format parameter may be:
- # full, value, list, device, udev, export
- options.append('-o%s' % (oformat))
- if path:
- options.append(path)
- cmd = blk_id_cmd + options
-    # blkid returns 2 when no matching devices are found; see man blkid
- try:
- (out, _err) = subp(cmd, rcs=[0, 2])
- except ProcessExecutionError as e:
- if e.errno == errno.ENOENT:
- # blkid not found...
- out = ""
- else:
- raise
- entries = []
- for line in out.splitlines():
- line = line.strip()
- if line:
- entries.append(line)
- return entries
-
-
-def peek_file(fname, max_bytes):
- LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes)
- with open(fname, 'rb') as ifh:
- return ifh.read(max_bytes)
-
-
-def uniq_list(in_list):
- out_list = []
- for i in in_list:
- if i in out_list:
- continue
- else:
- out_list.append(i)
- return out_list
-
-
-def load_file(fname, read_cb=None, quiet=False, decode=True):
- LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
- ofh = six.BytesIO()
- try:
- with open(fname, 'rb') as ifh:
- pipe_in_out(ifh, ofh, chunk_cb=read_cb)
- except IOError as e:
- if not quiet:
- raise
- if e.errno != errno.ENOENT:
- raise
- contents = ofh.getvalue()
- LOG.debug("Read %s bytes from %s", len(contents), fname)
- if decode:
- return decode_binary(contents)
- else:
- return contents
-
-
-def get_cmdline():
- if 'DEBUG_PROC_CMDLINE' in os.environ:
- return os.environ["DEBUG_PROC_CMDLINE"]
-
- global PROC_CMDLINE
- if PROC_CMDLINE is not None:
- return PROC_CMDLINE
-
- if is_container():
- try:
- contents = load_file("/proc/1/cmdline")
- # replace nulls with space and drop trailing null
- cmdline = contents.replace("\x00", " ")[:-1]
- except Exception as e:
- LOG.warn("failed reading /proc/1/cmdline: %s", e)
- cmdline = ""
- else:
- try:
- cmdline = load_file("/proc/cmdline").strip()
- except Exception:
- cmdline = ""
-
- PROC_CMDLINE = cmdline
- return cmdline
-
-
-def pipe_in_out(in_fh, out_fh, chunk_size=1024, chunk_cb=None):
- bytes_piped = 0
- while True:
- data = in_fh.read(chunk_size)
- if len(data) == 0:
- break
- else:
- out_fh.write(data)
- bytes_piped += len(data)
- if chunk_cb:
- chunk_cb(bytes_piped)
- out_fh.flush()
- return bytes_piped
-
-
-def chownbyid(fname, uid=None, gid=None):
- if uid in [None, -1] and gid in [None, -1]:
- # Nothing to do
- return
- LOG.debug("Changing the ownership of %s to %s:%s", fname, uid, gid)
- os.chown(fname, uid, gid)
-
-
-def chownbyname(fname, user=None, group=None):
- uid = -1
- gid = -1
- try:
- if user:
- uid = pwd.getpwnam(user).pw_uid
- if group:
- gid = grp.getgrnam(group).gr_gid
- except KeyError as e:
- raise OSError("Unknown user or group: %s" % (e))
- chownbyid(fname, uid, gid)
-
-
-# Always returns well-formatted values
-# cfg is expected to have an entry 'output' in it, which is a dictionary
-# that includes entries for 'init', 'config', 'final' or 'all'
-# init: /var/log/cloud.out
-# config: [ ">> /var/log/cloud-config.out", /var/log/cloud-config.err ]
-# final:
-# output: "| logger -p"
-# error: "> /dev/null"
-# this returns the specific 'mode' entry, cleanly formatted, with value
-def get_output_cfg(cfg, mode):
- ret = [None, None]
- if not cfg or 'output' not in cfg:
- return ret
-
- outcfg = cfg['output']
- if mode in outcfg:
- modecfg = outcfg[mode]
- else:
- if 'all' not in outcfg:
- return ret
- # if there is a 'all' item in the output list
- # then it applies to all users of this (init, config, final)
- modecfg = outcfg['all']
-
- # if value is a string, it specifies stdout and stderr
- if isinstance(modecfg, str):
- ret = [modecfg, modecfg]
-
-    # if it's a list, then we expect (stdout, stderr)
- if isinstance(modecfg, list):
- if len(modecfg) > 0:
- ret[0] = modecfg[0]
- if len(modecfg) > 1:
- ret[1] = modecfg[1]
-
- # if it is a dictionary, expect 'out' and 'error'
- # items, which indicate out and error
- if isinstance(modecfg, dict):
- if 'output' in modecfg:
- ret[0] = modecfg['output']
- if 'error' in modecfg:
- ret[1] = modecfg['error']
-
- # if err's entry == "&1", then make it same as stdout
- # as in shell syntax of "echo foo >/dev/null 2>&1"
- if ret[1] == "&1":
- ret[1] = ret[0]
-
- swlist = [">>", ">", "|"]
- for i in range(len(ret)):
- if not ret[i]:
- continue
- val = ret[i].lstrip()
- found = False
- for s in swlist:
- if val.startswith(s):
- val = "%s %s" % (s, val[len(s):].strip())
- found = True
- break
- if not found:
- # default behavior is append
- val = "%s %s" % (">>", val.strip())
- ret[i] = val
-
- return ret
-
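-# Illustrative example (paths are assumptions): an 'all' entry applies to
-# every mode, bare paths default to append (">>"), and "&1" ties the
-# error stream to stdout.
-#
-#   cfg = {'output': {'all': {'output': '/var/log/cloud.out',
-#                             'error': '&1'}}}
-#   get_output_cfg(cfg, 'init')
-#   # -> ['>> /var/log/cloud.out', '>> /var/log/cloud.out']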
-
-def logexc(log, msg, *args):
-    # Setting the level here allows it to change easily
-    # (it is not always an error, nor is it always
-    # desirable to have that much junk coming
-    # out to a non-debug stream)
- if msg:
- log.warn(msg, *args)
- # Debug gets the full trace. However, nose has a bug whereby its
- # logcapture plugin doesn't properly handle the case where there is no
- # actual exception. To avoid tracebacks during the test suite then, we'll
- # do the actual exc_info extraction here, and if there is no exception in
- # flight, we'll just pass in None.
- exc_info = sys.exc_info()
- if exc_info == (None, None, None):
- exc_info = None
- log.debug(msg, exc_info=exc_info, *args)
-
-
-def hash_blob(blob, routine, mlen=None):
- hasher = hashlib.new(routine)
- hasher.update(encode_text(blob))
- digest = hasher.hexdigest()
-    # Don't get too long now
- if mlen is not None:
- return digest[0:mlen]
- else:
- return digest
-
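-# Illustrative example (input is an assumption):
-#   hash_blob('hello', 'md5')          # -> '5d41402abc4b2a76b9719d911017c592'
-#   hash_blob('hello', 'md5', mlen=8)  # -> '5d41402a'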
-
-def is_user(name):
- try:
- if pwd.getpwnam(name):
- return True
- except KeyError:
- return False
-
-
-def is_group(name):
- try:
- if grp.getgrnam(name):
- return True
- except KeyError:
- return False
-
-
-def rename(src, dest):
- LOG.debug("Renaming %s to %s", src, dest)
-    # TODO(harlowja) use a SeLinuxGuard here??
- os.rename(src, dest)
-
-
-def ensure_dirs(dirlist, mode=0o755):
- for d in dirlist:
- ensure_dir(d, mode)
-
-
-def read_write_cmdline_url(target_fn):
- if not os.path.exists(target_fn):
- try:
- (key, url, content) = get_cmdline_url()
- except Exception:
- logexc(LOG, "Failed fetching command line url")
- return
- try:
- if key and content:
- write_file(target_fn, content, mode=0o600)
- LOG.debug(("Wrote to %s with contents of command line"
- " url %s (len=%s)"), target_fn, url, len(content))
- elif key and not content:
- LOG.debug(("Command line key %s with url"
- " %s had no contents"), key, url)
- except Exception:
- logexc(LOG, "Failed writing url content to %s", target_fn)
-
-
-def yaml_dumps(obj, explicit_start=True, explicit_end=True):
- return yaml.safe_dump(obj,
- line_break="\n",
- indent=4,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- default_flow_style=False)
-
-
-def ensure_dir(path, mode=None):
- if not os.path.isdir(path):
- # Make the dir and adjust the mode
- with SeLinuxGuard(os.path.dirname(path), recursive=True):
- os.makedirs(path)
- chmod(path, mode)
- else:
- # Just adjust the mode
- chmod(path, mode)
-
-
-@contextlib.contextmanager
-def unmounter(umount):
- try:
- yield umount
- finally:
- if umount:
- umount_cmd = ["umount", umount]
- subp(umount_cmd)
-
-
-def mounts():
- mounted = {}
- try:
- # Go through mounts to see what is already mounted
- if os.path.exists("/proc/mounts"):
- mount_locs = load_file("/proc/mounts").splitlines()
- method = 'proc'
- else:
- (mountoutput, _err) = subp("mount")
- mount_locs = mountoutput.splitlines()
- method = 'mount'
- mountre = r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$'
- for mpline in mount_locs:
- # Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered)
- # FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates)
- try:
- if method == 'proc':
- (dev, mp, fstype, opts, _freq, _passno) = mpline.split()
- else:
- m = re.search(mountre, mpline)
- dev = m.group(1)
- mp = m.group(2)
- fstype = m.group(3)
- opts = m.group(4)
- except Exception:
- continue
- # If the name of the mount point contains spaces these
- # can be escaped as '\040', so undo that..
- mp = mp.replace("\\040", " ")
- mounted[dev] = {
- 'fstype': fstype,
- 'mountpoint': mp,
- 'opts': opts,
- }
- LOG.debug("Fetched %s mounts from %s", mounted, method)
- except (IOError, OSError):
- logexc(LOG, "Failed fetching mount points")
- return mounted
-
-
-def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
- """
- Mount the device, call method 'callback' passing the directory
- in which it was mounted, then unmount. Return whatever 'callback'
- returned. If data != None, also pass data to callback.
-
- mtype is a filesystem type. it may be a list, string (a single fsname)
- or a list of fsnames.
- """
-
- if isinstance(mtype, str):
- mtypes = [mtype]
- elif isinstance(mtype, (list, tuple)):
- mtypes = list(mtype)
- elif mtype is None:
- mtypes = None
-
- # clean up 'mtype' input a bit based on platform.
- platsys = platform.system().lower()
- if platsys == "linux":
- if mtypes is None:
- mtypes = ["auto"]
- elif platsys.endswith("bsd"):
- if mtypes is None:
- mtypes = ['ufs', 'cd9660', 'vfat']
- for index, mtype in enumerate(mtypes):
- if mtype == "iso9660":
- mtypes[index] = "cd9660"
- else:
- # we cannot do a smart "auto", so just call 'mount' once with no -t
- mtypes = ['']
-
- mounted = mounts()
- with tempdir() as tmpd:
- umount = False
- if os.path.realpath(device) in mounted:
- mountpoint = mounted[os.path.realpath(device)]['mountpoint']
- else:
- failure_reason = None
- for mtype in mtypes:
- mountpoint = None
- try:
- mountcmd = ['mount']
- mountopts = []
- if rw:
- mountopts.append('rw')
- else:
- mountopts.append('ro')
- if sync:
-                        # This seems like the safe approach
-                        # (i.e. having sync on by default)
- mountopts.append("sync")
- if mountopts:
- mountcmd.extend(["-o", ",".join(mountopts)])
- if mtype:
- mountcmd.extend(['-t', mtype])
- mountcmd.append(device)
- mountcmd.append(tmpd)
- subp(mountcmd)
- umount = tmpd # This forces it to be unmounted (when set)
- mountpoint = tmpd
- break
- except (IOError, OSError) as exc:
- LOG.debug("Failed mount of '%s' as '%s': %s",
- device, mtype, exc)
- failure_reason = exc
- if not mountpoint:
- raise MountFailedError("Failed mounting %s to %s due to: %s" %
- (device, tmpd, failure_reason))
-
- # Be nice and ensure it ends with a slash
- if not mountpoint.endswith("/"):
- mountpoint += "/"
- with unmounter(umount):
- if data is None:
- ret = callback(mountpoint)
- else:
- ret = callback(mountpoint, data)
- return ret
-
-
-def get_builtin_cfg():
- # Deep copy so that others can't modify
- return obj_copy.deepcopy(CFG_BUILTIN)
-
-
-def sym_link(source, link, force=False):
- LOG.debug("Creating symbolic link from %r => %r", link, source)
- if force and os.path.exists(link):
- del_file(link)
- os.symlink(source, link)
-
-
-def del_file(path):
- LOG.debug("Attempting to remove %s", path)
- try:
- os.unlink(path)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise e
-
-
-def copy(src, dest):
- LOG.debug("Copying %s to %s", src, dest)
- shutil.copy(src, dest)
-
-
-def time_rfc2822():
- try:
- ts = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime())
- except Exception:
- ts = "??"
- return ts
-
-
-def uptime():
- uptime_str = '??'
- method = 'unknown'
- try:
- if os.path.exists("/proc/uptime"):
- method = '/proc/uptime'
- contents = load_file("/proc/uptime")
- if contents:
- uptime_str = contents.split()[0]
- else:
- method = 'ctypes'
- libc = ctypes.CDLL('/lib/libc.so.7')
- size = ctypes.c_size_t()
- buf = ctypes.c_int()
- size.value = ctypes.sizeof(buf)
- libc.sysctlbyname("kern.boottime", ctypes.byref(buf),
- ctypes.byref(size), None, 0)
- now = time.time()
- bootup = buf.value
- uptime_str = now - bootup
-
- except Exception:
- logexc(LOG, "Unable to read uptime using method: %s" % method)
- return uptime_str
-
-
-def append_file(path, content):
- write_file(path, content, omode="ab", mode=None)
-
-
-def ensure_file(path, mode=0o644):
- write_file(path, content='', omode="ab", mode=mode)
-
-
-def safe_int(possible_int):
- try:
- return int(possible_int)
- except (ValueError, TypeError):
- return None
-
-
-def chmod(path, mode):
- real_mode = safe_int(mode)
- if path and real_mode:
- with SeLinuxGuard(path):
- os.chmod(path, real_mode)
-
-
-def write_file(filename, content, mode=0o644, omode="wb"):
- """
- Writes a file with the given content and sets the file mode as specified.
-    Restores the SELinux context if possible.
-
- @param filename: The full path of the file to write.
- @param content: The content to write to the file.
- @param mode: The filesystem mode to set on the file.
- @param omode: The open mode used when opening the file (w, wb, a, etc.)
- """
- ensure_dir(os.path.dirname(filename))
- if 'b' in omode.lower():
- content = encode_text(content)
- write_type = 'bytes'
- else:
- content = decode_binary(content)
- write_type = 'characters'
- LOG.debug("Writing to %s - %s: [%s] %s %s",
- filename, omode, mode, len(content), write_type)
- with SeLinuxGuard(path=filename):
- with open(filename, omode) as fh:
- fh.write(content)
- fh.flush()
- chmod(filename, mode)
-
-
-def delete_dir_contents(dirname):
- """
- Deletes all contents of a directory without deleting the directory itself.
-
- @param dirname: The directory whose contents should be deleted.
- """
- for node in os.listdir(dirname):
- node_fullpath = os.path.join(dirname, node)
- if os.path.isdir(node_fullpath):
- del_dir(node_fullpath)
- else:
- del_file(node_fullpath)
-
-
-def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
- logstring=False):
- if rcs is None:
- rcs = [0]
- try:
-
- if not logstring:
- LOG.debug(("Running command %s with allowed return codes %s"
- " (shell=%s, capture=%s)"), args, rcs, shell, capture)
- else:
- LOG.debug(("Running hidden command to protect sensitive "
- "input/output logstring: %s"), logstring)
-
- if not capture:
- stdout = None
- stderr = None
- else:
- stdout = subprocess.PIPE
- stderr = subprocess.PIPE
- stdin = subprocess.PIPE
- kws = dict(stdout=stdout, stderr=stderr, stdin=stdin,
- env=env, shell=shell)
- if six.PY3:
- # Use this so subprocess output will be (Python 3) str, not bytes.
- kws['universal_newlines'] = True
- sp = subprocess.Popen(args, **kws)
- (out, err) = sp.communicate(data)
- except OSError as e:
- raise ProcessExecutionError(cmd=args, reason=e,
- errno=e.errno)
- rc = sp.returncode
- if rc not in rcs:
- raise ProcessExecutionError(stdout=out, stderr=err,
- exit_code=rc,
- cmd=args)
-    # Ensure blank strings instead of None (if capturing)
- if not out and capture:
- out = ''
- if not err and capture:
- err = ''
- return (out, err)
-
-
-def make_header(comment_char="#", base='created'):
- ci_ver = version.version_string()
- header = str(comment_char)
- header += " %s by cloud-init v. %s" % (base.title(), ci_ver)
- header += " on %s" % time_rfc2822()
- return header
-
-
-def abs_join(*paths):
- return os.path.abspath(os.path.join(*paths))
-
-
-# shellify, takes a list of commands
-# for each entry in the list
-# if it is a list, shell-protect it (with single quotes)
-# if it is a string, do nothing
-def shellify(cmdlist, add_header=True):
- content = ''
- if add_header:
- content += "#!/bin/sh\n"
- escaped = "%s%s%s%s" % ("'", '\\', "'", "'")
- cmds_made = 0
- for args in cmdlist:
-        # If the item is a list, wrap all items in single quotes.
-        # If it's not, then just write it directly.
- if isinstance(args, list):
- fixed = []
- for f in args:
- fixed.append("'%s'" % (six.text_type(f).replace("'", escaped)))
- content = "%s%s\n" % (content, ' '.join(fixed))
- cmds_made += 1
- elif isinstance(args, six.string_types):
- content = "%s%s\n" % (content, args)
- cmds_made += 1
- else:
- raise RuntimeError(("Unable to shellify type %s"
- " which is not a list or string")
- % (type_utils.obj_name(args)))
- LOG.debug("Shellified %s commands.", cmds_made)
- return content
-
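-# Illustrative example (commands are assumptions): list entries are
-# quoted per-argument, string entries pass through untouched.
-#
-#   shellify([['echo', 'hello world'], 'ls -l'])
-#   # -> "#!/bin/sh\n'echo' 'hello world'\nls -l\n"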
-
-def strip_prefix_suffix(line, prefix=None, suffix=None):
- if prefix and line.startswith(prefix):
- line = line[len(prefix):]
- if suffix and line.endswith(suffix):
- line = line[:-len(suffix)]
- return line
-
-
-def is_container():
- """
- Checks to see if this code running in a container of some sort
- """
-
- for helper in CONTAINER_TESTS:
- try:
- # try to run a helper program. if it returns true/zero
- # then we're inside a container. otherwise, no
- subp(helper)
- return True
- except (IOError, OSError):
- pass
-
- # this code is largely from the logic in
- # ubuntu's /etc/init/container-detect.conf
- try:
- # Detect old-style libvirt
- # Detect OpenVZ containers
- pid1env = get_proc_env(1)
- if "container" in pid1env:
- return True
- if "LIBVIRT_LXC_UUID" in pid1env:
- return True
- except (IOError, OSError):
- pass
-
- # Detect OpenVZ containers
- if os.path.isdir("/proc/vz") and not os.path.isdir("/proc/bc"):
- return True
-
- try:
- # Detect Vserver containers
- lines = load_file("/proc/self/status").splitlines()
- for line in lines:
- if line.startswith("VxID:"):
- (_key, val) = line.strip().split(":", 1)
- if val != "0":
- return True
- except (IOError, OSError):
- pass
-
- return False
-
-
-def get_proc_env(pid):
- """
- Return the environment in a dict that a given process id was started with.
- """
-
- env = {}
- fn = os.path.join("/proc/", str(pid), "environ")
- try:
- contents = load_file(fn)
- toks = contents.split("\x00")
- for tok in toks:
- if tok == "":
- continue
- (name, val) = tok.split("=", 1)
- if name:
- env[name] = val
- except (IOError, OSError):
- pass
- return env
-
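-# Illustrative example: /proc/<pid>/environ is a NUL-separated blob,
-# e.g. b"HOME=/\x00TERM=linux\x00". For that (hypothetical) content:
-#
-#   >>> get_proc_env(1)
-#   {'HOME': '/', 'TERM': 'linux'}
-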
-
-def keyval_str_to_dict(kvstring):
- ret = {}
- for tok in kvstring.split():
- try:
- (key, val) = tok.split("=", 1)
- except ValueError:
- key = tok
- val = True
- ret[key] = val
- return ret
-
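-# Illustrative example, in the style of a kernel command line; tokens
-# without '=' map to True (key order shown is arbitrary):
-#
-#   >>> keyval_str_to_dict("root=/dev/sda1 ro quiet")
-#   {'root': '/dev/sda1', 'ro': True, 'quiet': True}
-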
-
-def is_partition(device):
- if device.startswith("/dev/"):
- device = device[5:]
-
- return os.path.isfile("/sys/class/block/%s/partition" % device)
-
-
-def expand_package_list(version_fmt, pkgs):
-    # accept a single package or a list; each entry may be a plain
-    # name string or a (name, version) tuple/list
- if not isinstance(pkgs, list):
- pkgs = [pkgs]
-
- pkglist = []
- for pkg in pkgs:
- if isinstance(pkg, six.string_types):
- pkglist.append(pkg)
- continue
-
- if isinstance(pkg, (tuple, list)):
- if len(pkg) < 1 or len(pkg) > 2:
- raise RuntimeError("Invalid package & version tuple.")
-
- if len(pkg) == 2 and pkg[1]:
- pkglist.append(version_fmt % tuple(pkg))
- continue
-
- pkglist.append(pkg[0])
-
- else:
- raise RuntimeError("Invalid package type.")
-
- return pkglist
-
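-# Illustrative example: plain names pass through, (name, version)
-# pairs are rendered with version_fmt:
-#
-#   >>> expand_package_list("%s=%s", ["vim", ("emacs", "24.3")])
-#   ['vim', 'emacs=24.3']
-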
-
-def parse_mount_info(path, mountinfo_lines, log=LOG):
- """Return the mount information for PATH given the lines from
- /proc/$$/mountinfo."""
-
- path_elements = [e for e in path.split('/') if e]
- devpth = None
- fs_type = None
- match_mount_point = None
- match_mount_point_elements = None
- for i, line in enumerate(mountinfo_lines):
- parts = line.split()
-
- # Completely fail if there is anything in any line that is
- # unexpected, as continuing to parse past a bad line could
- # cause an incorrect result to be returned, so it's better
- # return nothing than an incorrect result.
-
- # The minimum number of elements in a valid line is 10.
- if len(parts) < 10:
- log.debug("Line %d has two few columns (%d): %s",
- i + 1, len(parts), line)
- return None
-
- mount_point = parts[4]
- mount_point_elements = [e for e in mount_point.split('/') if e]
-
- # Ignore mounts deeper than the path in question.
- if len(mount_point_elements) > len(path_elements):
- continue
-
- # Ignore mounts where the common path is not the same.
-        common = min(len(mount_point_elements), len(path_elements))
-        if mount_point_elements[0:common] != path_elements[0:common]:
- continue
-
- # Ignore mount points higher than an already seen mount
- # point.
- if (match_mount_point_elements is not None and
- len(match_mount_point_elements) > len(mount_point_elements)):
- continue
-
- # Find the '-' which terminates a list of optional columns to
- # find the filesystem type and the path to the device. See
- # man 5 proc for the format of this file.
-        try:
-            # Use a separate name so the enumerate() line counter 'i'
-            # is not clobbered; the log messages below rely on it.
-            dash = parts.index('-')
-        except ValueError:
-            log.debug("Did not find column named '-' in line %d: %s",
-                      i + 1, line)
-            return None
-
-        # Get the path to the device.
-        try:
-            fs_type = parts[dash + 1]
-            devpth = parts[dash + 2]
-        except IndexError:
-            log.debug("Too few columns after '-' column in line %d: %s",
-                      i + 1, line)
-            return None
-
- match_mount_point = mount_point
- match_mount_point_elements = mount_point_elements
-
- if devpth and fs_type and match_mount_point:
- return (devpth, fs_type, match_mount_point)
- else:
- return None
-
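-# Illustrative example with a single (hypothetical) mountinfo line:
-#
-#   >>> line = "1 1 252:1 / / rw,relatime - ext4 /dev/vda1 rw"
-#   >>> parse_mount_info("/", [line])
-#   ('/dev/vda1', 'ext4', '/')
-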
-
-def parse_mtab(path):
- """On older kernels there's no /proc/$$/mountinfo, so use mtab."""
- for line in load_file("/etc/mtab").splitlines():
- devpth, mount_point, fs_type = line.split()[:3]
- if mount_point == path:
- return devpth, fs_type, mount_point
- return None
-
-
-def parse_mount(path):
- (mountoutput, _err) = subp("mount")
- mount_locs = mountoutput.splitlines()
-    for line in mount_locs:
-        m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line)
-        if not m:
-            # Skip lines (e.g. non-device mounts) that do not match.
-            continue
-        devpth = m.group(1)
-        mount_point = m.group(2)
-        fs_type = m.group(3)
-        if mount_point == path:
-            return devpth, fs_type, mount_point
- return None
-
-
-def get_mount_info(path, log=LOG):
- # Use /proc/$$/mountinfo to find the device where path is mounted.
- # This is done because with a btrfs filesystem using os.stat(path)
- # does not return the ID of the device.
- #
- # Here, / has a device of 18 (decimal).
- #
- # $ stat /
- # File: '/'
- # Size: 234 Blocks: 0 IO Block: 4096 directory
- # Device: 12h/18d Inode: 256 Links: 1
- # Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root)
- # Access: 2013-01-13 07:31:04.358011255 +0000
- # Modify: 2013-01-13 18:48:25.930011255 +0000
- # Change: 2013-01-13 18:48:25.930011255 +0000
- # Birth: -
- #
- # Find where / is mounted:
- #
- # $ mount | grep ' / '
- # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo)
- #
- # And the device ID for /dev/vda1 is not 18:
- #
- # $ ls -l /dev/vda1
- # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1
- #
- # So use /proc/$$/mountinfo to find the device underlying the
- # input path.
- mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
- if os.path.exists(mountinfo_path):
- lines = load_file(mountinfo_path).splitlines()
- return parse_mount_info(path, lines, log)
- elif os.path.exists("/etc/mtab"):
- return parse_mtab(path)
- else:
- return parse_mount(path)
-
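-# Illustrative usage: on a host whose root is /dev/vda1 with ext4,
-# this might return (values are machine-specific):
-#
-#   >>> get_mount_info("/")
-#   ('/dev/vda1', 'ext4', '/')
-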
-
-def which(program):
- # Return path of program for execution if found in path
- def is_exe(fpath):
- return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
-
- _fpath, _ = os.path.split(program)
- if _fpath:
- if is_exe(program):
- return program
- else:
- for path in os.environ.get("PATH", "").split(os.pathsep):
- path = path.strip('"')
- exe_file = os.path.join(path, program)
- if is_exe(exe_file):
- return exe_file
-
- return None
-
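-# Illustrative usage (result depends on the host's PATH):
-#
-#   >>> which("sh")
-#   '/bin/sh'
-#   >>> which("no-such-program") is None
-#   True
-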
-
-def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False):
- if args is None:
- args = []
- if kwargs is None:
- kwargs = {}
-
- start = time.time()
-
- ustart = None
- if get_uptime:
- try:
- ustart = float(uptime())
- except ValueError:
- pass
-
- try:
- ret = func(*args, **kwargs)
- finally:
- delta = time.time() - start
- udelta = None
- if ustart is not None:
- try:
- udelta = float(uptime()) - ustart
- except ValueError:
- pass
-
- tmsg = " took %0.3f seconds" % delta
- if get_uptime:
-            if isinstance(udelta, float):
- tmsg += " (%0.2f)" % udelta
- else:
- tmsg += " (N/A)"
- try:
- logfunc(msg + tmsg)
- except Exception:
- pass
- return ret
-
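-# Illustrative usage, timing a (hypothetical) call through LOG.debug:
-#
-#   >>> log_time(LOG.debug, "sleep(0.1)", func=time.sleep, args=[0.1])
-#   # logs: "sleep(0.1) took 0.100 seconds"
-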
-
-def expand_dotted_devname(dotted):
- toks = dotted.rsplit(".", 1)
- if len(toks) > 1:
- return toks
- else:
- return (dotted, None)
-
-
-def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
- # return a dictionary populated with keys in 'required' and 'optional'
-    # by reading files at base + delim + entry
- if required is None:
- required = []
- if optional is None:
- optional = []
-
- missing = []
- ret = {}
- for f in required + optional:
- try:
- ret[f] = load_file(base + delim + f, quiet=False, decode=False)
- except IOError as e:
- if e.errno != errno.ENOENT:
- raise
- if f in required:
- missing.append(f)
-
-    if missing:
-        raise ValueError("Missing required files: %s" % ','.join(missing))
-
- return ret
-
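-# Illustrative example with hypothetical seed files /tmp/seed/user-data
-# and /tmp/seed/meta-data (contents are returned as bytes):
-#
-#   >>> pathprefix2dict("/tmp/seed", required=["user-data"],
-#   ...                 optional=["meta-data"])
-#   {'user-data': b'...', 'meta-data': b'...'}
-#
-# A missing entry from 'required' raises ValueError instead.
-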
-
-def read_meminfo(meminfo="/proc/meminfo", raw=False):
- # read a /proc/meminfo style file and return
- # a dict with 'total', 'free', and 'available'
- mpliers = {'kB': 2 ** 10, 'mB': 2 ** 20, 'B': 1, 'gB': 2 ** 30}
- kmap = {'MemTotal:': 'total', 'MemFree:': 'free',
- 'MemAvailable:': 'available'}
- ret = {}
- for line in load_file(meminfo).splitlines():
- try:
- key, value, unit = line.split()
- except ValueError:
- key, value = line.split()
- unit = 'B'
- if raw:
- ret[key] = int(value) * mpliers[unit]
- elif key in kmap:
- ret[kmap[key]] = int(value) * mpliers[unit]
-
- return ret
-
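-# Illustrative example: for a hypothetical /proc/meminfo containing
-# "MemTotal: 2055680 kB", the returned sizes are in bytes:
-#
-#   >>> read_meminfo()['total']
-#   2105016320
-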
-
-def human2bytes(size):
- """Convert human string or integer to size in bytes
- 10M => 10485760
- .5G => 536870912
- """
-    size_in = size
-    # Accept plain integers, as documented above, by normalizing to str.
-    if not isinstance(size, six.string_types):
-        size = str(size)
-    if size.endswith("B"):
-        size = size[:-1]
-
- mpliers = {'B': 1, 'K': 2 ** 10, 'M': 2 ** 20, 'G': 2 ** 30, 'T': 2 ** 40}
-
- num = size
- mplier = 'B'
- for m in mpliers:
- if size.endswith(m):
- mplier = m
- num = size[0:-len(m)]
-
- try:
- num = float(num)
- except ValueError:
- raise ValueError("'%s' is not valid input." % size_in)
-
- if num < 0:
- raise ValueError("'%s': cannot be negative" % size_in)
-
- return int(num * mpliers[mplier])
-
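-# Illustrative examples (a trailing "B" is ignored, bare numbers are
-# taken as bytes):
-#
-#   >>> human2bytes("10M")
-#   10485760
-#   >>> human2bytes(".5G")
-#   536870912
-#   >>> human2bytes("1024")
-#   1024
-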
-
-def _read_dmi_syspath(key):
- """
-    Reads dmi data from /sys/class/dmi/id
- """
- if key not in DMIDECODE_TO_DMI_SYS_MAPPING:
- return None
- mapped_key = DMIDECODE_TO_DMI_SYS_MAPPING[key]
- dmi_key_path = "{0}/{1}".format(DMI_SYS_PATH, mapped_key)
- LOG.debug("querying dmi data %s", dmi_key_path)
- try:
- if not os.path.exists(dmi_key_path):
- LOG.debug("did not find %s", dmi_key_path)
- return None
-
- key_data = load_file(dmi_key_path, decode=False)
- if not key_data:
- LOG.debug("%s did not return any data", dmi_key_path)
- return None
-
-        # uninitialized dmi values show as all \xff and /sys appends a '\n';
-        # in that event, treat the value as empty.
- if key_data == b'\xff' * (len(key_data) - 1) + b'\n':
- key_data = b""
-
- str_data = key_data.decode('utf8').strip()
- LOG.debug("dmi data %s returned %s", dmi_key_path, str_data)
- return str_data
-
- except Exception:
- logexc(LOG, "failed read of %s", dmi_key_path)
- return None
-
-
-def _call_dmidecode(key, dmidecode_path):
- """
-    Calls out to dmidecode to get the data out. This is mostly for supporting
-    OSes without /sys/class/dmi/id support.
- """
- try:
- cmd = [dmidecode_path, "--string", key]
- (result, _err) = subp(cmd)
- LOG.debug("dmidecode returned '%s' for '%s'", result, key)
- result = result.strip()
- if result.replace(".", "") == "":
- return ""
- return result
- except (IOError, OSError) as _err:
- LOG.debug('failed dmidecode cmd: %s\n%s', cmd, _err)
- return None
-
-
-def read_dmi_data(key):
- """
- Wrapper for reading DMI data.
-
- This will do the following (returning the first that produces a
- result):
- 1) Use a mapping to translate `key` from dmidecode naming to
- sysfs naming and look in /sys/class/dmi/... for a value.
- 2) Use `key` as a sysfs key directly and look in /sys/class/dmi/...
- 3) Fall-back to passing `key` to `dmidecode --string`.
-
- If all of the above fail to find a value, None will be returned.
- """
- syspath_value = _read_dmi_syspath(key)
- if syspath_value is not None:
- return syspath_value
-
- dmidecode_path = which('dmidecode')
- if dmidecode_path:
- return _call_dmidecode(key, dmidecode_path)
-
-    LOG.warning("did not find either path %s or dmidecode command",
-                DMI_SYS_PATH)
- return None
-
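-# Illustrative usage with a dmidecode-style key (the value shown is
-# hypothetical and machine-specific):
-#
-#   >>> read_dmi_data("system-uuid")
-#   'e556f7ad-...'
-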
-
-def message_from_string(string):
- if sys.version_info[:2] < (2, 7):
- return email.message_from_file(six.StringIO(string))
- return email.message_from_string(string)
diff --git a/cloudinit/version.py b/cloudinit/version.py
deleted file mode 100644
index 3d1d1d23..00000000
--- a/cloudinit/version.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from distutils import version as vr
-
-
-def version():
- return vr.StrictVersion("0.7.7")
-
-
-def version_string():
- return str(version())