summaryrefslogtreecommitdiff
path: root/cloudinit/sources
diff options
context:
space:
mode:
Diffstat (limited to 'cloudinit/sources')
-rw-r--r--cloudinit/sources/DataSourceAltCloud.py292
-rw-r--r--cloudinit/sources/DataSourceAzure.py651
-rw-r--r--cloudinit/sources/DataSourceBigstep.py57
-rw-r--r--cloudinit/sources/DataSourceCloudSigma.py132
-rw-r--r--cloudinit/sources/DataSourceCloudStack.py253
-rw-r--r--cloudinit/sources/DataSourceConfigDrive.py278
-rw-r--r--cloudinit/sources/DataSourceDigitalOcean.py110
-rw-r--r--cloudinit/sources/DataSourceEc2.py211
-rw-r--r--cloudinit/sources/DataSourceGCE.py167
-rw-r--r--cloudinit/sources/DataSourceMAAS.py353
-rw-r--r--cloudinit/sources/DataSourceNoCloud.py323
-rw-r--r--cloudinit/sources/DataSourceNone.py57
-rw-r--r--cloudinit/sources/DataSourceOVF.py429
-rw-r--r--cloudinit/sources/DataSourceOpenNebula.py429
-rw-r--r--cloudinit/sources/DataSourceOpenStack.py168
-rw-r--r--cloudinit/sources/DataSourceSmartOS.py781
-rw-r--r--cloudinit/sources/__init__.py371
-rw-r--r--cloudinit/sources/helpers/__init__.py13
-rw-r--r--cloudinit/sources/helpers/azure.py279
-rw-r--r--cloudinit/sources/helpers/openstack.py648
-rw-r--r--cloudinit/sources/helpers/vmware/__init__.py13
-rw-r--r--cloudinit/sources/helpers/vmware/imc/__init__.py13
-rw-r--r--cloudinit/sources/helpers/vmware/imc/boot_proto.py25
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config.py95
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_file.py129
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_namespace.py25
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_nic.py247
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_source.py23
-rw-r--r--cloudinit/sources/helpers/vmware/imc/guestcust_error.py24
-rw-r--r--cloudinit/sources/helpers/vmware/imc/guestcust_event.py27
-rw-r--r--cloudinit/sources/helpers/vmware/imc/guestcust_state.py25
-rw-r--r--cloudinit/sources/helpers/vmware/imc/guestcust_util.py128
-rw-r--r--cloudinit/sources/helpers/vmware/imc/ipv4_mode.py45
-rw-r--r--cloudinit/sources/helpers/vmware/imc/nic.py147
-rw-r--r--cloudinit/sources/helpers/vmware/imc/nic_base.py154
35 files changed, 0 insertions, 7122 deletions
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
deleted file mode 100644
index a3529609..00000000
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ /dev/null
@@ -1,292 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joe VLcek <JVLcek@RedHat.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-'''
-This file contains code used to gather the user data passed to an
-instance on RHEVm and vSphere.
-'''
-
-import errno
-import os
-import os.path
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
-from cloudinit.util import ProcessExecutionError
-
-LOG = logging.getLogger(__name__)
-
-# Needed file paths
-CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
-
-# Shell command lists
-CMD_PROBE_FLOPPY = ['/sbin/modprobe', 'floppy']
-CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--timeout=5']
-
-META_DATA_NOT_SUPPORTED = {
- 'block-device-mapping': {},
- 'instance-id': 455,
- 'local-hostname': 'localhost',
- 'placement': {},
-}
-
-
def read_user_data_callback(mount_dir):
    '''Return user data found under *mount_dir*, or None.

    Applied by util.mount_cb() to the mounted device.  The file injected
    by Deltacloud is preferred; images that instrument the injection
    themselves may drop the "deltacloud-" prefix, so a plain
    user-data.txt is tried as a fallback.
    '''
    candidates = (
        mount_dir + '/deltacloud-user-data.txt',
        mount_dir + '/user-data.txt',
    )
    for path in candidates:
        try:
            return util.load_file(path).strip()
        except IOError:
            # try the next candidate; only log when all of them fail
            continue
    util.logexc(LOG, 'Failed accessing user data file.')
    return None
-
-
class DataSourceAltCloud(sources.DataSource):
    """Datasource for RHEVm and vSphere (AltCloud) guests.

    User data is injected on a floppy device (RHEVm) or a CDROM labelled
    'CDROM' (vSphere).  Neither platform supplies real metadata, so the
    static META_DATA_NOT_SUPPORTED placeholder is used.
    """

    def __init__(self, sys_cfg, distro, paths):
        sources.DataSource.__init__(self, sys_cfg, distro, paths)
        self.seed = None
        self.supported_seed_starts = ("/", "file://")

    def __str__(self):
        root = sources.DataSource.__str__(self)
        return "%s [seed=%s]" % (root, self.seed)

    def get_cloud_type(self):
        """Identify the back-end cloud from dmi data.

        Returns:
            One of the strings 'RHEV', 'VSPHERE' or 'UNKNOWN'.
        """
        uname_arch = os.uname()[4]
        if uname_arch.startswith("arm") or uname_arch == "aarch64":
            # Disabling because dmi data is not available on ARM processors
            LOG.debug("Disabling AltCloud datasource on arm (LP: #1243287)")
            return 'UNKNOWN'

        system_name = util.read_dmi_data("system-product-name")
        if not system_name:
            return 'UNKNOWN'

        sys_name = system_name.upper()
        if sys_name.startswith('RHEV'):
            return 'RHEV'
        if sys_name.startswith('VMWARE'):
            return 'VSPHERE'
        return 'UNKNOWN'

    def get_data(self):
        """Locate and read the user data for this instance.

        The cloud type is read from CLOUD_INFO_FILE when present (images
        built with ImageFactory write that marker), otherwise derived
        from dmi data, and the provider-specific reader is dispatched.

        Returns:
            True when user data was found, False otherwise.
        """
        LOG.debug('Invoked get_data()')

        if os.path.exists(CLOUD_INFO_FILE):
            try:
                cloud_type = util.load_file(CLOUD_INFO_FILE).strip().upper()
            except IOError:
                util.logexc(LOG, 'Unable to access cloud info file at %s.',
                            CLOUD_INFO_FILE)
                return False
        else:
            cloud_type = self.get_cloud_type()

        # lazy formatting instead of eager string concatenation
        LOG.debug('cloud_type: %s', cloud_type)

        if 'RHEV' in cloud_type:
            if self.user_data_rhevm():
                return True
        elif 'VSPHERE' in cloud_type:
            if self.user_data_vsphere():
                return True
        else:
            # there was no recognized alternate cloud type
            # indicating this handler should not be used.
            return False

        # No user data found
        util.logexc(LOG, 'Failed accessing user data.')
        return False

    def user_data_rhevm(self):
        """Read user data from the floppy device (RHEV-M).

        Loads the floppy driver, waits for /dev/fd0 to settle, then
        mounts it and reads the user data file via
        read_user_data_callback.

        Returns:
            True when user data was found, False otherwise.
        """
        return_str = None

        # modprobe floppy
        cmd = CMD_PROBE_FLOPPY
        try:
            (cmd_out, _err) = util.subp(cmd)
            LOG.debug("Command: %s\nOutput: %s", ' '.join(cmd), cmd_out)
        except (ProcessExecutionError, OSError) as e:
            # BUG FIX: previously logged `_err.message`, an attribute
            # ProcessExecutionError does not reliably provide on
            # Python 3; log the exception itself instead.
            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
            return False

        floppy_dev = '/dev/fd0'

        # udevadm settle for floppy device.
        # BUG FIX: build a fresh argument list instead of appending to
        # the module-level CMD_UDEVADM_SETTLE, which previously grew by
        # one '--exit-if-exists' argument on every invocation.
        cmd = CMD_UDEVADM_SETTLE + ['--exit-if-exists=' + floppy_dev]
        try:
            (cmd_out, _err) = util.subp(cmd)
            LOG.debug("Command: %s\nOutput: %s", ' '.join(cmd), cmd_out)
        except (ProcessExecutionError, OSError) as e:
            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
            return False

        try:
            return_str = util.mount_cb(floppy_dev, read_user_data_callback)
        except OSError as err:
            # a missing floppy is expected; anything else is a real error
            if err.errno != errno.ENOENT:
                raise
        except util.MountFailedError:
            util.logexc(LOG, "Failed to mount %s when looking for user data",
                        floppy_dev)

        self.userdata_raw = return_str
        self.metadata = META_DATA_NOT_SUPPORTED

        return bool(return_str)

    def user_data_vsphere(self):
        """Read user data from a CDROM labelled 'CDROM' (vSphere).

        Each candidate device is mounted in turn and the user data file
        is read via read_user_data_callback.

        Returns:
            True when user data was found, False otherwise.
        """
        return_str = None
        for cdrom_dev in util.find_devs_with('LABEL=CDROM'):
            try:
                return_str = util.mount_cb(cdrom_dev, read_user_data_callback)
                if return_str:
                    break
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            except util.MountFailedError:
                util.logexc(LOG, "Failed to mount %s when looking for user "
                            "data", cdrom_dev)

        self.userdata_raw = return_str
        self.metadata = META_DATA_NOT_SUPPORTED

        return bool(return_str)
-
# Used to match classes to dependencies
# Source DataSourceAltCloud does not really depend on networking.
# In the future 'dsmode' like behavior can be added to offer user
# the ability to run before networking.
datasources = [
    (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]


# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
    """Return entries from `datasources` whose dependencies match *depends*."""
    return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
deleted file mode 100644
index 8c7e8673..00000000
--- a/cloudinit/sources/DataSourceAzure.py
+++ /dev/null
@@ -1,651 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2013 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import base64
-import contextlib
-import crypt
-import fnmatch
-import os
-import os.path
-import time
-import xml.etree.ElementTree as ET
-
-from xml.dom import minidom
-
-from cloudinit.sources.helpers.azure import get_metadata_from_fabric
-
-from cloudinit import log as logging
-from cloudinit.settings import PER_ALWAYS
-from cloudinit import sources
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-DS_NAME = 'Azure'
-DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
-AGENT_START = ['service', 'walinuxagent', 'start']
-BOUNCE_COMMAND = [
- 'sh', '-xc',
- "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"
-]
-
-BUILTIN_DS_CONFIG = {
- 'agent_command': AGENT_START,
- 'data_dir': "/var/lib/waagent",
- 'set_hostname': True,
- 'hostname_bounce': {
- 'interface': 'eth0',
- 'policy': True,
- 'command': BOUNCE_COMMAND,
- 'hostname_command': 'hostname',
- },
- 'disk_aliases': {'ephemeral0': '/dev/sdb'},
-}
-
-BUILTIN_CLOUD_CONFIG = {
- 'disk_setup': {
- 'ephemeral0': {'table_type': 'gpt',
- 'layout': [100],
- 'overwrite': True},
- },
- 'fs_setup': [{'filesystem': 'ext4',
- 'device': 'ephemeral0.1',
- 'replace_fs': 'ntfs'}],
-}
-
-DS_CFG_PATH = ['datasource', DS_NAME]
-DEF_EPHEMERAL_LABEL = 'Temporary Storage'
-
-# The redacted password fails to meet password complexity requirements
-# so we can safely use this to mask/redact the password in the ovf-env.xml
-DEF_PASSWD_REDACTION = 'REDACTED'
-
-
def get_hostname(hostname_command='hostname'):
    """Return the current system hostname as reported by *hostname_command*."""
    (out, _err) = util.subp(hostname_command, capture=True)
    return out.strip()
-
-
def set_hostname(hostname, hostname_command='hostname'):
    """Set the system hostname to *hostname* via *hostname_command*."""
    cmd = [hostname_command, hostname]
    util.subp(cmd)
-
-
@contextlib.contextmanager
def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
    """
    Set a temporary hostname, restoring the previous hostname on exit.

    Will have the value of the previous hostname when used as a context
    manager, or None if the hostname was not changed.
    """
    policy = cfg['hostname_bounce']['policy']
    previous_hostname = get_hostname(hostname_command)
    # No change when set_hostname is disabled, the bounce policy is off,
    # or the hostname already matches (unless policy is 'force').
    if (not util.is_true(cfg.get('set_hostname')) or
        util.is_false(policy) or
        (previous_hostname == temp_hostname and policy != 'force')):
        yield None
        return
    set_hostname(temp_hostname, hostname_command)
    try:
        yield previous_hostname
    finally:
        # always restore the original name, even if the body raised
        set_hostname(previous_hostname, hostname_command)
-
-
class DataSourceAzureNet(sources.DataSource):
    """Datasource for Microsoft Azure.

    The provisioning payload (ovf-env.xml) is looked for on an attached
    iso9660/udf device, the seed dir, or the agent data_dir; Azure
    ejects the cdrom on reboot, so the cached copy in data_dir keeps
    reboots working.  Additional metadata (ssh public keys) comes either
    directly from the fabric or via the walinuxagent, depending on the
    'agent_command' setting.
    """

    def __init__(self, sys_cfg, distro, paths):
        sources.DataSource.__init__(self, sys_cfg, distro, paths)
        self.seed_dir = os.path.join(paths.seed_dir, 'azure')
        self.cfg = {}
        self.seed = None
        # user-supplied datasource config wins over the builtin defaults
        self.ds_cfg = util.mergemanydict([
            util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
            BUILTIN_DS_CONFIG])

    def __str__(self):
        root = sources.DataSource.__str__(self)
        return "%s [seed=%s]" % (root, self.seed)

    def get_metadata_from_agent(self):
        """Run walinuxagent and gather the ssh public keys it produces.

        The instance hostname is published (temporarily set and the
        interface bounced) while the agent runs, then restored by the
        context manager.
        """
        temp_hostname = self.metadata.get('local-hostname')
        hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
        with temporary_hostname(temp_hostname, self.ds_cfg,
                                hostname_command=hostname_command) \
                as previous_hostname:
            if (previous_hostname is not None and
                util.is_true(self.ds_cfg.get('set_hostname'))):
                cfg = self.ds_cfg['hostname_bounce']
                try:
                    perform_hostname_bounce(hostname=temp_hostname,
                                            cfg=cfg,
                                            prev_hostname=previous_hostname)
                except Exception as e:
                    # hostname publication is best-effort; keep going
                    LOG.warn("Failed publishing hostname: %s", e)
                    util.logexc(LOG, "handling set_hostname failed")

            try:
                invoke_agent(self.ds_cfg['agent_command'])
            except util.ProcessExecutionError:
                # claim the datasource even if the command failed
                util.logexc(LOG, "agent command '%s' failed.",
                            self.ds_cfg['agent_command'])

            ddir = self.ds_cfg['data_dir']

            fp_files = []
            key_value = None
            for pk in self.cfg.get('_pubkeys', []):
                if pk.get('value', None):
                    key_value = pk['value']
                    LOG.debug("ssh authentication: using value from fabric")
                else:
                    # the agent writes one <fingerprint>.crt per key
                    # NOTE(review): "fabirc" typo below is a runtime log
                    # string and is left untouched here.
                    bname = str(pk['fingerprint'] + ".crt")
                    fp_files += [os.path.join(ddir, bname)]
                    LOG.debug("ssh authentication: "
                              "using fingerprint from fabirc")

        # bounded wait for the agent to write the .crt files
        missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
                                func=wait_for_files,
                                args=(fp_files,))
        if len(missing):
            LOG.warn("Did not find files, but going on: %s", missing)

        metadata = {}
        metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
        return metadata

    def get_data(self):
        """Crawl candidate locations for the ovf-env payload.

        Returns True when the payload was found and the fabric/agent
        metadata exchange succeeded.
        """
        # azure removes/ejects the cdrom containing the ovf-env.xml
        # file on reboot. So, in order to successfully reboot we
        # need to look in the datadir and consider that valid
        ddir = self.ds_cfg['data_dir']

        candidates = [self.seed_dir]
        candidates.extend(list_possible_azure_ds_devs())
        if ddir:
            candidates.append(ddir)

        found = None

        for cdev in candidates:
            try:
                if cdev.startswith("/dev/"):
                    ret = util.mount_cb(cdev, load_azure_ds_dir)
                else:
                    ret = load_azure_ds_dir(cdev)

            except NonAzureDataSource:
                # not this candidate; keep looking
                continue
            except BrokenAzureDataSource as exc:
                # recognizably Azure but malformed: abort loudly
                raise exc
            except util.MountFailedError:
                LOG.warn("%s was not mountable", cdev)
                continue

            (md, self.userdata_raw, cfg, files) = ret
            self.seed = cdev
            self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
            self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
            found = cdev

            LOG.debug("found datasource in %s", cdev)
            break

        if not found:
            return False

        if found == ddir:
            LOG.debug("using files cached in %s", ddir)

        # azure / hyper-v provides random data here
        seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
                              quiet=True, decode=False)
        if seed:
            self.metadata['random_seed'] = seed

        # now update ds_cfg to reflect contents pass in config
        user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
        self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])

        # walinux agent writes files world readable, but expects
        # the directory to be protected.
        write_files(ddir, files, dirmode=0o700)

        # '__builtin__' means talk to the fabric directly rather than
        # shelling out to the agent
        if self.ds_cfg['agent_command'] == '__builtin__':
            metadata_func = get_metadata_from_fabric
        else:
            metadata_func = self.get_metadata_from_agent
        try:
            fabric_data = metadata_func()
        except Exception as exc:
            LOG.info("Error communicating with Azure fabric; assume we aren't"
                     " on Azure.", exc_info=True)
            return False

        self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
        self.metadata.update(fabric_data)

        found_ephemeral = find_fabric_formatted_ephemeral_disk()
        if found_ephemeral:
            self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral
            LOG.debug("using detected ephemeral0 of %s", found_ephemeral)

        # force disk_setup/mounts to run this boot when a fresh
        # ephemeral disk was detected
        cc_modules_override = support_new_ephemeral(self.sys_cfg)
        if cc_modules_override:
            self.cfg['cloud_config_modules'] = cc_modules_override

        return True

    def device_name_to_device(self, name):
        """Map a device alias (e.g. 'ephemeral0') to a device path."""
        return self.ds_cfg['disk_aliases'].get(name)

    def get_config_obj(self):
        """Return cloud-config derived from the ovf payload."""
        return self.cfg

    def check_instance_id(self, sys_cfg):
        # quickly (local check only) if self.instance_id is still valid
        return sources.instance_id_matches_system_uuid(self.get_instance_id())
-
-
def count_files(mp):
    """Return the number of entries under *mp*, ignoring cdrom-like names.

    An entry counts when fnmatch finds at least one character in its
    name outside the set {c, d, r, o, m}.
    """
    pattern = '*[!cdrom]*'
    matched = fnmatch.filter(os.listdir(mp), pattern)
    return len(matched)
-
-
def find_fabric_formatted_ephemeral_part():
    """
    Locate the first fabric formatted ephemeral device.

    Returns the stable /dev/disk link when its target is NTFS
    formatted, otherwise None.
    """
    for candidate in ('/dev/disk/cloud/azure_resource-part1',
                      '/dev/disk/azure/resource-part1'):
        if os.path.exists(candidate):
            # fabric-provided ephemeral storage arrives NTFS formatted;
            # compare against the resolved device node
            if os.path.realpath(candidate) in util.find_devs_with("TYPE=ntfs"):
                return candidate
            return None
    return None
-
-
def find_fabric_formatted_ephemeral_disk():
    """
    Get the ephemeral disk.

    Derives the whole-disk path from the partition link, e.g.
    '.../azure_resource-part1' -> '.../azure_resource'.
    """
    part_dev = find_fabric_formatted_ephemeral_part()
    return part_dev.split('-')[0] if part_dev else None
-
-
def support_new_ephemeral(cfg):
    """
    Windows Azure makes ephemeral devices ephemeral to boot; a ephemeral device
    may be presented as a fresh device, or not.

    Since the knowledge of when a disk is supposed to be plowed under is
    specific to Windows Azure, the logic resides here in the datasource. When a
    new ephemeral device is detected, cloud-init overrides the default
    frequency for both disk-setup and mounts for the current boot only.

    Returns the overridden module list, or None when no override is needed.
    """
    device = find_fabric_formatted_ephemeral_part()
    if not device:
        LOG.debug("no default fabric formated ephemeral0.1 found")
        return None
    LOG.debug("fabric formated ephemeral0.1 device at %s", device)

    # A populated filesystem means the disk survived from a previous
    # boot and must be preserved.
    file_count = 0
    try:
        file_count = util.mount_cb(device, count_files)
    except Exception:
        return None
    LOG.debug("fabric prepared ephmeral0.1 has %s files on it", file_count)

    if file_count >= 1:
        LOG.debug("fabric prepared ephemeral0.1 will be preserved")
        return None
    else:
        # if device was already mounted, then we need to unmount it
        # race conditions could allow for a check-then-unmount
        # to have a false positive. so just unmount and then check.
        try:
            util.subp(['umount', device])
        except util.ProcessExecutionError as e:
            if device in util.mounts():
                LOG.warn("Failed to unmount %s, will not reformat.", device)
            LOG.debug("Failed umount: %s", e)
            return None

    LOG.debug("cloud-init will format ephemeral0.1 this boot.")
    LOG.debug("setting disk_setup and mounts modules 'always' for this boot")

    cc_modules = cfg.get('cloud_config_modules')
    if not cc_modules:
        return None

    # Re-emit the configured module list with disk_setup and mounts
    # forced to run every boot (PER_ALWAYS) instead of once-per-instance.
    mod_list = []
    for mod in cc_modules:
        if mod in ("disk_setup", "mounts"):
            mod_list.append([mod, PER_ALWAYS])
            LOG.debug("set module '%s' to 'always' for this boot", mod)
        else:
            mod_list.append(mod)
    return mod_list
-
-
def perform_hostname_bounce(hostname, cfg, prev_hostname):
    """Publish *hostname* by running the configured bounce command.

    cfg is the 'hostname_bounce' dict (command, interface, policy).
    """
    # set the hostname to 'hostname' if it is not already set to that.
    # then, if policy is not off, bounce the interface using command
    command = cfg['command']
    interface = cfg['interface']
    policy = cfg['policy']

    msg = ("hostname=%s policy=%s interface=%s" %
           (hostname, policy, interface))
    # expose old/new names so a custom bounce command can use them
    env = os.environ.copy()
    env['interface'] = interface
    env['hostname'] = hostname
    env['old_hostname'] = prev_hostname

    if command == "builtin":
        command = BOUNCE_COMMAND

    LOG.debug("pubhname: publishing hostname [%s]", msg)
    # a string command runs through a shell; a list is exec'd directly
    shell = not isinstance(command, (list, tuple))
    # capture=False, see comments in bug 1202758 and bug 1206164.
    util.log_time(logfunc=LOG.debug, msg="publishing hostname",
                  get_uptime=True, func=util.subp,
                  kwargs={'args': command, 'shell': shell, 'capture': False,
                          'env': env})
-
-
def crtfile_to_pubkey(fname, data=None):
    """Convert an x509 certificate file into an OpenSSH public key line.

    openssl extracts the PKCS8 public key from the certificate and
    ssh-keygen converts it to the authorized_keys format.
    """
    script = ('openssl x509 -noout -pubkey < "$0" |'
              'ssh-keygen -i -m PKCS8 -f /dev/stdin')
    out, _err = util.subp(['sh', '-c', script, fname],
                          capture=True, data=data)
    return out.rstrip()
-
-
def pubkeys_from_crt_files(flist):
    """Return ssh public keys converted from the cert files in *flist*.

    Files that fail conversion are collected and logged, not fatal.
    """
    converted = []
    failed = []
    for crt in flist:
        try:
            converted.append(crtfile_to_pubkey(crt))
        except util.ProcessExecutionError:
            failed.append(crt)

    if failed:
        LOG.warn("failed to convert the crt files to pubkey: %s", failed)

    return converted
-
-
def wait_for_files(flist, maxwait=60, naplen=.5):
    """Poll until every path in *flist* exists or *maxwait* seconds pass.

    Returns an empty list when all files appeared, otherwise the set of
    paths still missing when the wait expired.
    """
    missing = set(flist)
    waited = 0
    while waited < maxwait:
        missing = set(f for f in missing if not os.path.exists(f))
        if not missing:
            return []
        time.sleep(naplen)
        waited += naplen
    return missing
-
-
def write_files(datadir, files, dirmode=None):
    """Write *files* (name -> content) under *datadir* with mode 0600.

    The UserPassword inside any ovf-env.xml content is redacted before
    being persisted, since Azure delivers it in plain text.
    """

    def _redact_password(cnt, fname):
        """Azure provides the UserPassword in plain text. So we redact it"""
        try:
            root = ET.fromstring(cnt)
            for elem in root.iter():
                if ('UserPassword' in elem.tag and
                        elem.text != DEF_PASSWD_REDACTION):
                    elem.text = DEF_PASSWD_REDACTION
            return ET.tostring(root)
        except Exception:
            LOG.critical("failed to redact userpassword in %s", fname)
            return cnt

    if not datadir:
        return
    util.ensure_dir(datadir, dirmode)
    for name, content in (files or {}).items():
        fname = os.path.join(datadir, name)
        if 'ovf-env.xml' in name:
            content = _redact_password(content, fname)
        util.write_file(filename=fname, content=content, mode=0o600)
-
-
def invoke_agent(cmd):
    """Run the configured agent command, if any.

    Kept as a standalone function to simplify patching it in tests.
    """
    if not cmd:
        LOG.debug("not invoking agent")
        return
    LOG.debug("invoking agent: %s", cmd)
    # a string command goes through the shell; a list is exec'd directly
    util.subp(cmd, shell=(not isinstance(cmd, list)))
-
-
def find_child(node, filter_func):
    """Return the direct children of DOM *node* accepted by *filter_func*."""
    if not node.hasChildNodes():
        return []
    return [child for child in node.childNodes if filter_func(child)]
-
-
def load_azure_ovf_pubkeys(sshnode):
    """Parse the ovf-env 'SSH' node into a list of public key dicts."""
    # This parses a 'SSH' node formatted like below, and returns
    # an array of dicts.
    #  [{'fp': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7',
    #    'path': 'where/to/go'}]
    #
    # <SSH><PublicKeys>
    #   <PublicKey><Fingerprint>ABC</FingerPrint><Path>/ABC</Path>
    #   ...
    # </PublicKeys></SSH>
    results = find_child(sshnode, lambda n: n.localName == "PublicKeys")
    if len(results) == 0:
        return []
    if len(results) > 1:
        raise BrokenAzureDataSource("Multiple 'PublicKeys'(%s) in SSH node" %
                                    len(results))

    pubkeys_node = results[0]
    pubkeys = find_child(pubkeys_node, lambda n: n.localName == "PublicKey")

    if len(pubkeys) == 0:
        return []

    found = []
    text_node = minidom.Document.TEXT_NODE

    for pk_node in pubkeys:
        if not pk_node.hasChildNodes():
            continue

        # each entry carries a cert fingerprint, a deploy path, and an
        # optional literal key value
        cur = {'fingerprint': "", 'path': "", 'value': ""}
        for child in pk_node.childNodes:
            if child.nodeType == text_node or not child.localName:
                continue

            name = child.localName.lower()

            # ignore elements other than the three keys of `cur`
            if name not in cur.keys():
                continue

            # only accept elements with exactly one text child
            if (len(child.childNodes) != 1 or
                child.childNodes[0].nodeType != text_node):
                continue

            cur[name] = child.childNodes[0].wholeText.strip()
        found.append(cur)

    return found
-
-
def read_azure_ovf(contents):
    """Parse ovf-env.xml *contents* into (metadata, userdata, cfg).

    Raises NonAzureDataSource when the document is not an Azure
    ovf-env, and BrokenAzureDataSource when it is recognizably Azure
    but malformed.
    """
    try:
        dom = minidom.parseString(contents)
    except Exception as e:
        raise BrokenAzureDataSource("invalid xml: %s" % e)

    results = find_child(dom.documentElement,
                         lambda n: n.localName == "ProvisioningSection")

    if len(results) == 0:
        raise NonAzureDataSource("No ProvisioningSection")
    if len(results) > 1:
        raise BrokenAzureDataSource("found '%d' ProvisioningSection items" %
                                    len(results))
    provSection = results[0]

    lpcs_nodes = find_child(provSection,
                            lambda n:
                            n.localName == "LinuxProvisioningConfigurationSet")

    # BUG FIX: these two checks previously tested len(results) -- the
    # ProvisioningSection list already validated above -- instead of the
    # LinuxProvisioningConfigurationSet nodes found here, so a missing
    # or duplicated configuration set was never detected.
    if len(lpcs_nodes) == 0:
        raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
    if len(lpcs_nodes) > 1:
        raise BrokenAzureDataSource("found '%d' %ss" %
                                    ("LinuxProvisioningConfigurationSet",
                                     len(lpcs_nodes)))
    lpcs = lpcs_nodes[0]

    if not lpcs.hasChildNodes():
        raise BrokenAzureDataSource("no child nodes of configuration set")

    # BUG FIX: md_props was the string 'seedfrom', so the membership
    # test below was a substring match ('seed' in 'seedfrom' is True);
    # a tuple makes it an exact-name match.
    md_props = ('seedfrom',)
    md = {'azure_data': {}}
    cfg = {}
    ud = ""
    password = None
    username = None

    for child in lpcs.childNodes:
        if child.nodeType == dom.TEXT_NODE or not child.localName:
            continue

        name = child.localName.lower()

        # a "simple" node holds exactly one text child; capture it
        simple = False
        value = ""
        if (len(child.childNodes) == 1 and
                child.childNodes[0].nodeType == dom.TEXT_NODE):
            simple = True
            value = child.childNodes[0].wholeText

        attrs = dict(child.attributes.items())

        # we accept either UserData or CustomData. If both are present
        # then behavior is undefined.
        if name in ("userdata", "customdata"):
            if attrs.get('encoding') in (None, "base64"):
                ud = base64.b64decode(''.join(value.split()))
            else:
                ud = value
        elif name == "username":
            username = value
        elif name == "userpassword":
            password = value
        elif name == "hostname":
            md['local-hostname'] = value
        elif name == "dscfg":
            if attrs.get('encoding') in (None, "base64"):
                dscfg = base64.b64decode(''.join(value.split()))
            else:
                dscfg = value
            cfg['datasource'] = {DS_NAME: util.load_yaml(dscfg, default={})}
        elif name == "ssh":
            cfg['_pubkeys'] = load_azure_ovf_pubkeys(child)
        elif name == "disablesshpasswordauthentication":
            cfg['ssh_pwauth'] = util.is_false(value)
        elif simple:
            if name in md_props:
                md[name] = value
            else:
                md['azure_data'][name] = value

    # build the default user; the redaction sentinel is never treated
    # as a real password
    defuser = {}
    if username:
        defuser['name'] = username
    if password and DEF_PASSWD_REDACTION != password:
        defuser['passwd'] = encrypt_pass(password)
        defuser['lock_passwd'] = False

    if defuser:
        cfg['system_info'] = {'default_user': defuser}

    if 'ssh_pwauth' not in cfg and password:
        cfg['ssh_pwauth'] = True

    return (md, ud, cfg)
-
-
def encrypt_pass(password, salt_id="$6$"):
    """Return a crypt(3) hash of *password* with a random 16-char salt.

    The default salt_id of "$6$" selects SHA-512 hashing.
    """
    salt = salt_id + util.rand_str(strlen=16)
    return crypt.crypt(password, salt)
-
-
def list_possible_azure_ds_devs():
    """Return candidate azure datasource devices, sorted descending."""
    devlist = [dev
               for fstype in ("iso9660", "udf")
               for dev in util.find_devs_with("TYPE=%s" % fstype)]
    return sorted(devlist, reverse=True)
-
-
def load_azure_ds_dir(source_dir):
    """Load and parse ovf-env.xml from *source_dir*.

    Returns (metadata, userdata, cfg, files); raises NonAzureDataSource
    when no ovf-env.xml is present.
    """
    ovf_file = os.path.join(source_dir, "ovf-env.xml")
    if not os.path.isfile(ovf_file):
        raise NonAzureDataSource("No ovf-env file found")

    with open(ovf_file, "rb") as fp:
        contents = fp.read()

    md, ud, cfg = read_azure_ovf(contents)
    return (md, ud, cfg, {'ovf-env.xml': contents})
-
-
class BrokenAzureDataSource(Exception):
    """Raised when a payload is recognizably Azure but cannot be parsed."""
    pass
-
-
class NonAzureDataSource(Exception):
    """Raised when the examined source is not an Azure payload at all."""
    pass
-
-
# Used to match classes to dependencies
datasources = [
    (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]


# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
    """Return entries from `datasources` whose dependencies match *depends*."""
    return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py
deleted file mode 100644
index f80956a5..00000000
--- a/cloudinit/sources/DataSourceBigstep.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#
-# Copyright (C) 2015-2016 Bigstep Cloud Ltd.
-#
-# Author: Alexandru Sirbu <alexandru.sirbu@bigstep.com>
-#
-
-import errno
-import json
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
class DataSourceBigstep(sources.DataSource):
    """Datasource for the Bigstep cloud.

    The full payload (metadata, vendordata, userdata) is fetched as one
    JSON document from a per-instance URL seeded on disk.
    """

    def __init__(self, sys_cfg, distro, paths):
        sources.DataSource.__init__(self, sys_cfg, distro, paths)
        self.metadata = {}
        self.vendordata_raw = ""
        self.userdata_raw = ""

    def get_data(self, apply_filter=False):
        """Fetch and unpack the JSON document from the seeded URL."""
        url = get_url_from_file()
        if url is None:
            # no seed file: probably not a Bigstep instance
            return False
        response = url_helper.readurl(url)
        decoded = json.loads(response.contents)
        self.metadata = decoded["metadata"]
        self.vendordata_raw = decoded["vendordata_raw"]
        self.userdata_raw = decoded["userdata_raw"]
        return True
-
-
def get_url_from_file():
    """Return the metadata URL from the Bigstep seed file, or None.

    A missing file means the server probably is not a Bigstep instance;
    any other I/O failure is unexpected and re-raised for investigation.
    """
    try:
        return util.load_file("/var/lib/cloud/data/seed/bigstep/url")
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        return None
-
# Used to match classes to dependencies
datasources = [
    (DataSourceBigstep, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]


# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
    """Return entries from `datasources` whose dependencies match *depends*."""
    return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
deleted file mode 100644
index d1f806d6..00000000
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 CloudSigma
-#
-# Author: Kiril Vladimiroff <kiril.vladimiroff@cloudsigma.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from base64 import b64decode
-import os
-import re
-
-from cloudinit.cs_utils import Cepko
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-class DataSourceCloudSigma(sources.DataSource):
- """
- Uses cepko in order to gather the server context from the VM.
-
- For more information about CloudSigma's Server Context:
- http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
- """
- def __init__(self, sys_cfg, distro, paths):
- self.cepko = Cepko()
- self.ssh_public_key = ''
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
-
- def is_running_in_cloudsigma(self):
- """
- Uses dmi data to detect if this instance of cloud-init is running
- in the CloudSigma's infrastructure.
- """
- uname_arch = os.uname()[4]
- if uname_arch.startswith("arm") or uname_arch == "aarch64":
- # Disabling because dmi data on ARM processors
- LOG.debug("Disabling CloudSigma datasource on arm (LP: #1243287)")
- return False
-
- LOG.debug("determining hypervisor product name via dmi data")
- sys_product_name = util.read_dmi_data("system-product-name")
- if not sys_product_name:
- LOG.debug("system-product-name not available in dmi data")
- return False
- else:
- LOG.debug("detected hypervisor as %s", sys_product_name)
- return 'cloudsigma' in sys_product_name.lower()
-
- LOG.warn("failed to query dmi data for system product name")
- return False
-
- def get_data(self):
- """
- Metadata is the whole server context and /meta/cloud-config is used
- as userdata.
- """
- dsmode = None
- if not self.is_running_in_cloudsigma():
- return False
-
- try:
- server_context = self.cepko.all().result
- server_meta = server_context['meta']
- except Exception:
- # TODO: check for explicit "config on", and then warn
- # but since no explicit config is available now, just debug.
- LOG.debug("CloudSigma: Unable to read from serial port")
- return False
-
- self.dsmode = self._determine_dsmode(
- [server_meta.get('cloudinit-dsmode')])
- if dsmode == sources.DSMODE_DISABLED:
- return False
-
- base64_fields = server_meta.get('base64_fields', '').split(',')
- self.userdata_raw = server_meta.get('cloudinit-user-data', "")
- if 'cloudinit-user-data' in base64_fields:
- self.userdata_raw = b64decode(self.userdata_raw)
- if 'cloudinit' in server_context.get('vendor_data', {}):
- self.vendordata_raw = server_context["vendor_data"]["cloudinit"]
-
- self.metadata = server_context
- self.ssh_public_key = server_meta['ssh_public_key']
-
- return True
-
- def get_hostname(self, fqdn=False, resolve_ip=False):
- """
- Cleans up and uses the server's name if the latter is set. Otherwise
- the first part from uuid is being used.
- """
- if re.match(r'^[A-Za-z0-9 -_\.]+$', self.metadata['name']):
- return self.metadata['name'][:61]
- else:
- return self.metadata['uuid'].split('-')[0]
-
- def get_public_ssh_keys(self):
- return [self.ssh_public_key]
-
- def get_instance_id(self):
- return self.metadata['uuid']
-
-
-# Legacy: Must be present in case we load an old pkl object
-DataSourceCloudSigmaNet = DataSourceCloudSigma
-
-# Used to match classes to dependencies. Since this datasource uses the serial
-# port network is not really required, so it's okay to load without it, too.
-datasources = [
- (DataSourceCloudSigma, (sources.DEP_FILESYSTEM)),
-]
-
-
-def get_datasource_list(depends):
- """
- Return a list of data sources that match this set of dependencies
- """
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
deleted file mode 100644
index 4de1f563..00000000
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Cosmin Luta
-# Copyright (C) 2012 Yahoo! Inc.
-# Copyright (C) 2012 Gerard Dethier
-# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
-#
-# Author: Cosmin Luta <q4break@gmail.com>
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-# Author: Gerard Dethier <g.dethier@gmail.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from socket import inet_ntoa
-from struct import pack
-import time
-
-from cloudinit import ec2_utils as ec2
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper as uhelp
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-class CloudStackPasswordServerClient(object):
- """
- Implements password fetching from the CloudStack password server.
-
- http://cloudstack-administration.readthedocs.org/
- en/latest/templates.html#adding-password-management-to-your-templates
- has documentation about the system. This implementation is following that
- found at
- https://github.com/shankerbalan/cloudstack-scripts/
- blob/master/cloud-set-guest-password-debian
- """
-
- def __init__(self, virtual_router_address):
- self.virtual_router_address = virtual_router_address
-
- def _do_request(self, domu_request):
- # The password server was in the past, a broken HTTP server, but is now
- # fixed. wget handles this seamlessly, so it's easier to shell out to
- # that rather than write our own handling code.
- output, _ = util.subp([
- 'wget', '--quiet', '--tries', '3', '--timeout', '20',
- '--output-document', '-', '--header',
- 'DomU_Request: {0}'.format(domu_request),
- '{0}:8080'.format(self.virtual_router_address)
- ])
- return output.strip()
-
- def get_password(self):
- password = self._do_request('send_my_password')
- if password in ['', 'saved_password']:
- return None
- if password == 'bad_request':
- raise RuntimeError('Error when attempting to fetch root password.')
- self._do_request('saved_password')
- return password
-
-
-class DataSourceCloudStack(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'cs')
- # Cloudstack has its metadata/userdata URLs located at
- # http://<virtual-router-ip>/latest/
- self.api_ver = 'latest'
- self.vr_addr = get_vr_address()
- if not self.vr_addr:
- raise RuntimeError("No virtual router found!")
- self.metadata_address = "http://%s/" % (self.vr_addr,)
- self.cfg = {}
-
- def _get_url_settings(self):
- mcfg = self.ds_cfg
- max_wait = 120
- try:
- max_wait = int(mcfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
-
- if max_wait == 0:
- return False
-
- timeout = 50
- try:
- timeout = int(mcfg.get("timeout", timeout))
- except Exception:
- util.logexc(LOG, "Failed to get timeout, using %s", timeout)
-
- return (max_wait, timeout)
-
- def wait_for_metadata_service(self):
- (max_wait, timeout) = self._get_url_settings()
-
- urls = [uhelp.combine_url(self.metadata_address,
- 'latest/meta-data/instance-id')]
- start_time = time.time()
- url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout, status_cb=LOG.warn)
-
- if url:
- LOG.debug("Using metadata source: '%s'", url)
- else:
- LOG.critical(("Giving up on waiting for the metadata from %s"
- " after %s seconds"),
- urls, int(time.time() - start_time))
-
- return bool(url)
-
- def get_config_obj(self):
- return self.cfg
-
- def get_data(self):
- seed_ret = {}
- if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
- self.userdata_raw = seed_ret['user-data']
- self.metadata = seed_ret['meta-data']
- LOG.debug("Using seeded cloudstack data from: %s", self.seed_dir)
- return True
- try:
- if not self.wait_for_metadata_service():
- return False
- start_time = time.time()
- self.userdata_raw = ec2.get_instance_userdata(
- self.api_ver, self.metadata_address)
- self.metadata = ec2.get_instance_metadata(self.api_ver,
- self.metadata_address)
- LOG.debug("Crawl of metadata service took %s seconds",
- int(time.time() - start_time))
- password_client = CloudStackPasswordServerClient(self.vr_addr)
- try:
- set_password = password_client.get_password()
- except Exception:
- util.logexc(LOG,
- 'Failed to fetch password from virtual router %s',
- self.vr_addr)
- else:
- if set_password:
- self.cfg = {
- 'ssh_pwauth': True,
- 'password': set_password,
- 'chpasswd': {
- 'expire': False,
- },
- }
- return True
- except Exception:
- util.logexc(LOG, 'Failed fetching from metadata service %s',
- self.metadata_address)
- return False
-
- def get_instance_id(self):
- return self.metadata['instance-id']
-
- @property
- def availability_zone(self):
- return self.metadata['availability-zone']
-
-
-def get_default_gateway():
- # Returns the default gateway ip address in the dotted format.
- lines = util.load_file("/proc/net/route").splitlines()
- for line in lines:
- items = line.split("\t")
- if items[1] == "00000000":
- # Found the default route, get the gateway
- gw = inet_ntoa(pack("<L", int(items[2], 16)))
- LOG.debug("Found default route, gateway is %s", gw)
- return gw
- return None
-
-
-def get_dhclient_d():
- # find lease files directory
- supported_dirs = ["/var/lib/dhclient", "/var/lib/dhcp"]
- for d in supported_dirs:
- if os.path.exists(d):
- LOG.debug("Using %s lease directory", d)
- return d
- return None
-
-
-def get_latest_lease():
- # find latest lease file
- lease_d = get_dhclient_d()
- if not lease_d:
- return None
- lease_files = os.listdir(lease_d)
- latest_mtime = -1
- latest_file = None
- for file_name in lease_files:
- if file_name.startswith("dhclient.") and \
- (file_name.endswith(".lease") or file_name.endswith(".leases")):
- abs_path = os.path.join(lease_d, file_name)
- mtime = os.path.getmtime(abs_path)
- if mtime > latest_mtime:
- latest_mtime = mtime
- latest_file = abs_path
- return latest_file
-
-
-def get_vr_address():
- # Get the address of the virtual router via dhcp leases
- # see http://bit.ly/T76eKC for documentation on the virtual router.
- # If no virtual router is detected, fallback on default gateway.
- lease_file = get_latest_lease()
- if not lease_file:
- LOG.debug("No lease file found, using default gateway")
- return get_default_gateway()
-
- latest_address = None
- with open(lease_file, "r") as fd:
- for line in fd:
- if "dhcp-server-identifier" in line:
- words = line.strip(" ;\r\n").split(" ")
- if len(words) > 2:
- dhcp = words[2]
- LOG.debug("Found DHCP identifier %s", dhcp)
- latest_address = dhcp
- if not latest_address:
- # No virtual router found, fallback on default gateway
- LOG.debug("No DHCP found, using default gateway")
- return get_default_gateway()
- return latest_address
-
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceCloudStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
deleted file mode 100644
index 91d6ff13..00000000
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ /dev/null
@@ -1,278 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
-from cloudinit.net import eni
-
-from cloudinit.sources.helpers import openstack
-
-LOG = logging.getLogger(__name__)
-
-# Various defaults/constants...
-DEFAULT_IID = "iid-dsconfigdrive"
-DEFAULT_MODE = 'pass'
-DEFAULT_METADATA = {
- "instance-id": DEFAULT_IID,
-}
-FS_TYPES = ('vfat', 'iso9660')
-LABEL_TYPES = ('config-2',)
-POSSIBLE_MOUNTS = ('sr', 'cd')
-OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS
- for i in range(0, 2)))
-
-
-class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)
- self.source = None
- self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
- self.version = None
- self.ec2_metadata = None
- self._network_config = None
- self.network_json = None
- self.network_eni = None
- self.known_macs = None
- self.files = {}
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
- mstr += "[source=%s]" % (self.source)
- return mstr
-
- def get_data(self):
- found = None
- md = {}
- results = {}
- if os.path.isdir(self.seed_dir):
- try:
- results = read_config_drive(self.seed_dir)
- found = self.seed_dir
- except openstack.NonReadable:
- util.logexc(LOG, "Failed reading config drive from %s",
- self.seed_dir)
- if not found:
- for dev in find_candidate_devs():
- try:
- # Set mtype if freebsd and turn off sync
- if dev.startswith("/dev/cd"):
- mtype = "cd9660"
- sync = False
- else:
- mtype = None
- sync = True
- results = util.mount_cb(dev, read_config_drive,
- mtype=mtype, sync=sync)
- found = dev
- except openstack.NonReadable:
- pass
- except util.MountFailedError:
- pass
- except openstack.BrokenMetadata:
- util.logexc(LOG, "Broken config drive: %s", dev)
- if found:
- break
- if not found:
- return False
-
- md = results.get('metadata', {})
- md = util.mergemanydict([md, DEFAULT_METADATA])
-
- self.dsmode = self._determine_dsmode(
- [results.get('dsmode'), self.ds_cfg.get('dsmode'),
- sources.DSMODE_PASS if results['version'] == 1 else None])
-
- if self.dsmode == sources.DSMODE_DISABLED:
- return False
-
- prev_iid = get_previous_iid(self.paths)
- cur_iid = md['instance-id']
- if prev_iid != cur_iid:
- # better would be to handle this centrally, allowing
- # the datasource to do something on new instance id
- # note, networking is only rendered here if dsmode is DSMODE_PASS
- # which means "DISABLED, but render files and networking"
- on_first_boot(results, distro=self.distro,
- network=self.dsmode == sources.DSMODE_PASS)
-
- # This is legacy and sneaky. If dsmode is 'pass' then do not claim
- # the datasource was used, even though we did run on_first_boot above.
- if self.dsmode == sources.DSMODE_PASS:
- LOG.debug("%s: not claiming datasource, dsmode=%s", self,
- self.dsmode)
- return False
-
- self.source = found
- self.metadata = md
- self.ec2_metadata = results.get('ec2-metadata')
- self.userdata_raw = results.get('userdata')
- self.version = results['version']
- self.files.update(results.get('files', {}))
-
- vd = results.get('vendordata')
- self.vendordata_pure = vd
- try:
- self.vendordata_raw = openstack.convert_vendordata_json(vd)
- except ValueError as e:
- LOG.warn("Invalid content in vendor-data: %s", e)
- self.vendordata_raw = None
-
- # network_config is an /etc/network/interfaces formated file and is
- # obsolete compared to networkdata (from network_data.json) but both
- # might be present.
- self.network_eni = results.get("network_config")
- self.network_json = results.get('networkdata')
- return True
-
- def check_instance_id(self, sys_cfg):
- # quickly (local check only) if self.instance_id is still valid
- return sources.instance_id_matches_system_uuid(self.get_instance_id())
-
- @property
- def network_config(self):
- if self._network_config is None:
- if self.network_json is not None:
- LOG.debug("network config provided via network_json")
- self._network_config = openstack.convert_net_json(
- self.network_json, known_macs=self.known_macs)
- elif self.network_eni is not None:
- self._network_config = eni.convert_eni_data(self.network_eni)
- LOG.debug("network config provided via converted eni data")
- else:
- LOG.debug("no network configuration available")
- return self._network_config
-
-
-def read_config_drive(source_dir):
- reader = openstack.ConfigDriveReader(source_dir)
- finders = [
- (reader.read_v2, [], {}),
- (reader.read_v1, [], {}),
- ]
- excps = []
- for (functor, args, kwargs) in finders:
- try:
- return functor(*args, **kwargs)
- except openstack.NonReadable as e:
- excps.append(e)
- raise excps[-1]
-
-
-def get_previous_iid(paths):
- # interestingly, for this purpose the "previous" instance-id is the current
- # instance-id. cloud-init hasn't moved them over yet as this datasource
- # hasn't declared itself found.
- fname = os.path.join(paths.get_cpath('data'), 'instance-id')
- try:
- return util.load_file(fname).rstrip("\n")
- except IOError:
- return None
-
-
-def on_first_boot(data, distro=None, network=True):
- """Performs any first-boot actions using data read from a config-drive."""
- if not isinstance(data, dict):
- raise TypeError("Config-drive data expected to be a dict; not %s"
- % (type(data)))
- if network:
- net_conf = data.get("network_config", '')
- if net_conf and distro:
- LOG.warn("Updating network interfaces from config drive")
- distro.apply_network(net_conf)
- write_injected_files(data.get('files'))
-
-
-def write_injected_files(files):
- if files:
- LOG.debug("Writing %s injected files", len(files))
- for (filename, content) in files.items():
- if not filename.startswith(os.sep):
- filename = os.sep + filename
- try:
- util.write_file(filename, content, mode=0o660)
- except IOError:
- util.logexc(LOG, "Failed writing file: %s", filename)
-
-
-def find_candidate_devs(probe_optical=True):
- """Return a list of devices that may contain the config drive.
-
- The returned list is sorted by search order where the first item has
- should be searched first (highest priority)
-
- config drive v1:
- Per documentation, this is "associated as the last available disk on the
- instance", and should be VFAT.
- Currently, we do not restrict search list to "last available disk"
-
- config drive v2:
- Disk should be:
- * either vfat or iso9660 formated
- * labeled with 'config-2'
- """
- # query optical drive to get it in blkid cache for 2.6 kernels
- if probe_optical:
- for device in OPTICAL_DEVICES:
- try:
- util.find_devs_with(path=device)
- except util.ProcessExecutionError:
- pass
-
- by_fstype = []
- for fs_type in FS_TYPES:
- by_fstype.extend(util.find_devs_with("TYPE=%s" % (fs_type)))
-
- by_label = []
- for label in LABEL_TYPES:
- by_label.extend(util.find_devs_with("LABEL=%s" % (label)))
-
- # give preference to "last available disk" (vdb over vda)
- # note, this is not a perfect rendition of that.
- by_fstype.sort(reverse=True)
- by_label.sort(reverse=True)
-
- # combine list of items by putting by-label items first
- # followed by fstype items, but with dupes removed
- candidates = (by_label + [d for d in by_fstype if d not in by_label])
-
- # We are looking for a block device or partition with necessary label or
- # an unpartitioned block device (ex sda, not sda1)
- devices = [d for d in candidates
- if d in by_label or not util.is_partition(d)]
- return devices
-
-
-# Legacy: Must be present in case we load an old pkl object
-DataSourceConfigDriveNet = DataSourceConfigDrive
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceConfigDrive, (sources.DEP_FILESYSTEM,)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
deleted file mode 100644
index 44a17a00..00000000
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Author: Neal Shrader <neal@digitalocean.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import ec2_utils
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
-import functools
-
-
-LOG = logging.getLogger(__name__)
-
-BUILTIN_DS_CONFIG = {
- 'metadata_url': 'http://169.254.169.254/metadata/v1/',
- 'mirrors_url': 'http://mirrors.digitalocean.com/'
-}
-MD_RETRIES = 0
-MD_TIMEOUT = 1
-
-
-class DataSourceDigitalOcean(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.metadata = dict()
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}),
- BUILTIN_DS_CONFIG])
- self.metadata_address = self.ds_cfg['metadata_url']
-
- if self.ds_cfg.get('retries'):
- self.retries = self.ds_cfg['retries']
- else:
- self.retries = MD_RETRIES
-
- if self.ds_cfg.get('timeout'):
- self.timeout = self.ds_cfg['timeout']
- else:
- self.timeout = MD_TIMEOUT
-
- def get_data(self):
- caller = functools.partial(util.read_file_or_url,
- timeout=self.timeout, retries=self.retries)
-
- def mcaller(url):
- return caller(url).contents
-
- md = ec2_utils.MetadataMaterializer(mcaller(self.metadata_address),
- base_url=self.metadata_address,
- caller=mcaller)
-
- self.metadata = md.materialize()
-
- if self.metadata.get('id'):
- return True
- else:
- return False
-
- def get_userdata_raw(self):
- return "\n".join(self.metadata['user-data'])
-
- def get_vendordata_raw(self):
- return "\n".join(self.metadata['vendor-data'])
-
- def get_public_ssh_keys(self):
- public_keys = self.metadata['public-keys']
- if isinstance(public_keys, list):
- return public_keys
- else:
- return [public_keys]
-
- @property
- def availability_zone(self):
- return self.metadata['region']
-
- def get_instance_id(self):
- return self.metadata['id']
-
- def get_hostname(self, fqdn=False, resolve_ip=False):
- return self.metadata['hostname']
-
- def get_package_mirror_info(self):
- return self.ds_cfg['mirrors_url']
-
- @property
- def launch_index(self):
- return None
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
deleted file mode 100644
index 6fe2a0bb..00000000
--- a/cloudinit/sources/DataSourceEc2.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Hafliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import time
-
-from cloudinit import ec2_utils as ec2
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper as uhelp
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-DEF_MD_URL = "http://169.254.169.254"
-
-# Which version we are requesting of the ec2 metadata apis
-DEF_MD_VERSION = '2009-04-04'
-
-# Default metadata urls that will be used if none are provided
-# They will be checked for 'resolveability' and some of the
-# following may be discarded if they do not resolve
-DEF_MD_URLS = [DEF_MD_URL, "http://instance-data.:8773"]
-
-
-class DataSourceEc2(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.metadata_address = DEF_MD_URL
- self.seed_dir = os.path.join(paths.seed_dir, "ec2")
- self.api_ver = DEF_MD_VERSION
-
- def get_data(self):
- seed_ret = {}
- if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
- self.userdata_raw = seed_ret['user-data']
- self.metadata = seed_ret['meta-data']
- LOG.debug("Using seeded ec2 data from %s", self.seed_dir)
- return True
-
- try:
- if not self.wait_for_metadata_service():
- return False
- start_time = time.time()
- self.userdata_raw = \
- ec2.get_instance_userdata(self.api_ver, self.metadata_address)
- self.metadata = ec2.get_instance_metadata(self.api_ver,
- self.metadata_address)
- LOG.debug("Crawl of metadata service took %s seconds",
- int(time.time() - start_time))
- return True
- except Exception:
- util.logexc(LOG, "Failed reading from metadata address %s",
- self.metadata_address)
- return False
-
- @property
- def launch_index(self):
- if not self.metadata:
- return None
- return self.metadata.get('ami-launch-index')
-
- def get_instance_id(self):
- return self.metadata['instance-id']
-
- def _get_url_settings(self):
- mcfg = self.ds_cfg
- max_wait = 120
- try:
- max_wait = int(mcfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
-
- timeout = 50
- try:
- timeout = max(0, int(mcfg.get("timeout", timeout)))
- except Exception:
- util.logexc(LOG, "Failed to get timeout, using %s", timeout)
-
- return (max_wait, timeout)
-
- def wait_for_metadata_service(self):
- mcfg = self.ds_cfg
-
- (max_wait, timeout) = self._get_url_settings()
- if max_wait <= 0:
- return False
-
- # Remove addresses from the list that wont resolve.
- mdurls = mcfg.get("metadata_urls", DEF_MD_URLS)
- filtered = [x for x in mdurls if util.is_resolvable_url(x)]
-
- if set(filtered) != set(mdurls):
- LOG.debug("Removed the following from metadata urls: %s",
- list((set(mdurls) - set(filtered))))
-
- if len(filtered):
- mdurls = filtered
- else:
- LOG.warn("Empty metadata url list! using default list")
- mdurls = DEF_MD_URLS
-
- urls = []
- url2base = {}
- for url in mdurls:
- cur = "%s/%s/meta-data/instance-id" % (url, self.api_ver)
- urls.append(cur)
- url2base[cur] = url
-
- start_time = time.time()
- url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout, status_cb=LOG.warn)
-
- if url:
- LOG.debug("Using metadata source: '%s'", url2base[url])
- else:
- LOG.critical("Giving up on md from %s after %s seconds",
- urls, int(time.time() - start_time))
-
- self.metadata_address = url2base.get(url)
- return bool(url)
-
- def device_name_to_device(self, name):
- # Consult metadata service, that has
- # ephemeral0: sdb
- # and return 'sdb' for input 'ephemeral0'
- if 'block-device-mapping' not in self.metadata:
- return None
-
- # Example:
- # 'block-device-mapping':
- # {'ami': '/dev/sda1',
- # 'ephemeral0': '/dev/sdb',
- # 'root': '/dev/sda1'}
- found = None
- bdm = self.metadata['block-device-mapping']
- for (entname, device) in bdm.items():
- if entname == name:
- found = device
- break
- # LP: #513842 mapping in Euca has 'ephemeral' not 'ephemeral0'
- if entname == "ephemeral" and name == "ephemeral0":
- found = device
-
- if found is None:
- LOG.debug("Unable to convert %s to a device", name)
- return None
-
- ofound = found
- if not found.startswith("/"):
- found = "/dev/%s" % found
-
- if os.path.exists(found):
- return found
-
- remapped = self._remap_device(os.path.basename(found))
- if remapped:
- LOG.debug("Remapped device name %s => %s", found, remapped)
- return remapped
-
- # On t1.micro, ephemeral0 will appear in block-device-mapping from
- # metadata, but it will not exist on disk (and never will)
- # at this point, we've verified that the path did not exist
- # in the special case of 'ephemeral0' return None to avoid bogus
- # fstab entry (LP: #744019)
- if name == "ephemeral0":
- return None
- return ofound
-
- @property
- def availability_zone(self):
- try:
- return self.metadata['placement']['availability-zone']
- except KeyError:
- return None
-
- @property
- def region(self):
- az = self.availability_zone
- if az is not None:
- return az[:-1]
- return None
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
deleted file mode 100644
index c660a350..00000000
--- a/cloudinit/sources/DataSourceGCE.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Author: Vaidas Jablonskis <jablonskis@gmail.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-from base64 import b64decode
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
BUILTIN_DS_CONFIG = {
    # Default GCE metadata endpoint; overridable via system config at
    # 'datasource: GCE: metadata_url'.
    'metadata_url': 'http://metadata.google.internal/computeMetadata/v1/'
}
# Metadata keys a GCE instance must provide.
# NOTE(review): not referenced anywhere in this module's visible code --
# get_data() carries its own per-key 'required' flags; possibly dead.
REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
-
-
class GoogleMetadataFetcher(object):
    """Fetch individual values from the GCE metadata server."""

    # GCE requires this header on every metadata request.
    headers = {'X-Google-Metadata-Request': True}

    def __init__(self, metadata_address):
        self.metadata_address = metadata_address

    def get_value(self, path, is_text):
        """Return the value at *path* below the metadata root, or None.

        Text responses are decoded to unicode; binary responses are
        returned as raw bytes.  URL errors and non-200 codes yield None.
        """
        url = self.metadata_address + path
        try:
            resp = url_helper.readurl(url=url, headers=self.headers)
        except url_helper.UrlError as exc:
            LOG.debug("url %s raised exception %s", path, exc)
            return None
        if resp.code != 200:
            LOG.debug("url %s returned code %s", path, resp.code)
            return None
        if is_text:
            return util.decode_binary(resp.contents)
        return resp.contents
-
-
class DataSourceGCE(sources.DataSource):
    """Datasource that reads instance data from the GCE metadata server."""

    def __init__(self, sys_cfg, distro, paths):
        sources.DataSource.__init__(self, sys_cfg, distro, paths)
        self.metadata = dict()
        # System config ("datasource: GCE: ...") overrides the builtin
        # metadata_url default.
        self.ds_cfg = util.mergemanydict([
            util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}),
            BUILTIN_DS_CONFIG])
        self.metadata_address = self.ds_cfg['metadata_url']

    def _trim_key(self, public_key):
        """Strip the '<user>:' prefix GCE prepends to sshKeys entries.

        GCE stores keys as '<user>:<public_key>'.  Returns the input
        unchanged when no ':' separator is present or the input is not
        a string.
        """
        try:
            # Fix: the previous index()-based version silently returned
            # None for keys with an empty user part (':ssh-rsa ...'),
            # because the 'index > 0' branch fell through.
            _user, sep, key = public_key.partition(':')
            return key if sep else public_key
        except (AttributeError, TypeError):
            return public_key

    def get_data(self):
        """Populate self.metadata from the metadata server.

        Returns True only when at least one metadata key resolved (we
        are actually on GCE) and every required key was present.
        """
        # url_map: (our-key, paths-to-try, required, is_text)
        url_map = [
            ('instance-id', ('instance/id',), True, True),
            ('availability-zone', ('instance/zone',), True, True),
            ('local-hostname', ('instance/hostname',), True, True),
            ('public-keys', ('project/attributes/sshKeys',
                             'instance/attributes/sshKeys'), False, True),
            ('user-data', ('instance/attributes/user-data',), False, False),
            ('user-data-encoding', ('instance/attributes/user-data-encoding',),
             False, True),
        ]

        # If we cannot resolve the metadata server, then no point in trying.
        if not util.is_resolvable_url(self.metadata_address):
            LOG.debug("%s is not resolvable", self.metadata_address)
            return False

        metadata_fetcher = GoogleMetadataFetcher(self.metadata_address)
        running_on_gce = False
        for (mkey, paths, required, is_text) in url_map:
            value = None
            # Later paths win: instance attributes override project ones.
            for path in paths:
                new_value = metadata_fetcher.get_value(path, is_text)
                if new_value is not None:
                    value = new_value
            if value:
                # Any successful fetch proves the metadata server exists.
                running_on_gce = True
            if required and value is None:
                msg = "required key %s returned nothing. not GCE"
                if not running_on_gce:
                    LOG.debug(msg, mkey)
                else:
                    LOG.warning(msg, mkey)
                return False
            self.metadata[mkey] = value

        if self.metadata['public-keys']:
            lines = self.metadata['public-keys'].splitlines()
            self.metadata['public-keys'] = [self._trim_key(k) for k in lines]

        if self.metadata['availability-zone']:
            # Zone arrives as 'projects/<num>/zones/<zone>'; keep the leaf.
            self.metadata['availability-zone'] = self.metadata[
                'availability-zone'].split('/')[-1]

        encoding = self.metadata.get('user-data-encoding')
        if encoding:
            if encoding == 'base64':
                self.metadata['user-data'] = b64decode(
                    self.metadata['user-data'])
            else:
                LOG.warning('unknown user-data-encoding: %s, ignoring',
                            encoding)

        return running_on_gce

    @property
    def launch_index(self):
        # GCE does not provide a launch_index property.
        return None

    def get_instance_id(self):
        return self.metadata['instance-id']

    def get_public_ssh_keys(self):
        return self.metadata['public-keys']

    def get_hostname(self, fqdn=False, resolve_ip=False):
        # GCE has long FQDNs and has asked for short hostnames.
        return self.metadata['local-hostname'].split('.')[0]

    def get_userdata_raw(self):
        return self.metadata['user-data']

    @property
    def availability_zone(self):
        return self.metadata['availability-zone']

    @property
    def region(self):
        # 'us-central1-a' -> 'us-central1'
        return self.availability_zone.rsplit('-', 1)[0]
-
# Registry used by the cloud-init loader: GCE requires both filesystem
# and network to be up before it can query the metadata server.
datasources = [
    (DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]


def get_datasource_list(depends):
    """Return the datasource classes from this module whose dependency
    requirements are satisfied by the given set."""
    return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
deleted file mode 100644
index d828f078..00000000
--- a/cloudinit/sources/DataSourceMAAS.py
+++ /dev/null
@@ -1,353 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import print_function
-
-import errno
-import os
-import time
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
# MAAS metadata API version used when building request URLs.
MD_VERSION = "2012-03-01"

# Seed fields whose contents are raw bytes and must not be text-decoded.
BINARY_FIELDS = ('user-data',)
-
-
class DataSourceMAAS(sources.DataSource):
    """
    DataSourceMAAS reads instance information from MAAS.
    Given a config metadata_url, and oauth tokens, it expects to find
    files under the root named:
      instance-id
      user-data
      hostname
    """
    def __init__(self, sys_cfg, distro, paths):
        sources.DataSource.__init__(self, sys_cfg, distro, paths)
        self.base_url = None
        self.seed_dir = os.path.join(paths.seed_dir, 'maas')
        self.oauth_helper = self._get_helper()

    def _get_helper(self):
        """Build an OauthUrlHelper; unauthenticated when creds are absent."""
        mcfg = self.ds_cfg
        # If we are missing token_key, token_secret or consumer_key
        # then just do non-authed requests.
        for required in ('token_key', 'token_secret', 'consumer_key'):
            if required not in mcfg:
                return url_helper.OauthUrlHelper()

        return url_helper.OauthUrlHelper(
            consumer_key=mcfg['consumer_key'], token_key=mcfg['token_key'],
            token_secret=mcfg['token_secret'],
            consumer_secret=mcfg.get('consumer_secret'))

    def __str__(self):
        root = sources.DataSource.__str__(self)
        return "%s [%s]" % (root, self.base_url)

    def get_data(self):
        """Try the local seed dir first, then the configured metadata_url."""
        mcfg = self.ds_cfg

        try:
            (userdata, metadata) = read_maas_seed_dir(self.seed_dir)
            self.userdata_raw = userdata
            self.metadata = metadata
            self.base_url = self.seed_dir
            return True
        except MAASSeedDirNone:
            pass
        except MAASSeedDirMalformed as exc:
            LOG.warning("%s was malformed: %s", self.seed_dir, exc)
            raise

        # If there is no metadata_url, then we're not configured.
        url = mcfg.get('metadata_url', None)
        if not url:
            return False

        try:
            # Doing this here actually has a side effect of getting the
            # oauth time-fix in place: nowhere else would retry by
            # default, so even if we could fix the timestamp we would not.
            if not self.wait_for_metadata_service(url):
                return False

            self.base_url = url

            (userdata, metadata) = read_maas_seed_url(
                self.base_url, read_file_or_url=self.oauth_helper.readurl,
                paths=self.paths, retries=1)
            self.userdata_raw = userdata
            self.metadata = metadata
            return True
        except Exception:
            util.logexc(LOG, "Failed fetching metadata from url %s", url)
            return False

    def wait_for_metadata_service(self, url):
        """Poll the metadata service until reachable or max_wait expires.

        Both max_wait and timeout are overridable from the datasource
        config; malformed values fall back to the defaults.
        """
        mcfg = self.ds_cfg
        max_wait = 120
        try:
            max_wait = int(mcfg.get("max_wait", max_wait))
        except Exception:
            util.logexc(LOG, "Failed to get max wait. using %s", max_wait)

        if max_wait == 0:
            return False

        timeout = 50
        try:
            # Fix: this previously read `if timeout in mcfg:`, testing the
            # integer 50 as a dict key, so a configured "timeout" value was
            # never honored.
            if "timeout" in mcfg:
                timeout = int(mcfg.get("timeout", timeout))
        except Exception:
            LOG.warning("Failed to get timeout, using %s", timeout)

        starttime = time.time()
        check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
        urls = [check_url]
        url = self.oauth_helper.wait_for_url(
            urls=urls, max_wait=max_wait, timeout=timeout)

        if url:
            LOG.debug("Using metadata source: '%s'", url)
        else:
            LOG.critical("Giving up on md from %s after %i seconds",
                         urls, int(time.time() - starttime))

        return bool(url)
-
-
def read_maas_seed_dir(seed_d):
    """
    Return user-data and metadata for a maas seed dir in seed_d.
    Expected format of seed_d are the following files:
      * instance-id
      * local-hostname
      * user-data

    Raises MAASSeedDirNone when seed_d is not a directory (or, via
    check_seed_contents, when it holds no data files).
    """
    if not os.path.isdir(seed_d):
        # Fix: the format placeholder was previously never substituted,
        # producing the literal message "%s: not a directory".
        raise MAASSeedDirNone("%s: not a directory" % seed_d)

    files = ('local-hostname', 'instance-id', 'user-data', 'public-keys')
    md = {}
    for fname in files:
        try:
            # user-data stays as raw bytes; everything else is decoded text.
            md[fname] = util.load_file(os.path.join(seed_d, fname),
                                       decode=fname not in BINARY_FIELDS)
        except IOError as e:
            # A missing seed file is fine; any other I/O error is real.
            if e.errno != errno.ENOENT:
                raise

    return check_seed_contents(md, seed_d)
-
-
def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
                       version=MD_VERSION, paths=None, retries=None):
    """
    Read the maas datasource at seed_url.

    read_file_or_url is a method that should provide an interface
    like util.read_file_or_url.

    Expected format of seed_url are the following files:
      * <seed_url>/<version>/meta-data/instance-id
      * <seed_url>/<version>/meta-data/local-hostname
      * <seed_url>/<version>/user-data
    """
    base_url = "%s/%s" % (seed_url, version)
    file_order = [
        'local-hostname',
        'instance-id',
        'public-keys',
        'user-data',
    ]
    files = {
        'local-hostname': "%s/%s" % (base_url, 'meta-data/local-hostname'),
        'instance-id': "%s/%s" % (base_url, 'meta-data/instance-id'),
        'public-keys': "%s/%s" % (base_url, 'meta-data/public-keys'),
        'user-data': "%s/%s" % (base_url, 'user-data'),
    }

    if read_file_or_url is None:
        read_file_or_url = util.read_file_or_url

    # Hoisted out of the loop: the SSL details are loop-invariant and were
    # previously re-fetched for every URL.
    ssl_details = util.fetch_ssl_details(paths)

    md = {}
    for name in file_order:
        url = files.get(name)
        # Never retry user-data; it is optional and frequently absent.
        item_retries = 0 if name == 'user-data' else retries

        try:
            resp = read_file_or_url(url, retries=item_retries,
                                    timeout=timeout, ssl_details=ssl_details)
            if resp.ok():
                if name in BINARY_FIELDS:
                    md[name] = resp.contents
                else:
                    md[name] = util.decode_binary(resp.contents)
            else:
                LOG.warning(("Fetching from %s resulted in"
                             " an invalid http code %s"), url, resp.code)
        except url_helper.UrlError as e:
            # A 404 just means this optional file is absent.
            if e.code != 404:
                raise
    return check_seed_contents(md, seed_url)
-
-
def check_seed_contents(content, seed):
    """Validate a dict of fetched seed data.

    Returns a (userdata, metadata) tuple on success.  Raises
    MAASSeedDirNone when *content* is empty and MAASSeedDirMalformed
    when a required metadata file is missing.  *seed* is only used in
    the error messages.
    """
    if not content:
        raise MAASSeedDirNone("%s: no data files found" % seed)

    missing = [key for key in ('instance-id', 'local-hostname')
               if key not in content]
    if missing:
        raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing))

    userdata = content.get('user-data', b"")
    md = {key: val for (key, val) in content.items() if key != 'user-data'}
    return (userdata, md)
-
-
class MAASSeedDirNone(Exception):
    """Raised when the seed location does not exist or holds no data."""
    pass
-
-
class MAASSeedDirMalformed(Exception):
    """Raised when the seed exists but required metadata files are missing."""
    pass
-
-
# Registry used by the cloud-init loader: MAAS requires both filesystem
# and network stages before it can reach the metadata service.
datasources = [
    (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]


def get_datasource_list(depends):
    """Return the datasource classes from this module whose dependency
    requirements are satisfied by the given set."""
    return sources.list_from_depends(depends, datasources)
-
-
if __name__ == "__main__":
    def main():
        """
        Call with single argument of directory or http or https url.
        If url is given additional arguments are allowed, which will be
        interpreted as consumer_key, token_key, token_secret, consumer_secret
        """
        import argparse
        import pprint

        parser = argparse.ArgumentParser(description='Interact with MAAS DS')
        parser.add_argument("--config", metavar="file",
                            help="specify DS config file", default=None)
        parser.add_argument("--ckey", metavar="key",
                            help="the consumer key to auth with", default=None)
        parser.add_argument("--tkey", metavar="key",
                            help="the token key to auth with", default=None)
        parser.add_argument("--csec", metavar="secret",
                            help="the consumer secret (likely '')", default="")
        parser.add_argument("--tsec", metavar="secret",
                            help="the token secret to auth with", default=None)
        # Fix: the help text previously read '("" can be used)', where the
        # adjacent empty string literal was concatenated away, rendering as
        # "( can be used)".
        parser.add_argument("--apiver", metavar="version",
                            help="the apiver to use ('' can be used)",
                            default=MD_VERSION)

        subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
        subcmds.add_parser('crawl', help="crawl the datasource")
        subcmds.add_parser('get', help="do a single GET of provided url")
        # Fix: typo "andn" -> "and" in user-visible help text.
        subcmds.add_parser('check-seed', help="read and verify seed at url")

        parser.add_argument("url", help="the data source to query")

        args = parser.parse_args()

        creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
                 'token_secret': args.tsec, 'consumer_secret': args.csec}

        if args.config:
            # Credentials not given on the command line may come from a
            # datasource config file.
            cfg = util.read_conf(args.config)
            if 'datasource' in cfg:
                cfg = cfg['datasource']['MAAS']
            for key in creds.keys():
                if key in cfg and creds[key] is None:
                    creds[key] = cfg[key]

        oauth_helper = url_helper.OauthUrlHelper(**creds)

        def geturl(url):
            # The retry is to ensure that the oauth timestamp gets fixed.
            return oauth_helper.readurl(url, retries=1).contents

        def printurl(url):
            print("== %s ==\n%s\n" % (url, geturl(url).decode()))

        def crawl(url):
            # Recursively walk the metadata tree, printing leaf values.
            if url.endswith("/"):
                for line in geturl(url).decode().splitlines():
                    if line.endswith("/"):
                        crawl("%s%s" % (url, line))
                    elif line == "meta-data":
                        # meta-data is a dir, it *should* end in a /
                        crawl("%s%s" % (url, "meta-data/"))
                    else:
                        printurl("%s%s" % (url, line))
            else:
                printurl(url)

        if args.subcmd == "check-seed":
            readurl = oauth_helper.readurl
            if args.url[0] == "/" or args.url.startswith("file://"):
                # Local seeds need no oauth; fall back to the default reader.
                readurl = None
            (userdata, metadata) = read_maas_seed_url(
                args.url, version=args.apiver, read_file_or_url=readurl,
                retries=2)
            print("=== userdata ===")
            print(userdata.decode())
            print("=== metadata ===")
            pprint.pprint(metadata)

        elif args.subcmd == "get":
            printurl(args.url)

        elif args.subcmd == "crawl":
            if not args.url.endswith("/"):
                args.url = "%s/" % args.url
            crawl(args.url)

    main()
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
deleted file mode 100644
index cdc9eef5..00000000
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Hafliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import errno
-import os
-
-from cloudinit import log as logging
-from cloudinit.net import eni
-from cloudinit import sources
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
class DataSourceNoCloud(sources.DataSource):
    """Datasource seeded from local dirs, kernel cmdline, config or media."""

    def __init__(self, sys_cfg, distro, paths):
        sources.DataSource.__init__(self, sys_cfg, distro, paths)
        self.seed = None
        self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud'),
                          os.path.join(paths.seed_dir, 'nocloud-net')]
        self.seed_dir = None
        self.supported_seed_starts = ("/", "file://")

    def __str__(self):
        root = sources.DataSource.__str__(self)
        return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)

    def get_data(self):
        """Collect seed data from every supported source, in priority order."""
        defaults = {
            "instance-id": "nocloud",
            "dsmode": self.dsmode,
        }

        found = []
        mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "",
                  'network-config': {}}

        try:
            # Parse the kernel command line, getting data passed in
            md = {}
            if load_cmdline_data(md):
                found.append("cmdline")
                mydata = _merge_new_seed(mydata, {'meta-data': md})
        except Exception:
            util.logexc(LOG, "Unable to parse command line data")
            return False

        # Check to see if the seed dir has data.
        pp2d_kwargs = {'required': ['user-data', 'meta-data'],
                       'optional': ['vendor-data', 'network-config']}

        for path in self.seed_dirs:
            try:
                seeded = util.pathprefix2dict(path, **pp2d_kwargs)
                found.append(path)
                LOG.debug("Using seeded data from %s", path)
                mydata = _merge_new_seed(mydata, seeded)
                break
            except ValueError:
                # Required files missing: this dir is not a seed.
                pass

        # If the datasource config had a 'seedfrom' entry, then that takes
        # precedence over a 'seedfrom' that was found in a filesystem
        # but not over external media
        if self.ds_cfg.get('seedfrom'):
            found.append("ds_config_seedfrom")
            mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom']

        # fields appropriately named can also just come from the datasource
        # config (ie, 'user-data', 'meta-data', 'vendor-data' there)
        if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg:
            mydata = _merge_new_seed(mydata, self.ds_cfg)
            found.append("ds_config")

        def _pp2d_callback(mp, data):
            return util.pathprefix2dict(mp, **data)

        label = self.ds_cfg.get('fs_label', "cidata")
        if label is not None:
            # Query optical drive to get it in blkid cache for 2.6 kernels
            util.find_devs_with(path="/dev/sr0")
            util.find_devs_with(path="/dev/sr1")

            fslist = util.find_devs_with("TYPE=vfat")
            fslist.extend(util.find_devs_with("TYPE=iso9660"))

            label_list = util.find_devs_with("LABEL=%s" % label)
            devlist = list(set(fslist) & set(label_list))
            devlist.sort(reverse=True)

            for dev in devlist:
                try:
                    LOG.debug("Attempting to use data from %s", dev)

                    try:
                        seeded = util.mount_cb(dev, _pp2d_callback,
                                               pp2d_kwargs)
                    except ValueError:
                        if dev in label_list:
                            # Fix: the joined string literal was missing a
                            # space and logged "not avalid seed".
                            LOG.warning("device %s with label=%s not a "
                                        "valid seed.", dev, label)
                        continue

                    mydata = _merge_new_seed(mydata, seeded)

                    LOG.debug("Using data from %s", dev)
                    found.append(dev)
                    break
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
                except util.MountFailedError:
                    util.logexc(LOG, "Failed to mount %s when looking for "
                                "data", dev)

        # There was no indication on kernel cmdline or data
        # in the seeddir suggesting this handler should be used.
        if len(found) == 0:
            return False

        # The special argument "seedfrom" indicates we should
        # attempt to seed the userdata / metadata from its value
        # its primarily value is in allowing the user to type less
        # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
        if "seedfrom" in mydata['meta-data']:
            seedfrom = mydata['meta-data']["seedfrom"]
            seedfound = False
            for proto in self.supported_seed_starts:
                if seedfrom.startswith(proto):
                    seedfound = proto
                    break
            if not seedfound:
                LOG.debug("Seed from %s not supported by %s", seedfrom, self)
                return False

            # This could throw errors, but the user told us to do it
            # so if errors are raised, let them raise
            (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
            LOG.debug("Using seeded cache data from %s", seedfrom)

            # Values in the command line override those from the seed
            mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
                                                      md_seed])
            mydata['user-data'] = ud
            found.append(seedfrom)

        # Now that we have exhausted any other places merge in the defaults
        mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
                                                  defaults])

        self.dsmode = self._determine_dsmode(
            [mydata['meta-data'].get('dsmode')])

        if self.dsmode == sources.DSMODE_DISABLED:
            LOG.debug("%s: not claiming datasource, dsmode=%s", self,
                      self.dsmode)
            return False

        self.seed = ",".join(found)
        self.metadata = mydata['meta-data']
        self.userdata_raw = mydata['user-data']
        self.vendordata_raw = mydata['vendor-data']
        self._network_config = mydata['network-config']
        # ENI-format network data may also arrive inside meta-data.
        self._network_eni = mydata['meta-data'].get('network-interfaces')
        return True

    def check_instance_id(self, sys_cfg):
        """Quick local-only check that the cached instance-id is still valid.

        Consults the kernel command line and seed dirs; returns None when
        no answer can be determined locally.
        """
        current = self.get_instance_id()
        if not current:
            return None

        # LP: #1568150 need getattr in the case that an old class object
        # has been loaded from a pickled file and now executing new source.
        dirs = getattr(self, 'seed_dirs', [self.seed_dir])
        quick_id = _quick_read_instance_id(dirs=dirs)
        if not quick_id:
            return None
        return quick_id == current

    @property
    def network_config(self):
        # Prefer explicit network-config; fall back to converting ENI data.
        if self._network_config is None:
            if self._network_eni is not None:
                self._network_config = eni.convert_eni_data(self._network_eni)
        return self._network_config
-
-
def _quick_read_instance_id(dirs=None):
    """Cheaply look up instance-id from cmdline or seed dirs, or None."""
    iid_key = 'instance-id'

    # The kernel command line wins when it carries an instance-id.
    fill = {}
    if load_cmdline_data(fill) and iid_key in fill:
        return fill[iid_key]

    for seed_dir in (dirs or []):
        if seed_dir is None:
            continue
        try:
            data = util.pathprefix2dict(seed_dir, required=['meta-data'])
            md = util.load_yaml(data['meta-data'])
        except ValueError:
            # No usable meta-data in this dir; try the next one.
            continue
        if iid_key in md:
            return md[iid_key]

    return None
-
-
def load_cmdline_data(fill, cmdline=None):
    """Populate *fill* from kernel-cmdline nocloud arguments.

    Returns True when 'ds=nocloud' or 'ds=nocloud-net' was present.  A
    dsmode given explicitly on the command line is preferred over the
    mode implied by which datasource id matched.
    """
    id_to_mode = (("ds=nocloud", sources.DSMODE_LOCAL),
                  ("ds=nocloud-net", sources.DSMODE_NETWORK))
    for idstr, dsmode in id_to_mode:
        if not parse_cmdline_data(idstr, fill, cmdline):
            continue
        fill.setdefault('dsmode', dsmode)
        return True
    return False
-
-
def parse_cmdline_data(ds_id, fill, cmdline=None):
    """Return True when *cmdline* selects datasource *ds_id*.

    Key=value arguments following the id are stored into *fill*.
    Example cmdline:
      root=LABEL=uec-rootfs ro ds=nocloud;s=http://host/;h=name
    """
    if cmdline is None:
        cmdline = util.get_cmdline()
    padded = " %s " % cmdline

    # The id must appear as its own token, optionally followed by args.
    if (" %s " % ds_id) not in padded and (" %s;" % ds_id) not in padded:
        return False

    argline = ""
    # cmdline can contain:
    # ds=nocloud[;key=val;key=val]
    for tok in padded.split():
        if tok.startswith(ds_id):
            argline = tok.split("=", 1)

    # argline is now ['nocloud', 'maybe;key=val;...']; everything after
    # the first ';' is key=value pairs.
    parts = argline[1].split(";")
    kvpairs = parts[1:] if len(parts) > 1 else ()

    # short2long mapping to save cmdline typing
    shortnames = {"h": "local-hostname", "i": "instance-id", "s": "seedfrom"}
    for item in kvpairs:
        if not item:
            continue
        key, sep, val = item.partition("=")
        if not sep:
            val = None
        fill[shortnames.get(key, key)] = val

    return True
-
-
def _merge_new_seed(cur, seeded):
    """Return a copy of *cur* with the data from *seeded* merged in.

    'meta-data' may arrive as a dict or a YAML string; a string
    'network-config' is parsed as YAML.  'user-data' and 'vendor-data'
    replace rather than merge.
    """
    ret = cur.copy()

    newmd = seeded.get('meta-data', {})
    # Fix: previously this indexed seeded['meta-data'] directly in the
    # isinstance check, raising KeyError when the key was absent and
    # defeating the .get() default above.
    if not isinstance(newmd, dict):
        newmd = util.load_yaml(newmd)
    ret['meta-data'] = util.mergemanydict([cur['meta-data'], newmd])

    if seeded.get('network-config'):
        ret['network-config'] = util.load_yaml(seeded['network-config'])

    if 'user-data' in seeded:
        ret['user-data'] = seeded['user-data']
    if 'vendor-data' in seeded:
        ret['vendor-data'] = seeded['vendor-data']
    return ret
-
-
class DataSourceNoCloudNet(DataSourceNoCloud):
    """Network-stage variant: 'seedfrom' may point at http/https/ftp URLs."""
    def __init__(self, sys_cfg, distro, paths):
        DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)
        self.supported_seed_starts = ("http://", "https://", "ftp://")
-
-
# Registry used by the cloud-init loader.  The local (filesystem-only)
# variant is listed first so it is preferred when it can run.
datasources = [
    (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )),
    (DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]


def get_datasource_list(depends):
    """Return the datasource classes from this module whose dependency
    requirements are satisfied by the given set."""
    return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py
deleted file mode 100644
index d1a62b2a..00000000
--- a/cloudinit/sources/DataSourceNone.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import log as logging
-from cloudinit import sources
-
-LOG = logging.getLogger(__name__)
-
-
class DataSourceNone(sources.DataSource):
    """Last-resort datasource that supplies no (or only configured) data."""

    def __init__(self, sys_cfg, distro, paths, ud_proc=None):
        sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
        self.metadata = {}
        self.userdata_raw = ''

    def get_data(self):
        """Adopt any 'fallback' userdata/metadata from the ds config."""
        self.userdata_raw = self.ds_cfg.get('userdata_raw', self.userdata_raw)
        self.metadata = self.ds_cfg.get('metadata', self.metadata)
        return True

    def get_instance_id(self):
        # Fixed id: there is no backing platform to query.
        return 'iid-datasource-none'

    @property
    def is_disconnected(self):
        return True
-
-
# Registry used by the cloud-init loader.  DataSourceNone is registered
# both with full dependencies and with none, so it can serve as the
# fallback of last resort in either situation.
datasources = [
    (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
    (DataSourceNone, []),
]


def get_datasource_list(depends):
    """Return the datasource classes from this module whose dependency
    requirements are satisfied by the given set."""
    return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
deleted file mode 100644
index 43347cfb..00000000
--- a/cloudinit/sources/DataSourceOVF.py
+++ /dev/null
@@ -1,429 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Hafliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from xml.dom import minidom
-
-import base64
-import os
-import re
-import time
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
-from cloudinit.sources.helpers.vmware.imc.config \
- import Config
-from cloudinit.sources.helpers.vmware.imc.config_file \
- import ConfigFile
-from cloudinit.sources.helpers.vmware.imc.config_nic \
- import NicConfigurator
-from cloudinit.sources.helpers.vmware.imc.guestcust_error \
- import GuestCustErrorEnum
-from cloudinit.sources.helpers.vmware.imc.guestcust_event \
- import GuestCustEventEnum
-from cloudinit.sources.helpers.vmware.imc.guestcust_state \
- import GuestCustStateEnum
-from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
- enable_nics,
- get_nics_to_enable,
- set_customization_status
-)
-
-LOG = logging.getLogger(__name__)
-
-
-class DataSourceOVF(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed = None
- self.seed_dir = os.path.join(paths.seed_dir, 'ovf')
- self.environment = None
- self.cfg = {}
- self.supported_seed_starts = ("/", "file://")
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- return "%s [seed=%s]" % (root, self.seed)
-
- def get_data(self):
- found = []
- md = {}
- ud = ""
- vmwarePlatformFound = False
- vmwareImcConfigFilePath = ''
-
- defaults = {
- "instance-id": "iid-dsovf",
- }
-
- (seedfile, contents) = get_ovf_env(self.paths.seed_dir)
-
- system_type = util.read_dmi_data("system-product-name")
- if system_type is None:
- LOG.debug("No system-product-name found")
-
- if seedfile:
- # Found a seed dir
- seed = os.path.join(self.paths.seed_dir, seedfile)
- (md, ud, cfg) = read_ovf_environment(contents)
- self.environment = contents
- found.append(seed)
- elif system_type and 'vmware' in system_type.lower():
- LOG.debug("VMware Virtualization Platform found")
- if not util.get_cfg_option_bool(
- self.sys_cfg, "disable_vmware_customization", True):
- deployPkgPluginPath = search_file("/usr/lib/vmware-tools",
- "libdeployPkgPlugin.so")
- if not deployPkgPluginPath:
- deployPkgPluginPath = search_file("/usr/lib/open-vm-tools",
- "libdeployPkgPlugin.so")
- if deployPkgPluginPath:
- # When the VM is powered on, the "VMware Tools" daemon
- # copies the customization specification file to
- # /var/run/vmware-imc directory. cloud-init code needs
- # to search for the file in that directory.
- vmwareImcConfigFilePath = util.log_time(
- logfunc=LOG.debug,
- msg="waiting for configuration file",
- func=wait_for_imc_cfg_file,
- args=("/var/run/vmware-imc", "cust.cfg"))
-
- if vmwareImcConfigFilePath:
- LOG.debug("Found VMware DeployPkg Config File at %s" %
- vmwareImcConfigFilePath)
- else:
- LOG.debug("Did not find VMware DeployPkg Config File Path")
- else:
- LOG.debug("Customization for VMware platform is disabled.")
-
- if vmwareImcConfigFilePath:
- nics = ""
- try:
- cf = ConfigFile(vmwareImcConfigFilePath)
- conf = Config(cf)
- (md, ud, cfg) = read_vmware_imc(conf)
- dirpath = os.path.dirname(vmwareImcConfigFilePath)
- nics = get_nics_to_enable(dirpath)
- except Exception as e:
- LOG.debug("Error parsing the customization Config File")
- LOG.exception(e)
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)
- enable_nics(nics)
- return False
- finally:
- util.del_dir(os.path.dirname(vmwareImcConfigFilePath))
-
- try:
- LOG.debug("Applying the Network customization")
- nicConfigurator = NicConfigurator(conf.nics)
- nicConfigurator.configure()
- except Exception as e:
- LOG.debug("Error applying the Network Configuration")
- LOG.exception(e)
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED)
- enable_nics(nics)
- return False
-
- vmwarePlatformFound = True
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_DONE,
- GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
- enable_nics(nics)
- else:
- np = {'iso': transport_iso9660,
- 'vmware-guestd': transport_vmware_guestd, }
- name = None
- for (name, transfunc) in np.items():
- (contents, _dev, _fname) = transfunc()
- if contents:
- break
- if contents:
- (md, ud, cfg) = read_ovf_environment(contents)
- self.environment = contents
- found.append(name)
-
- # There was no OVF transports found
- if len(found) == 0 and not vmwarePlatformFound:
- return False
-
- if 'seedfrom' in md and md['seedfrom']:
- seedfrom = md['seedfrom']
- seedfound = False
- for proto in self.supported_seed_starts:
- if seedfrom.startswith(proto):
- seedfound = proto
- break
- if not seedfound:
- LOG.debug("Seed from %s not supported by %s",
- seedfrom, self)
- return False
-
- (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
- LOG.debug("Using seeded cache data from %s", seedfrom)
-
- md = util.mergemanydict([md, md_seed])
- found.append(seedfrom)
-
- # Now that we have exhausted any other places merge in the defaults
- md = util.mergemanydict([md, defaults])
-
- self.seed = ",".join(found)
- self.metadata = md
- self.userdata_raw = ud
- self.cfg = cfg
- return True
-
- def get_public_ssh_keys(self):
- if 'public-keys' not in self.metadata:
- return []
- pks = self.metadata['public-keys']
- if isinstance(pks, (list)):
- return pks
- else:
- return [pks]
-
- # The data sources' config_obj is a cloud-config formatted
- # object that came to it from ways other than cloud-config
- # because cloud-config content would be handled elsewhere
- def get_config_obj(self):
- return self.cfg
-
-
-class DataSourceOVFNet(DataSourceOVF):
- def __init__(self, sys_cfg, distro, paths):
- DataSourceOVF.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'ovf-net')
- self.supported_seed_starts = ("http://", "https://", "ftp://")
-
-
-def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5):
- waited = 0
-
- while waited < maxwait:
- fileFullPath = search_file(dirpath, filename)
- if fileFullPath:
- return fileFullPath
- time.sleep(naplen)
- waited += naplen
- return None
-
-
-# This will return a dict with some content
-# meta-data, user-data, some config
-def read_vmware_imc(config):
- md = {}
- cfg = {}
- ud = ""
- if config.host_name:
- if config.domain_name:
- md['local-hostname'] = config.host_name + "." + config.domain_name
- else:
- md['local-hostname'] = config.host_name
-
- if config.timezone:
- cfg['timezone'] = config.timezone
-
- return (md, ud, cfg)
-
-
-# This will return a dict with some content
-# meta-data, user-data, some config
-def read_ovf_environment(contents):
- props = get_properties(contents)
- md = {}
- cfg = {}
- ud = ""
- cfg_props = ['password']
- md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id']
- for (prop, val) in props.items():
- if prop == 'hostname':
- prop = "local-hostname"
- if prop in md_props:
- md[prop] = val
- elif prop in cfg_props:
- cfg[prop] = val
- elif prop == "user-data":
- try:
- ud = base64.decodestring(val)
- except Exception:
- ud = val
- return (md, ud, cfg)
-
-
-# Returns tuple of filename (in 'dirname', and the contents of the file)
-# on "not found", returns 'None' for filename and False for contents
-def get_ovf_env(dirname):
- env_names = ("ovf-env.xml", "ovf_env.xml", "OVF_ENV.XML", "OVF-ENV.XML")
- for fname in env_names:
- full_fn = os.path.join(dirname, fname)
- if os.path.isfile(full_fn):
- try:
- contents = util.load_file(full_fn)
- return (fname, contents)
- except Exception:
- util.logexc(LOG, "Failed loading ovf file %s", full_fn)
- return (None, False)
-
-
-# Transport functions take no input and return
-# a 3 tuple of content, path, filename
-def transport_iso9660(require_iso=True):
-
- # default_regex matches values in
- # /lib/udev/rules.d/60-cdrom_id.rules
- # KERNEL!="sr[0-9]*|hd[a-z]|xvd*", GOTO="cdrom_end"
- envname = "CLOUD_INIT_CDROM_DEV_REGEX"
- default_regex = "^(sr[0-9]+|hd[a-z]|xvd.*)"
-
- devname_regex = os.environ.get(envname, default_regex)
- cdmatch = re.compile(devname_regex)
-
- # Go through mounts to see if it was already mounted
- mounts = util.mounts()
- for (dev, info) in mounts.items():
- fstype = info['fstype']
- if fstype != "iso9660" and require_iso:
- continue
- if cdmatch.match(dev[5:]) is None: # take off '/dev/'
- continue
- mp = info['mountpoint']
- (fname, contents) = get_ovf_env(mp)
- if contents is not False:
- return (contents, dev, fname)
-
- if require_iso:
- mtype = "iso9660"
- else:
- mtype = None
-
- devs = os.listdir("/dev/")
- devs.sort()
- for dev in devs:
- fullp = os.path.join("/dev/", dev)
-
- if (fullp in mounts or
- not cdmatch.match(dev) or os.path.isdir(fullp)):
- continue
-
- try:
- # See if we can read anything at all...??
- util.peek_file(fullp, 512)
- except IOError:
- continue
-
- try:
- (fname, contents) = util.mount_cb(fullp, get_ovf_env, mtype=mtype)
- except util.MountFailedError:
- LOG.debug("%s not mountable as iso9660" % fullp)
- continue
-
- if contents is not False:
- return (contents, fullp, fname)
-
- return (False, None, None)
-
-
-def transport_vmware_guestd():
- # http://blogs.vmware.com/vapp/2009/07/ \
- # selfconfiguration-and-the-ovf-environment.html
- # try:
- # cmd = ['vmware-guestd', '--cmd', 'info-get guestinfo.ovfEnv']
- # (out, err) = subp(cmd)
- # return(out, 'guestinfo.ovfEnv', 'vmware-guestd')
- # except:
- # # would need to error check here and see why this failed
- # # to know if log/error should be raised
- # return(False, None, None)
- return (False, None, None)
-
-
-def find_child(node, filter_func):
- ret = []
- if not node.hasChildNodes():
- return ret
- for child in node.childNodes:
- if filter_func(child):
- ret.append(child)
- return ret
-
-
-def get_properties(contents):
-
- dom = minidom.parseString(contents)
- if dom.documentElement.localName != "Environment":
- raise XmlError("No Environment Node")
-
- if not dom.documentElement.hasChildNodes():
- raise XmlError("No Child Nodes")
-
- envNsURI = "http://schemas.dmtf.org/ovf/environment/1"
-
- # could also check here that elem.namespaceURI ==
- # "http://schemas.dmtf.org/ovf/environment/1"
- propSections = find_child(dom.documentElement,
- lambda n: n.localName == "PropertySection")
-
- if len(propSections) == 0:
- raise XmlError("No 'PropertySection's")
-
- props = {}
- propElems = find_child(propSections[0],
- (lambda n: n.localName == "Property"))
-
- for elem in propElems:
- key = elem.attributes.getNamedItemNS(envNsURI, "key").value
- val = elem.attributes.getNamedItemNS(envNsURI, "value").value
- props[key] = val
-
- return props
-
-
-def search_file(dirpath, filename):
- if not dirpath or not filename:
- return None
-
- for root, dirs, files in os.walk(dirpath):
- if filename in files:
- return os.path.join(root, filename)
-
- return None
-
-
-class XmlError(Exception):
- pass
-
-
-# Used to match classes to dependencies
-datasources = (
- (DataSourceOVF, (sources.DEP_FILESYSTEM, )),
- (DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-)
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
deleted file mode 100644
index 7b3a76b9..00000000
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ /dev/null
@@ -1,429 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Yahoo! Inc.
-# Copyright (C) 2012-2013 CERIT Scientific Cloud
-# Copyright (C) 2012-2013 OpenNebula.org
-# Copyright (C) 2014 Consejo Superior de Investigaciones Cientificas
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-# Author: Vlastimil Holer <xholer@mail.muni.cz>
-# Author: Javier Fontan <jfontan@opennebula.org>
-# Author: Enol Fernandez <enolfc@ifca.unican.es>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pwd
-import re
-import string
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
-
-LOG = logging.getLogger(__name__)
-
-DEFAULT_IID = "iid-dsopennebula"
-DEFAULT_PARSEUSER = 'nobody'
-CONTEXT_DISK_FILES = ["context.sh"]
-
-
-class DataSourceOpenNebula(sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed = None
- self.seed_dir = os.path.join(paths.seed_dir, 'opennebula')
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)
-
- def get_data(self):
- defaults = {"instance-id": DEFAULT_IID}
- results = None
- seed = None
-
- # decide parseuser for context.sh shell reader
- parseuser = DEFAULT_PARSEUSER
- if 'parseuser' in self.ds_cfg:
- parseuser = self.ds_cfg.get('parseuser')
-
- candidates = [self.seed_dir]
- candidates.extend(find_candidate_devs())
- for cdev in candidates:
- try:
- if os.path.isdir(self.seed_dir):
- results = read_context_disk_dir(cdev, asuser=parseuser)
- elif cdev.startswith("/dev"):
- results = util.mount_cb(cdev, read_context_disk_dir,
- data=parseuser)
- except NonContextDiskDir:
- continue
- except BrokenContextDiskDir as exc:
- raise exc
- except util.MountFailedError:
- LOG.warn("%s was not mountable" % cdev)
-
- if results:
- seed = cdev
- LOG.debug("found datasource in %s", cdev)
- break
-
- if not seed:
- return False
-
- # merge fetched metadata with datasource defaults
- md = results['metadata']
- md = util.mergemanydict([md, defaults])
-
- # check for valid user specified dsmode
- self.dsmode = self._determine_dsmode(
- [results.get('DSMODE'), self.ds_cfg.get('dsmode')])
-
- if self.dsmode == sources.DSMODE_DISABLED:
- return False
-
- self.seed = seed
- self.network_eni = results.get("network_config")
- self.metadata = md
- self.userdata_raw = results.get('userdata')
- return True
-
- def get_hostname(self, fqdn=False, resolve_ip=None):
- if resolve_ip is None:
- if self.dsmode == sources.DSMODE_NETWORK:
- resolve_ip = True
- else:
- resolve_ip = False
- return sources.DataSource.get_hostname(self, fqdn, resolve_ip)
-
-
-class NonContextDiskDir(Exception):
- pass
-
-
-class BrokenContextDiskDir(Exception):
- pass
-
-
-class OpenNebulaNetwork(object):
- REG_DEV_MAC = re.compile(
- r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?',
- re.MULTILINE | re.DOTALL)
-
- def __init__(self, ip, context):
- self.ip = ip
- self.context = context
- self.ifaces = self.get_ifaces()
-
- def get_ifaces(self):
- return self.REG_DEV_MAC.findall(self.ip)
-
- def mac2ip(self, mac):
- components = mac.split(':')[2:]
- return [str(int(c, 16)) for c in components]
-
- def get_ip(self, dev, components):
- var_name = dev.upper() + '_IP'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return '.'.join(components)
-
- def get_mask(self, dev):
- var_name = dev.upper() + '_MASK'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return '255.255.255.0'
-
- def get_network(self, dev, components):
- var_name = dev.upper() + '_NETWORK'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return '.'.join(components[:-1]) + '.0'
-
- def get_gateway(self, dev):
- var_name = dev.upper() + '_GATEWAY'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return None
-
- def get_dns(self, dev):
- var_name = dev.upper() + '_DNS'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return None
-
- def get_domain(self, dev):
- var_name = dev.upper() + '_DOMAIN'
- if var_name in self.context:
- return self.context[var_name]
- else:
- return None
-
- def gen_conf(self):
- global_dns = []
- if 'DNS' in self.context:
- global_dns.append(self.context['DNS'])
-
- conf = []
- conf.append('auto lo')
- conf.append('iface lo inet loopback')
- conf.append('')
-
- for i in self.ifaces:
- dev = i[0]
- mac = i[1]
- ip_components = self.mac2ip(mac)
-
- conf.append('auto ' + dev)
- conf.append('iface ' + dev + ' inet static')
- conf.append(' address ' + self.get_ip(dev, ip_components))
- conf.append(' network ' + self.get_network(dev, ip_components))
- conf.append(' netmask ' + self.get_mask(dev))
-
- gateway = self.get_gateway(dev)
- if gateway:
- conf.append(' gateway ' + gateway)
-
- domain = self.get_domain(dev)
- if domain:
- conf.append(' dns-search ' + domain)
-
- # add global DNS servers to all interfaces
- dns = self.get_dns(dev)
- if global_dns or dns:
- all_dns = global_dns
- if dns:
- all_dns.append(dns)
- conf.append(' dns-nameservers ' + ' '.join(all_dns))
-
- conf.append('')
-
- return "\n".join(conf)
-
-
-def find_candidate_devs():
- """
- Return a list of devices that may contain the context disk.
- """
- combined = []
- for f in ('LABEL=CONTEXT', 'LABEL=CDROM', 'TYPE=iso9660'):
- devs = util.find_devs_with(f)
- devs.sort()
- for d in devs:
- if d not in combined:
- combined.append(d)
-
- return combined
-
-
-def switch_user_cmd(user):
- return ['sudo', '-u', user]
-
-
-def parse_shell_config(content, keylist=None, bash=None, asuser=None,
- switch_user_cb=None):
-
- if isinstance(bash, str):
- bash = [bash]
- elif bash is None:
- bash = ['bash', '-e']
-
- if switch_user_cb is None:
- switch_user_cb = switch_user_cmd
-
- # allvars expands to all existing variables by using '${!x*}' notation
- # where x is lower or upper case letters or '_'
- allvars = ["${!%s*}" % x for x in string.ascii_letters + "_"]
-
- keylist_in = keylist
- if keylist is None:
- keylist = allvars
- keylist_in = []
-
- setup = '\n'.join(('__v="";', '',))
-
- def varprinter(vlist):
- # output '\0'.join(['_start_', key=value NULL for vars in vlist]
- return '\n'.join((
- 'printf "%s\\0" _start_',
- 'for __v in %s; do' % ' '.join(vlist),
- ' printf "%s=%s\\0" "$__v" "${!__v}";',
- 'done',
- ''
- ))
-
- # the rendered 'bcmd' is bash syntax that does
- # setup: declare variables we use (so they show up in 'all')
- # varprinter(allvars): print all variables known at beginning
- # content: execute the provided content
- # varprinter(keylist): print all variables known after content
- #
- # output is then a null terminated array of:
- # literal '_start_'
- # key=value (for each preset variable)
- # literal '_start_'
- # key=value (for each post set variable)
- bcmd = ('unset IFS\n' +
- setup +
- varprinter(allvars) +
- '{\n%s\n\n:\n} > /dev/null\n' % content +
- 'unset IFS\n' +
- varprinter(keylist) + "\n")
-
- cmd = []
- if asuser is not None:
- cmd = switch_user_cb(asuser)
-
- cmd.extend(bash)
-
- (output, _error) = util.subp(cmd, data=bcmd)
-
- # exclude vars in bash that change on their own or that we used
- excluded = ("RANDOM", "LINENO", "SECONDS", "_", "__v")
- preset = {}
- ret = {}
- target = None
- output = output[0:-1] # remove trailing null
-
- # go through output. First _start_ is for 'preset', second for 'target'.
- # Add to target only things were changed and not in volitile
- for line in output.split("\x00"):
- try:
- (key, val) = line.split("=", 1)
- if target is preset:
- target[key] = val
- elif (key not in excluded and
- (key in keylist_in or preset.get(key) != val)):
- ret[key] = val
- except ValueError:
- if line != "_start_":
- raise
- if target is None:
- target = preset
- elif target is preset:
- target = ret
-
- return ret
-
-
-def read_context_disk_dir(source_dir, asuser=None):
- """
- read_context_disk_dir(source_dir):
- read source_dir and return a tuple with metadata dict and user-data
- string populated. If not a valid dir, raise a NonContextDiskDir
- """
- found = {}
- for af in CONTEXT_DISK_FILES:
- fn = os.path.join(source_dir, af)
- if os.path.isfile(fn):
- found[af] = fn
-
- if not found:
- raise NonContextDiskDir("%s: %s" % (source_dir, "no files found"))
-
- context = {}
- results = {'userdata': None, 'metadata': {}}
-
- if "context.sh" in found:
- if asuser is not None:
- try:
- pwd.getpwnam(asuser)
- except KeyError as e:
- raise BrokenContextDiskDir("configured user '%s' "
- "does not exist", asuser)
- try:
- path = os.path.join(source_dir, 'context.sh')
- content = util.load_file(path)
- context = parse_shell_config(content, asuser=asuser)
- except util.ProcessExecutionError as e:
- raise BrokenContextDiskDir("Error processing context.sh: %s" % (e))
- except IOError as e:
- raise NonContextDiskDir("Error reading context.sh: %s" % (e))
- else:
- raise NonContextDiskDir("Missing context.sh")
-
- if not context:
- return results
-
- results['metadata'] = context
-
- # process single or multiple SSH keys
- ssh_key_var = None
- if "SSH_KEY" in context:
- ssh_key_var = "SSH_KEY"
- elif "SSH_PUBLIC_KEY" in context:
- ssh_key_var = "SSH_PUBLIC_KEY"
-
- if ssh_key_var:
- lines = context.get(ssh_key_var).splitlines()
- results['metadata']['public-keys'] = [l for l in lines
- if len(l) and not
- l.startswith("#")]
-
- # custom hostname -- try hostname or leave cloud-init
- # itself create hostname from IP address later
- for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
- if k in context:
- results['metadata']['local-hostname'] = context[k]
- break
-
- # raw user data
- if "USER_DATA" in context:
- results['userdata'] = context["USER_DATA"]
- elif "USERDATA" in context:
- results['userdata'] = context["USERDATA"]
-
- # b64decode user data if necessary (default)
- if 'userdata' in results:
- encoding = context.get('USERDATA_ENCODING',
- context.get('USER_DATA_ENCODING'))
- if encoding == "base64":
- try:
- results['userdata'] = util.b64d(results['userdata'])
- except TypeError:
- LOG.warn("Failed base64 decoding of userdata")
-
- # generate static /etc/network/interfaces
- # only if there are any required context variables
- # http://opennebula.org/documentation:rel3.8:cong#network_configuration
- for k in context:
- if re.match(r'^ETH\d+_IP$', k):
- (out, _) = util.subp(['/sbin/ip', 'link'])
- net = OpenNebulaNetwork(out, context)
- results['network-interfaces'] = net.gen_conf()
- break
-
- return results
-
-
-# Legacy: Must be present in case we load an old pkl object
-DataSourceOpenNebulaNet = DataSourceOpenNebula
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceOpenNebula, (sources.DEP_FILESYSTEM, )),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
deleted file mode 100644
index c06d17f3..00000000
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2014 Yahoo! Inc.
-#
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import time
-
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-
-from cloudinit.sources.helpers import openstack
-
-LOG = logging.getLogger(__name__)
-
-# Various defaults/constants...
-DEF_MD_URL = "http://169.254.169.254"
-DEFAULT_IID = "iid-dsopenstack"
-DEFAULT_METADATA = {
- "instance-id": DEFAULT_IID,
-}
-
-
-class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
- def __init__(self, sys_cfg, distro, paths):
- super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
- self.metadata_address = None
- self.ssl_details = util.fetch_ssl_details(self.paths)
- self.version = None
- self.files = {}
- self.ec2_metadata = None
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
- return mstr
-
- def _get_url_settings(self):
- # TODO(harlowja): this is shared with ec2 datasource, we should just
- # move it to a shared location instead...
- # Note: the defaults here are different though.
-
- # max_wait < 0 indicates do not wait
- max_wait = -1
- timeout = 10
-
- try:
- max_wait = int(self.ds_cfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
-
- try:
- timeout = max(0, int(self.ds_cfg.get("timeout", timeout)))
- except Exception:
- util.logexc(LOG, "Failed to get timeout, using %s", timeout)
- return (max_wait, timeout)
-
- def wait_for_metadata_service(self):
- urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL])
- filtered = [x for x in urls if util.is_resolvable_url(x)]
- if set(filtered) != set(urls):
- LOG.debug("Removed the following from metadata urls: %s",
- list((set(urls) - set(filtered))))
- if len(filtered):
- urls = filtered
- else:
- LOG.warn("Empty metadata url list! using default list")
- urls = [DEF_MD_URL]
-
- md_urls = []
- url2base = {}
- for url in urls:
- md_url = url_helper.combine_url(url, 'openstack')
- md_urls.append(md_url)
- url2base[md_url] = url
-
- (max_wait, timeout) = self._get_url_settings()
- start_time = time.time()
- avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait,
- timeout=timeout)
- if avail_url:
- LOG.debug("Using metadata source: '%s'", url2base[avail_url])
- else:
- LOG.debug("Giving up on OpenStack md from %s after %s seconds",
- md_urls, int(time.time() - start_time))
-
- self.metadata_address = url2base.get(avail_url)
- return bool(avail_url)
-
- def get_data(self, retries=5, timeout=5):
- try:
- if not self.wait_for_metadata_service():
- return False
- except IOError:
- return False
-
- try:
- results = util.log_time(LOG.debug,
- 'Crawl of openstack metadata service',
- read_metadata_service,
- args=[self.metadata_address],
- kwargs={'ssl_details': self.ssl_details,
- 'retries': retries,
- 'timeout': timeout})
- except openstack.NonReadable:
- return False
- except (openstack.BrokenMetadata, IOError):
- util.logexc(LOG, "Broken metadata address %s",
- self.metadata_address)
- return False
-
- self.dsmode = self._determine_dsmode([results.get('dsmode')])
- if self.dsmode == sources.DSMODE_DISABLED:
- return False
-
- md = results.get('metadata', {})
- md = util.mergemanydict([md, DEFAULT_METADATA])
- self.metadata = md
- self.ec2_metadata = results.get('ec2-metadata')
- self.userdata_raw = results.get('userdata')
- self.version = results['version']
- self.files.update(results.get('files', {}))
-
- vd = results.get('vendordata')
- self.vendordata_pure = vd
- try:
- self.vendordata_raw = openstack.convert_vendordata_json(vd)
- except ValueError as e:
- LOG.warn("Invalid content in vendor-data: %s", e)
- self.vendordata_raw = None
-
- return True
-
- def check_instance_id(self, sys_cfg):
- # quickly (local check only) if self.instance_id is still valid
- return sources.instance_id_matches_system_uuid(self.get_instance_id())
-
-
-def read_metadata_service(base_url, ssl_details=None,
- timeout=5, retries=5):
- reader = openstack.MetadataReader(base_url, ssl_details=ssl_details,
- timeout=timeout, retries=retries)
- return reader.read_v2()
-
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceOpenStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
deleted file mode 100644
index ccc86883..00000000
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ /dev/null
@@ -1,781 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2013 Canonical Ltd.
-#
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-#
-# Datasource for provisioning on SmartOS. This works on Joyent
-# and public/private Clouds using SmartOS.
-#
-# SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests
-# The meta-data is transmitted via key/value pairs made by
-# requests on the console. For example, to get the hostname, you
-# would send "GET hostname" on /dev/ttyS1.
-# For Linux Guests running in LX-Brand Zones on SmartOS hosts
-# a socket (/native/.zonecontrol/metadata.sock) is used instead
-# of a serial console.
-#
-# Certain behavior is defined by the DataDictionary
-# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
-# Comments with "@datadictionary" are snippets of the definition
-
-import base64
-import binascii
-import json
-import os
-import random
-import re
-import socket
-
-from cloudinit import log as logging
-from cloudinit import serial
-from cloudinit import sources
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-SMARTOS_ATTRIB_MAP = {
- # Cloud-init Key : (SmartOS Key, Strip line endings)
- 'instance-id': ('sdc:uuid', True),
- 'local-hostname': ('hostname', True),
- 'public-keys': ('root_authorized_keys', True),
- 'user-script': ('user-script', False),
- 'legacy-user-data': ('user-data', False),
- 'user-data': ('cloud-init:user-data', False),
- 'iptables_disable': ('iptables_disable', True),
- 'motd_sys_info': ('motd_sys_info', True),
- 'availability_zone': ('sdc:datacenter_name', True),
- 'vendor-data': ('sdc:vendor-data', False),
- 'operator-script': ('sdc:operator-script', False),
-}
-
-SMARTOS_ATTRIB_JSON = {
- # Cloud-init Key : (SmartOS Key known JSON)
- 'network-data': 'sdc:nics',
-}
-
-SMARTOS_ENV_LX_BRAND = "lx-brand"
-SMARTOS_ENV_KVM = "kvm"
-
-DS_NAME = 'SmartOS'
-DS_CFG_PATH = ['datasource', DS_NAME]
-NO_BASE64_DECODE = [
- 'iptables_disable',
- 'motd_sys_info',
- 'root_authorized_keys',
- 'sdc:datacenter_name',
- 'sdc:uuid'
- 'user-data',
- 'user-script',
-]
-
-METADATA_SOCKFILE = '/native/.zonecontrol/metadata.sock'
-SERIAL_DEVICE = '/dev/ttyS1'
-SERIAL_TIMEOUT = 60
-
-# BUILT-IN DATASOURCE CONFIGURATION
-# The following is the built-in configuration. If the values
-# are not set via the system configuration, then these default
-# will be used:
-# serial_device: which serial device to use for the meta-data
-# serial_timeout: how long to wait on the device
-# no_base64_decode: values which are not base64 encoded and
-# are fetched directly from SmartOS, not meta-data values
-# base64_keys: meta-data keys that are delivered in base64
-# base64_all: with the exclusion of no_base64_decode values,
-# treat all meta-data as base64 encoded
-# disk_setup: describes how to partition the ephemeral drive
-# fs_setup: describes how to format the ephemeral drive
-#
-BUILTIN_DS_CONFIG = {
- 'serial_device': SERIAL_DEVICE,
- 'serial_timeout': SERIAL_TIMEOUT,
- 'metadata_sockfile': METADATA_SOCKFILE,
- 'no_base64_decode': NO_BASE64_DECODE,
- 'base64_keys': [],
- 'base64_all': False,
- 'disk_aliases': {'ephemeral0': '/dev/vdb'},
-}
-
-BUILTIN_CLOUD_CONFIG = {
- 'disk_setup': {
- 'ephemeral0': {'table_type': 'mbr',
- 'layout': False,
- 'overwrite': False}
- },
- 'fs_setup': [{'label': 'ephemeral0',
- 'filesystem': 'ext3',
- 'device': 'ephemeral0'}],
-}
-
-# builtin vendor-data is a boothook that writes a script into
-# /var/lib/cloud/scripts/per-boot. *That* script then handles
-# executing the 'operator-script' and 'user-script' files
-# that cloud-init writes into /var/lib/cloud/instance/data/
-# if they exist.
-#
-# This is all very indirect, but its done like this so that at
-# some point in the future, perhaps cloud-init wouldn't do it at
-# all, but rather the vendor actually provide vendor-data that accomplished
-# their desires. (That is the point of vendor-data).
-#
-# cloud-init does cheat a bit, and write the operator-script and user-script
-# itself. It could have the vendor-script do that, but it seems better
-# to not require the image to contain a tool (mdata-get) to read those
-# keys when we have a perfectly good one inside cloud-init.
-BUILTIN_VENDOR_DATA = """\
-#cloud-boothook
-#!/bin/sh
-fname="%(per_boot_d)s/01_smartos_vendor_data.sh"
-mkdir -p "${fname%%/*}"
-cat > "$fname" <<"END_SCRIPT"
-#!/bin/sh
-##
-# This file is written as part of the default vendor data for SmartOS.
-# The SmartOS datasource writes the listed file from the listed metadata key
-# sdc:operator-script -> %(operator_script)s
-# user-script -> %(user_script)s
-#
-# You can view content with 'mdata-get <key>'
-#
-for script in "%(operator_script)s" "%(user_script)s"; do
- [ -x "$script" ] || continue
- echo "executing '$script'" 1>&2
- "$script"
-done
-END_SCRIPT
-chmod +x "$fname"
-"""
-
-
-# @datadictionary: this is legacy path for placing files from metadata
-# per the SmartOS location. It is not preferable, but is done for
-# legacy reasons
-LEGACY_USER_D = "/var/db"
-
-
-class DataSourceSmartOS(sources.DataSource):
- _unset = "_unset"
- smartos_type = _unset
- md_client = _unset
-
- def __init__(self, sys_cfg, distro, paths):
- sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.ds_cfg = util.mergemanydict([
- self.ds_cfg,
- util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
- BUILTIN_DS_CONFIG])
-
- self.metadata = {}
- self.network_data = None
- self._network_config = None
-
- self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
-
- self._init()
-
- def __str__(self):
- root = sources.DataSource.__str__(self)
- return "%s [client=%s]" % (root, self.md_client)
-
- def _init(self):
- if self.smartos_type == self._unset:
- self.smartos_type = get_smartos_environ()
- if self.smartos_type is None:
- self.md_client = None
-
- if self.md_client == self._unset:
- self.md_client = jmc_client_factory(
- smartos_type=self.smartos_type,
- metadata_sockfile=self.ds_cfg['metadata_sockfile'],
- serial_device=self.ds_cfg['serial_device'],
- serial_timeout=self.ds_cfg['serial_timeout'])
-
- def _set_provisioned(self):
- '''Mark the instance provisioning state as successful.
-
- When run in a zone, the host OS will look for /var/svc/provisioning
- to be renamed as /var/svc/provision_success. This should be done
- after meta-data is successfully retrieved and from this point
- the host considers the provision of the zone to be a success and
- keeps the zone running.
- '''
-
- LOG.debug('Instance provisioning state set as successful')
- svc_path = '/var/svc'
- if os.path.exists('/'.join([svc_path, 'provisioning'])):
- os.rename('/'.join([svc_path, 'provisioning']),
- '/'.join([svc_path, 'provision_success']))
-
- def get_data(self):
- self._init()
-
- md = {}
- ud = ""
-
- if not self.smartos_type:
- LOG.debug("Not running on smartos")
- return False
-
- if not self.md_client.exists():
- LOG.debug("No metadata device '%r' found for SmartOS datasource",
- self.md_client)
- return False
-
- for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
- smartos_noun, strip = attribute
- md[ci_noun] = self.md_client.get(smartos_noun, strip=strip)
-
- for ci_noun, smartos_noun in SMARTOS_ATTRIB_JSON.items():
- md[ci_noun] = self.md_client.get_json(smartos_noun)
-
- # @datadictionary: This key may contain a program that is written
- # to a file in the filesystem of the guest on each boot and then
- # executed. It may be of any format that would be considered
- # executable in the guest instance.
- #
- # We write 'user-script' and 'operator-script' into the
- # instance/data directory. The default vendor-data then handles
- # executing them later.
- data_d = os.path.join(self.paths.get_cpath(), 'instances',
- md['instance-id'], 'data')
- user_script = os.path.join(data_d, 'user-script')
- u_script_l = "%s/user-script" % LEGACY_USER_D
- write_boot_content(md.get('user-script'), content_f=user_script,
- link=u_script_l, shebang=True, mode=0o700)
-
- operator_script = os.path.join(data_d, 'operator-script')
- write_boot_content(md.get('operator-script'),
- content_f=operator_script, shebang=False,
- mode=0o700)
-
- # @datadictionary: This key has no defined format, but its value
- # is written to the file /var/db/mdata-user-data on each boot prior
- # to the phase that runs user-script. This file is not to be executed.
- # This allows a configuration file of some kind to be injected into
- # the machine to be consumed by the user-script when it runs.
- u_data = md.get('legacy-user-data')
- u_data_f = "%s/mdata-user-data" % LEGACY_USER_D
- write_boot_content(u_data, u_data_f)
-
- # Handle the cloud-init regular meta
- if not md['local-hostname']:
- md['local-hostname'] = md['instance-id']
-
- ud = None
- if md['user-data']:
- ud = md['user-data']
-
- if not md['vendor-data']:
- md['vendor-data'] = BUILTIN_VENDOR_DATA % {
- 'user_script': user_script,
- 'operator_script': operator_script,
- 'per_boot_d': os.path.join(self.paths.get_cpath("scripts"),
- 'per-boot'),
- }
-
- self.metadata = util.mergemanydict([md, self.metadata])
- self.userdata_raw = ud
- self.vendordata_raw = md['vendor-data']
- self.network_data = md['network-data']
-
- self._set_provisioned()
- return True
-
- def device_name_to_device(self, name):
- return self.ds_cfg['disk_aliases'].get(name)
-
- def get_config_obj(self):
- if self.smartos_type == SMARTOS_ENV_KVM:
- return BUILTIN_CLOUD_CONFIG
- return {}
-
- def get_instance_id(self):
- return self.metadata['instance-id']
-
- @property
- def network_config(self):
- if self._network_config is None:
- if self.network_data is not None:
- self._network_config = (
- convert_smartos_network_data(self.network_data))
- return self._network_config
-
-
-class JoyentMetadataFetchException(Exception):
- pass
-
-
-class JoyentMetadataClient(object):
- """
- A client implementing v2 of the Joyent Metadata Protocol Specification.
-
- The full specification can be found at
- http://eng.joyent.com/mdata/protocol.html
- """
- line_regex = re.compile(
- r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)'
- r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
- r'( (?P<payload>.+))?)')
-
- def __init__(self, smartos_type=None, fp=None):
- if smartos_type is None:
- smartos_type = get_smartos_environ()
- self.smartos_type = smartos_type
- self.fp = fp
-
- def _checksum(self, body):
- return '{0:08x}'.format(
- binascii.crc32(body.encode('utf-8')) & 0xffffffff)
-
- def _get_value_from_frame(self, expected_request_id, frame):
- frame_data = self.line_regex.match(frame).groupdict()
- if int(frame_data['length']) != len(frame_data['body']):
- raise JoyentMetadataFetchException(
- 'Incorrect frame length given ({0} != {1}).'.format(
- frame_data['length'], len(frame_data['body'])))
- expected_checksum = self._checksum(frame_data['body'])
- if frame_data['checksum'] != expected_checksum:
- raise JoyentMetadataFetchException(
- 'Invalid checksum (expected: {0}; got {1}).'.format(
- expected_checksum, frame_data['checksum']))
- if frame_data['request_id'] != expected_request_id:
- raise JoyentMetadataFetchException(
- 'Request ID mismatch (expected: {0}; got {1}).'.format(
- expected_request_id, frame_data['request_id']))
- if not frame_data.get('payload', None):
- LOG.debug('No value found.')
- return None
- value = util.b64d(frame_data['payload'])
- LOG.debug('Value "%s" found.', value)
- return value
-
- def request(self, rtype, param=None):
- request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
- message_body = ' '.join((request_id, rtype,))
- if param:
- message_body += ' ' + base64.b64encode(param.encode()).decode()
- msg = 'V2 {0} {1} {2}\n'.format(
- len(message_body), self._checksum(message_body), message_body)
- LOG.debug('Writing "%s" to metadata transport.', msg)
-
- need_close = False
- if not self.fp:
- self.open_transport()
- need_close = True
-
- self.fp.write(msg.encode('ascii'))
- self.fp.flush()
-
- response = bytearray()
- response.extend(self.fp.read(1))
- while response[-1:] != b'\n':
- response.extend(self.fp.read(1))
-
- if need_close:
- self.close_transport()
-
- response = response.rstrip().decode('ascii')
- LOG.debug('Read "%s" from metadata transport.', response)
-
- if 'SUCCESS' not in response:
- return None
-
- value = self._get_value_from_frame(request_id, response)
- return value
-
- def get(self, key, default=None, strip=False):
- result = self.request(rtype='GET', param=key)
- if result is None:
- return default
- if result and strip:
- result = result.strip()
- return result
-
- def get_json(self, key, default=None):
- result = self.get(key, default=default)
- if result is None:
- return default
- return json.loads(result)
-
- def list(self):
- result = self.request(rtype='KEYS')
- if result:
- result = result.split('\n')
- return result
-
- def put(self, key, val):
- param = b' '.join([base64.b64encode(i.encode())
- for i in (key, val)]).decode()
- return self.request(rtype='PUT', param=param)
-
- def delete(self, key):
- return self.request(rtype='DELETE', param=key)
-
- def close_transport(self):
- if self.fp:
- self.fp.close()
- self.fp = None
-
- def __enter__(self):
- if self.fp:
- return self
- self.open_transport()
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- self.close_transport()
- return
-
- def open_transport(self):
- raise NotImplementedError
-
-
-class JoyentMetadataSocketClient(JoyentMetadataClient):
- def __init__(self, socketpath):
- self.socketpath = socketpath
-
- def open_transport(self):
- sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- sock.connect(self.socketpath)
- self.fp = sock.makefile('rwb')
-
- def exists(self):
- return os.path.exists(self.socketpath)
-
- def __repr__(self):
- return "%s(socketpath=%s)" % (self.__class__.__name__, self.socketpath)
-
-
-class JoyentMetadataSerialClient(JoyentMetadataClient):
- def __init__(self, device, timeout=10, smartos_type=None):
- super(JoyentMetadataSerialClient, self).__init__(smartos_type)
- self.device = device
- self.timeout = timeout
-
- def exists(self):
- return os.path.exists(self.device)
-
- def open_transport(self):
- ser = serial.Serial(self.device, timeout=self.timeout)
- if not ser.isOpen():
- raise SystemError("Unable to open %s" % self.device)
- self.fp = ser
-
- def __repr__(self):
- return "%s(device=%s, timeout=%s)" % (
- self.__class__.__name__, self.device, self.timeout)
-
-
-class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
- """V1 of the protocol was not safe for all values.
- Thus, we allowed the user to pass values in as base64 encoded.
- Users may still reasonably expect to be able to send base64 data
- and have it transparently decoded. So even though the V2 format is
- now used, and is safe (using base64 itself), we keep legacy support.
-
- The way for a user to do this was:
- a.) specify 'base64_keys' key whose value is a comma delimited
- list of keys that were base64 encoded.
- b.) base64_all: string interpreted as a boolean that indicates
- if all keys are base64 encoded.
- c.) set a key named b64-<keyname> with a boolean indicating that
- <keyname> is base64 encoded."""
-
- def __init__(self, device, timeout=10, smartos_type=None):
- s = super(JoyentMetadataLegacySerialClient, self)
- s.__init__(device, timeout, smartos_type)
- self.base64_keys = None
- self.base64_all = None
-
- def _init_base64_keys(self, reset=False):
- if reset:
- self.base64_keys = None
- self.base64_all = None
-
- keys = None
- if self.base64_all is None:
- keys = self.list()
- if 'base64_all' in keys:
- self.base64_all = util.is_true(self._get("base64_all"))
- else:
- self.base64_all = False
-
- if self.base64_all:
- # short circuit if base64_all is true
- return
-
- if self.base64_keys is None:
- if keys is None:
- keys = self.list()
- b64_keys = set()
- if 'base64_keys' in keys:
- b64_keys = set(self._get("base64_keys").split(","))
-
- # now add any b64-<keyname> that has a true value
- for key in [k[3:] for k in keys if k.startswith("b64-")]:
- if util.is_true(self._get(key)):
- b64_keys.add(key)
- else:
- if key in b64_keys:
- b64_keys.remove(key)
-
- self.base64_keys = b64_keys
-
- def _get(self, key, default=None, strip=False):
- return (super(JoyentMetadataLegacySerialClient, self).
- get(key, default=default, strip=strip))
-
- def is_b64_encoded(self, key, reset=False):
- if key in NO_BASE64_DECODE:
- return False
-
- self._init_base64_keys(reset=reset)
- if self.base64_all:
- return True
-
- return key in self.base64_keys
-
- def get(self, key, default=None, strip=False):
- mdefault = object()
- val = self._get(key, strip=False, default=mdefault)
- if val is mdefault:
- return default
-
- if self.is_b64_encoded(key):
- try:
- val = base64.b64decode(val.encode()).decode()
- # Bogus input produces different errors in Python 2 and 3
- except (TypeError, binascii.Error):
- LOG.warn("Failed base64 decoding key '%s': %s", key, val)
-
- if strip:
- val = val.strip()
-
- return val
-
-
-def jmc_client_factory(
- smartos_type=None, metadata_sockfile=METADATA_SOCKFILE,
- serial_device=SERIAL_DEVICE, serial_timeout=SERIAL_TIMEOUT,
- uname_version=None):
-
- if smartos_type is None:
- smartos_type = get_smartos_environ(uname_version)
-
- if smartos_type is None:
- return None
- elif smartos_type == SMARTOS_ENV_KVM:
- return JoyentMetadataLegacySerialClient(
- device=serial_device, timeout=serial_timeout,
- smartos_type=smartos_type)
- elif smartos_type == SMARTOS_ENV_LX_BRAND:
- return JoyentMetadataSocketClient(socketpath=metadata_sockfile)
-
- raise ValueError("Unknown value for smartos_type: %s" % smartos_type)
-
-
-def write_boot_content(content, content_f, link=None, shebang=False,
- mode=0o400):
- """
- Write the content to content_f. Under the following rules:
- 1. If no content, remove the file
- 2. Write the content
- 3. If executable and no file magic, add it
- 4. If there is a link, create it
-
- @param content: what to write
- @param content_f: the file name
- @param backup_d: the directory to save the backup at
- @param link: if defined, location to create a symlink to
- @param shebang: if no file magic, set shebang
- @param mode: file mode
-
- Becuase of the way that Cloud-init executes scripts (no shell),
- a script will fail to execute if does not have a magic bit (shebang) set
- for the file. If shebang=True, then the script will be checked for a magic
- bit and to the SmartOS default of assuming that bash.
- """
-
- if not content and os.path.exists(content_f):
- os.unlink(content_f)
- if link and os.path.islink(link):
- os.unlink(link)
- if not content:
- return
-
- util.write_file(content_f, content, mode=mode)
-
- if shebang and not content.startswith("#!"):
- try:
- cmd = ["file", "--brief", "--mime-type", content_f]
- (f_type, _err) = util.subp(cmd)
- LOG.debug("script %s mime type is %s", content_f, f_type)
- if f_type.strip() == "text/plain":
- new_content = "\n".join(["#!/bin/bash", content])
- util.write_file(content_f, new_content, mode=mode)
- LOG.debug("added shebang to file %s", content_f)
-
- except Exception as e:
- util.logexc(LOG, ("Failed to identify script type for %s" %
- content_f, e))
-
- if link:
- try:
- if os.path.islink(link):
- os.unlink(link)
- if content and os.path.exists(content_f):
- util.ensure_dir(os.path.dirname(link))
- os.symlink(content_f, link)
- except IOError as e:
- util.logexc(LOG, "failed establishing content link: %s", e)
-
-
-def get_smartos_environ(uname_version=None, product_name=None,
- uname_arch=None):
- uname = os.uname()
- if uname_arch is None:
- uname_arch = uname[4]
-
- if uname_arch.startswith("arm") or uname_arch == "aarch64":
- return None
-
- # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
- # report 'BrandZ virtual linux' as the kernel version
- if uname_version is None:
- uname_version = uname[3]
- if uname_version.lower() == 'brandz virtual linux':
- return SMARTOS_ENV_LX_BRAND
-
- if product_name is None:
- system_type = util.read_dmi_data("system-product-name")
- else:
- system_type = product_name
-
- if system_type and 'smartdc' in system_type.lower():
- return SMARTOS_ENV_KVM
-
- return None
-
-
-# Covert SMARTOS 'sdc:nics' data to network_config yaml
-def convert_smartos_network_data(network_data=None):
- """Return a dictionary of network_config by parsing provided
- SMARTOS sdc:nics configuration data
-
- sdc:nics data is a dictionary of properties of a nic and the ip
- configuration desired. Additional nic dictionaries are appended
- to the list.
-
- Converting the format is straightforward though it does include
- duplicate information as well as data which appears to be relevant
- to the hostOS rather than the guest.
-
- For each entry in the nics list returned from query sdc:nics, we
- create a type: physical entry, and extract the interface properties:
- 'mac' -> 'mac_address', 'mtu', 'interface' -> 'name'. The remaining
- keys are related to ip configuration. For each ip in the 'ips' list
- we create a subnet entry under 'subnets' pairing the ip to a one in
- the 'gateways' list.
- """
-
- valid_keys = {
- 'physical': [
- 'mac_address',
- 'mtu',
- 'name',
- 'params',
- 'subnets',
- 'type',
- ],
- 'subnet': [
- 'address',
- 'broadcast',
- 'dns_nameservers',
- 'dns_search',
- 'gateway',
- 'metric',
- 'netmask',
- 'pointopoint',
- 'routes',
- 'scope',
- 'type',
- ],
- }
-
- config = []
- for nic in network_data:
- cfg = dict((k, v) for k, v in nic.items()
- if k in valid_keys['physical'])
- cfg.update({
- 'type': 'physical',
- 'name': nic['interface']})
- if 'mac' in nic:
- cfg.update({'mac_address': nic['mac']})
-
- subnets = []
- for ip, gw in zip(nic['ips'], nic['gateways']):
- subnet = dict((k, v) for k, v in nic.items()
- if k in valid_keys['subnet'])
- subnet.update({
- 'type': 'static',
- 'address': ip,
- 'gateway': gw,
- })
- subnets.append(subnet)
- cfg.update({'subnets': subnets})
- config.append(cfg)
-
- return {'version': 1, 'config': config}
-
-
-# Used to match classes to dependencies
-datasources = [
- (DataSourceSmartOS, (sources.DEP_FILESYSTEM, )),
-]
-
-
-# Return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return sources.list_from_depends(depends, datasources)
-
-
-if __name__ == "__main__":
- import sys
- jmc = jmc_client_factory()
- if jmc is None:
- print("Do not appear to be on smartos.")
- sys.exit(1)
- if len(sys.argv) == 1:
- keys = (list(SMARTOS_ATTRIB_JSON.keys()) +
- list(SMARTOS_ATTRIB_MAP.keys()))
- else:
- keys = sys.argv[1:]
-
- data = {}
- for key in keys:
- if key in SMARTOS_ATTRIB_JSON:
- keyname = SMARTOS_ATTRIB_JSON[key]
- data[key] = jmc.get_json(keyname)
- else:
- if key in SMARTOS_ATTRIB_MAP:
- keyname, strip = SMARTOS_ATTRIB_MAP[key]
- else:
- keyname, strip = (key, False)
- val = jmc.get(keyname, strip=strip)
- data[key] = jmc.get(keyname, strip=strip)
-
- print(json.dumps(data, indent=1))
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
deleted file mode 100644
index 87b8e524..00000000
--- a/cloudinit/sources/__init__.py
+++ /dev/null
@@ -1,371 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import abc
-import os
-
-import six
-
-from cloudinit import importer
-from cloudinit import log as logging
-from cloudinit import type_utils
-from cloudinit import user_data as ud
-from cloudinit import util
-
-from cloudinit.filters import launch_index
-from cloudinit.reporting import events
-
-DSMODE_DISABLED = "disabled"
-DSMODE_LOCAL = "local"
-DSMODE_NETWORK = "net"
-DSMODE_PASS = "pass"
-
-VALID_DSMODES = [DSMODE_DISABLED, DSMODE_LOCAL, DSMODE_NETWORK]
-
-DEP_FILESYSTEM = "FILESYSTEM"
-DEP_NETWORK = "NETWORK"
-DS_PREFIX = 'DataSource'
-
-LOG = logging.getLogger(__name__)
-
-
-class DataSourceNotFoundException(Exception):
- pass
-
-
-@six.add_metaclass(abc.ABCMeta)
-class DataSource(object):
-
- dsmode = DSMODE_NETWORK
-
- def __init__(self, sys_cfg, distro, paths, ud_proc=None):
- self.sys_cfg = sys_cfg
- self.distro = distro
- self.paths = paths
- self.userdata = None
- self.metadata = None
- self.userdata_raw = None
- self.vendordata = None
- self.vendordata_raw = None
-
- # find the datasource config name.
- # remove 'DataSource' from classname on front, and remove 'Net' on end.
- # Both Foo and FooNet sources expect config in cfg['sources']['Foo']
- name = type_utils.obj_name(self)
- if name.startswith(DS_PREFIX):
- name = name[len(DS_PREFIX):]
- if name.endswith('Net'):
- name = name[0:-3]
-
- self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
- ("datasource", name), {})
- if not self.ds_cfg:
- self.ds_cfg = {}
-
- if not ud_proc:
- self.ud_proc = ud.UserDataProcessor(self.paths)
- else:
- self.ud_proc = ud_proc
-
- def __str__(self):
- return type_utils.obj_name(self)
-
- def get_userdata(self, apply_filter=False):
- if self.userdata is None:
- self.userdata = self.ud_proc.process(self.get_userdata_raw())
- if apply_filter:
- return self._filter_xdata(self.userdata)
- return self.userdata
-
- def get_vendordata(self):
- if self.vendordata is None:
- self.vendordata = self.ud_proc.process(self.get_vendordata_raw())
- return self.vendordata
-
- @property
- def launch_index(self):
- if not self.metadata:
- return None
- if 'launch-index' in self.metadata:
- return self.metadata['launch-index']
- return None
-
- def _filter_xdata(self, processed_ud):
- filters = [
- launch_index.Filter(util.safe_int(self.launch_index)),
- ]
- new_ud = processed_ud
- for f in filters:
- new_ud = f.apply(new_ud)
- return new_ud
-
- @property
- def is_disconnected(self):
- return False
-
- def get_userdata_raw(self):
- return self.userdata_raw
-
- def get_vendordata_raw(self):
- return self.vendordata_raw
-
- # the data sources' config_obj is a cloud-config formated
- # object that came to it from ways other than cloud-config
- # because cloud-config content would be handled elsewhere
- def get_config_obj(self):
- return {}
-
- def get_public_ssh_keys(self):
- return normalize_pubkey_data(self.metadata.get('public-keys'))
-
- def _remap_device(self, short_name):
- # LP: #611137
- # the metadata service may believe that devices are named 'sda'
- # when the kernel named them 'vda' or 'xvda'
- # we want to return the correct value for what will actually
- # exist in this instance
- mappings = {"sd": ("vd", "xvd", "vtb")}
- for (nfrom, tlist) in mappings.items():
- if not short_name.startswith(nfrom):
- continue
- for nto in tlist:
- cand = "/dev/%s%s" % (nto, short_name[len(nfrom):])
- if os.path.exists(cand):
- return cand
- return None
-
- def device_name_to_device(self, _name):
- # translate a 'name' to a device
- # the primary function at this point is on ec2
- # to consult metadata service, that has
- # ephemeral0: sdb
- # and return 'sdb' for input 'ephemeral0'
- return None
-
- def get_locale(self):
- return 'en_US.UTF-8'
-
- @property
- def availability_zone(self):
- return self.metadata.get('availability-zone',
- self.metadata.get('availability_zone'))
-
- @property
- def region(self):
- return self.metadata.get('region')
-
- def get_instance_id(self):
- if not self.metadata or 'instance-id' not in self.metadata:
- # Return a magic not really instance id string
- return "iid-datasource"
- return str(self.metadata['instance-id'])
-
- def get_hostname(self, fqdn=False, resolve_ip=False):
- defdomain = "localdomain"
- defhost = "localhost"
- domain = defdomain
-
- if not self.metadata or 'local-hostname' not in self.metadata:
- # this is somewhat questionable really.
- # the cloud datasource was asked for a hostname
- # and didn't have one. raising error might be more appropriate
- # but instead, basically look up the existing hostname
- toks = []
- hostname = util.get_hostname()
- fqdn = util.get_fqdn_from_hosts(hostname)
- if fqdn and fqdn.find(".") > 0:
- toks = str(fqdn).split(".")
- elif hostname:
- toks = [hostname, defdomain]
- else:
- toks = [defhost, defdomain]
- else:
- # if there is an ipv4 address in 'local-hostname', then
- # make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
- lhost = self.metadata['local-hostname']
- if util.is_ipv4(lhost):
- toks = []
- if resolve_ip:
- toks = util.gethostbyaddr(lhost)
-
- if toks:
- toks = str(toks).split('.')
- else:
- toks = ["ip-%s" % lhost.replace(".", "-")]
- else:
- toks = lhost.split(".")
-
- if len(toks) > 1:
- hostname = toks[0]
- domain = '.'.join(toks[1:])
- else:
- hostname = toks[0]
-
- if fqdn:
- return "%s.%s" % (hostname, domain)
- else:
- return hostname
-
- def get_package_mirror_info(self):
- return self.distro.get_package_mirror_info(data_source=self)
-
- def check_instance_id(self, sys_cfg):
- # quickly (local check only) if self.instance_id is still
- return False
-
- @staticmethod
- def _determine_dsmode(candidates, default=None, valid=None):
- # return the first candidate that is non None, warn if not valid
- if default is None:
- default = DSMODE_NETWORK
-
- if valid is None:
- valid = VALID_DSMODES
-
- for candidate in candidates:
- if candidate is None:
- continue
- if candidate in valid:
- return candidate
- else:
- LOG.warn("invalid dsmode '%s', using default=%s",
- candidate, default)
- return default
-
- return default
-
- @property
- def network_config(self):
- return None
-
- @property
- def first_instance_boot(self):
- return
-
-
-def normalize_pubkey_data(pubkey_data):
- keys = []
-
- if not pubkey_data:
- return keys
-
- if isinstance(pubkey_data, six.string_types):
- return str(pubkey_data).splitlines()
-
- if isinstance(pubkey_data, (list, set)):
- return list(pubkey_data)
-
- if isinstance(pubkey_data, (dict)):
- for (_keyname, klist) in pubkey_data.items():
- # lp:506332 uec metadata service responds with
- # data that makes boto populate a string for 'klist' rather
- # than a list.
- if isinstance(klist, six.string_types):
- klist = [klist]
- if isinstance(klist, (list, set)):
- for pkey in klist:
- # There is an empty string at
- # the end of the keylist, trim it
- if pkey:
- keys.append(pkey)
-
- return keys
-
-
-def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
- ds_list = list_sources(cfg_list, ds_deps, pkg_list)
- ds_names = [type_utils.obj_name(f) for f in ds_list]
- mode = "network" if DEP_NETWORK in ds_deps else "local"
- LOG.debug("Searching for %s data source in: %s", mode, ds_names)
-
- for name, cls in zip(ds_names, ds_list):
- myrep = events.ReportEventStack(
- name="search-%s" % name.replace("DataSource", ""),
- description="searching for %s data from %s" % (mode, name),
- message="no %s data found from %s" % (mode, name),
- parent=reporter)
- try:
- with myrep:
- LOG.debug("Seeing if we can get any data from %s", cls)
- s = cls(sys_cfg, distro, paths)
- if s.get_data():
- myrep.message = "found %s data from %s" % (mode, name)
- return (s, type_utils.obj_name(cls))
- except Exception:
- util.logexc(LOG, "Getting data from %s failed", cls)
-
- msg = ("Did not find any data source,"
- " searched classes: (%s)") % (", ".join(ds_names))
- raise DataSourceNotFoundException(msg)
-
-
-# Return a list of classes that have the same depends as 'depends'
-# iterate through cfg_list, loading "DataSource*" modules
-# and calling their "get_datasource_list".
-# Return an ordered list of classes that match (if any)
-def list_sources(cfg_list, depends, pkg_list):
- src_list = []
- LOG.debug(("Looking for for data source in: %s,"
- " via packages %s that matches dependencies %s"),
- cfg_list, pkg_list, depends)
- for ds_name in cfg_list:
- if not ds_name.startswith(DS_PREFIX):
- ds_name = '%s%s' % (DS_PREFIX, ds_name)
- m_locs, _looked_locs = importer.find_module(ds_name,
- pkg_list,
- ['get_datasource_list'])
- for m_loc in m_locs:
- mod = importer.import_module(m_loc)
- lister = getattr(mod, "get_datasource_list")
- matches = lister(depends)
- if matches:
- src_list.extend(matches)
- break
- return src_list
-
-
-def instance_id_matches_system_uuid(instance_id, field='system-uuid'):
- # quickly (local check only) if self.instance_id is still valid
- # we check kernel command line or files.
- if not instance_id:
- return False
-
- dmi_value = util.read_dmi_data(field)
- if not dmi_value:
- return False
- return instance_id.lower() == dmi_value.lower()
-
-
-# 'depends' is a list of dependencies (DEP_FILESYSTEM)
-# ds_list is a list of 2 item lists
-# ds_list = [
-# ( class, ( depends-that-this-class-needs ) )
-# }
-# It returns a list of 'class' that matched these deps exactly
-# It mainly is a helper function for DataSourceCollections
-def list_from_depends(depends, ds_list):
- ret_list = []
- depset = set(depends)
- for (cls, deps) in ds_list:
- if depset == set(deps):
- ret_list.append(cls)
- return ret_list
diff --git a/cloudinit/sources/helpers/__init__.py b/cloudinit/sources/helpers/__init__.py
deleted file mode 100644
index 386225d5..00000000
--- a/cloudinit/sources/helpers/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# vi: ts=4 expandtab
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
deleted file mode 100644
index 63ccf10e..00000000
--- a/cloudinit/sources/helpers/azure.py
+++ /dev/null
@@ -1,279 +0,0 @@
-import logging
-import os
-import re
-import socket
-import struct
-import tempfile
-import time
-
-from contextlib import contextmanager
-from xml.etree import ElementTree
-
-from cloudinit import util
-
-
-LOG = logging.getLogger(__name__)
-
-
-@contextmanager
-def cd(newdir):
- prevdir = os.getcwd()
- os.chdir(os.path.expanduser(newdir))
- try:
- yield
- finally:
- os.chdir(prevdir)
-
-
-class AzureEndpointHttpClient(object):
-
- headers = {
- 'x-ms-agent-name': 'WALinuxAgent',
- 'x-ms-version': '2012-11-30',
- }
-
- def __init__(self, certificate):
- self.extra_secure_headers = {
- "x-ms-cipher-name": "DES_EDE3_CBC",
- "x-ms-guest-agent-public-x509-cert": certificate,
- }
-
- def get(self, url, secure=False):
- headers = self.headers
- if secure:
- headers = self.headers.copy()
- headers.update(self.extra_secure_headers)
- return util.read_file_or_url(url, headers=headers)
-
- def post(self, url, data=None, extra_headers=None):
- headers = self.headers
- if extra_headers is not None:
- headers = self.headers.copy()
- headers.update(extra_headers)
- return util.read_file_or_url(url, data=data, headers=headers)
-
-
-class GoalState(object):
-
- def __init__(self, xml, http_client):
- self.http_client = http_client
- self.root = ElementTree.fromstring(xml)
- self._certificates_xml = None
-
- def _text_from_xpath(self, xpath):
- element = self.root.find(xpath)
- if element is not None:
- return element.text
- return None
-
- @property
- def container_id(self):
- return self._text_from_xpath('./Container/ContainerId')
-
- @property
- def incarnation(self):
- return self._text_from_xpath('./Incarnation')
-
- @property
- def instance_id(self):
- return self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance/InstanceId')
-
- @property
- def certificates_xml(self):
- if self._certificates_xml is None:
- url = self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance'
- '/Configuration/Certificates')
- if url is not None:
- self._certificates_xml = self.http_client.get(
- url, secure=True).contents
- return self._certificates_xml
-
-
-class OpenSSLManager(object):
-
- certificate_names = {
- 'private_key': 'TransportPrivate.pem',
- 'certificate': 'TransportCert.pem',
- }
-
- def __init__(self):
- self.tmpdir = tempfile.mkdtemp()
- self.certificate = None
- self.generate_certificate()
-
- def clean_up(self):
- util.del_dir(self.tmpdir)
-
- def generate_certificate(self):
- LOG.debug('Generating certificate for communication with fabric...')
- if self.certificate is not None:
- LOG.debug('Certificate already generated.')
- return
- with cd(self.tmpdir):
- util.subp([
- 'openssl', 'req', '-x509', '-nodes', '-subj',
- '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
- '-keyout', self.certificate_names['private_key'],
- '-out', self.certificate_names['certificate'],
- ])
- certificate = ''
- for line in open(self.certificate_names['certificate']):
- if "CERTIFICATE" not in line:
- certificate += line.rstrip()
- self.certificate = certificate
- LOG.debug('New certificate generated.')
-
- def parse_certificates(self, certificates_xml):
- tag = ElementTree.fromstring(certificates_xml).find(
- './/Data')
- certificates_content = tag.text
- lines = [
- b'MIME-Version: 1.0',
- b'Content-Disposition: attachment; filename="Certificates.p7m"',
- b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"',
- b'Content-Transfer-Encoding: base64',
- b'',
- certificates_content.encode('utf-8'),
- ]
- with cd(self.tmpdir):
- with open('Certificates.p7m', 'wb') as f:
- f.write(b'\n'.join(lines))
- out, _ = util.subp(
- 'openssl cms -decrypt -in Certificates.p7m -inkey'
- ' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
- ' -password pass:'.format(**self.certificate_names),
- shell=True)
- private_keys, certificates = [], []
- current = []
- for line in out.splitlines():
- current.append(line)
- if re.match(r'[-]+END .*?KEY[-]+$', line):
- private_keys.append('\n'.join(current))
- current = []
- elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
- certificates.append('\n'.join(current))
- current = []
- keys = []
- for certificate in certificates:
- with cd(self.tmpdir):
- public_key, _ = util.subp(
- 'openssl x509 -noout -pubkey |'
- 'ssh-keygen -i -m PKCS8 -f /dev/stdin',
- data=certificate,
- shell=True)
- keys.append(public_key)
- return keys
-
-
-class WALinuxAgentShim(object):
-
- REPORT_READY_XML_TEMPLATE = '\n'.join([
- '<?xml version="1.0" encoding="utf-8"?>',
- '<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
- ' xmlns:xsd="http://www.w3.org/2001/XMLSchema">',
- ' <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>',
- ' <Container>',
- ' <ContainerId>{container_id}</ContainerId>',
- ' <RoleInstanceList>',
- ' <Role>',
- ' <InstanceId>{instance_id}</InstanceId>',
- ' <Health>',
- ' <State>Ready</State>',
- ' </Health>',
- ' </Role>',
- ' </RoleInstanceList>',
- ' </Container>',
- '</Health>'])
-
- def __init__(self):
- LOG.debug('WALinuxAgentShim instantiated...')
- self.endpoint = self.find_endpoint()
- self.openssl_manager = None
- self.values = {}
-
- def clean_up(self):
- if self.openssl_manager is not None:
- self.openssl_manager.clean_up()
-
- @staticmethod
- def get_ip_from_lease_value(lease_value):
- unescaped_value = lease_value.replace('\\', '')
- if len(unescaped_value) > 4:
- hex_string = ''
- for hex_pair in unescaped_value.split(':'):
- if len(hex_pair) == 1:
- hex_pair = '0' + hex_pair
- hex_string += hex_pair
- packed_bytes = struct.pack(
- '>L', int(hex_string.replace(':', ''), 16))
- else:
- packed_bytes = unescaped_value.encode('utf-8')
- return socket.inet_ntoa(packed_bytes)
-
- @staticmethod
- def find_endpoint():
- LOG.debug('Finding Azure endpoint...')
- content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
- value = None
- for line in content.splitlines():
- if 'unknown-245' in line:
- value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
- if value is None:
- raise ValueError('No endpoint found in DHCP config.')
- endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
- LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
- return endpoint_ip_address
-
- def register_with_azure_and_fetch_data(self):
- self.openssl_manager = OpenSSLManager()
- http_client = AzureEndpointHttpClient(self.openssl_manager.certificate)
- LOG.info('Registering with Azure...')
- attempts = 0
- while True:
- try:
- response = http_client.get(
- 'http://{0}/machine/?comp=goalstate'.format(self.endpoint))
- except Exception:
- if attempts < 10:
- time.sleep(attempts + 1)
- else:
- raise
- else:
- break
- attempts += 1
- LOG.debug('Successfully fetched GoalState XML.')
- goal_state = GoalState(response.contents, http_client)
- public_keys = []
- if goal_state.certificates_xml is not None:
- LOG.debug('Certificate XML found; parsing out public keys.')
- public_keys = self.openssl_manager.parse_certificates(
- goal_state.certificates_xml)
- data = {
- 'public-keys': public_keys,
- }
- self._report_ready(goal_state, http_client)
- return data
-
- def _report_ready(self, goal_state, http_client):
- LOG.debug('Reporting ready to Azure fabric.')
- document = self.REPORT_READY_XML_TEMPLATE.format(
- incarnation=goal_state.incarnation,
- container_id=goal_state.container_id,
- instance_id=goal_state.instance_id,
- )
- http_client.post(
- "http://{0}/machine?comp=health".format(self.endpoint),
- data=document,
- extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
- )
- LOG.info('Reported ready to Azure fabric.')
-
-
-def get_metadata_from_fabric():
- shim = WALinuxAgentShim()
- try:
- return shim.register_with_azure_and_fetch_data()
- finally:
- shim.clean_up()
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
deleted file mode 100644
index 2e7a1d47..00000000
--- a/cloudinit/sources/helpers/openstack.py
+++ /dev/null
@@ -1,648 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Yahoo! Inc.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Joshua Harlow <harlowja@yahoo-inc.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import abc
-import base64
-import copy
-import functools
-import os
-
-import six
-
-from cloudinit import ec2_utils
-from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-
-# For reference: http://tinyurl.com/laora4c
-
-LOG = logging.getLogger(__name__)
-
-FILES_V1 = {
- # Path <-> (metadata key name, translator function, default value)
- 'etc/network/interfaces': ('network_config', lambda x: x, ''),
- 'meta.js': ('meta_js', util.load_json, {}),
- "root/.ssh/authorized_keys": ('authorized_keys', lambda x: x, ''),
-}
-KEY_COPIES = (
- # Cloud-init metadata names <-> (metadata key, is required)
- ('local-hostname', 'hostname', False),
- ('instance-id', 'uuid', True),
-)
-OS_LATEST = 'latest'
-OS_FOLSOM = '2012-08-10'
-OS_GRIZZLY = '2013-04-04'
-OS_HAVANA = '2013-10-17'
-OS_LIBERTY = '2015-10-15'
-# keep this in chronological order. new supported versions go at the end.
-OS_VERSIONS = (
- OS_FOLSOM,
- OS_GRIZZLY,
- OS_HAVANA,
- OS_LIBERTY,
-)
-
-
-class NonReadable(IOError):
- pass
-
-
-class BrokenMetadata(IOError):
- pass
-
-
-class SourceMixin(object):
- def _ec2_name_to_device(self, name):
- if not self.ec2_metadata:
- return None
- bdm = self.ec2_metadata.get('block-device-mapping', {})
- for (ent_name, device) in bdm.items():
- if name == ent_name:
- return device
- return None
-
- def get_public_ssh_keys(self):
- name = "public_keys"
- if self.version == 1:
- name = "public-keys"
- return sources.normalize_pubkey_data(self.metadata.get(name))
-
- def _os_name_to_device(self, name):
- device = None
- try:
- criteria = 'LABEL=%s' % (name)
- if name == 'swap':
- criteria = 'TYPE=%s' % (name)
- dev_entries = util.find_devs_with(criteria)
- if dev_entries:
- device = dev_entries[0]
- except util.ProcessExecutionError:
- pass
- return device
-
- def _validate_device_name(self, device):
- if not device:
- return None
- if not device.startswith("/"):
- device = "/dev/%s" % device
- if os.path.exists(device):
- return device
- # Durn, try adjusting the mapping
- remapped = self._remap_device(os.path.basename(device))
- if remapped:
- LOG.debug("Remapped device name %s => %s", device, remapped)
- return remapped
- return None
-
- def device_name_to_device(self, name):
- # Translate a 'name' to a 'physical' device
- if not name:
- return None
- # Try the ec2 mapping first
- names = [name]
- if name == 'root':
- names.insert(0, 'ami')
- if name == 'ami':
- names.append('root')
- device = None
- LOG.debug("Using ec2 style lookup to find device %s", names)
- for n in names:
- device = self._ec2_name_to_device(n)
- device = self._validate_device_name(device)
- if device:
- break
- # Try the openstack way second
- if not device:
- LOG.debug("Using openstack style lookup to find device %s", names)
- for n in names:
- device = self._os_name_to_device(n)
- device = self._validate_device_name(device)
- if device:
- break
- # Ok give up...
- if not device:
- return None
- else:
- LOG.debug("Mapped %s to device %s", name, device)
- return device
-
-
-@six.add_metaclass(abc.ABCMeta)
-class BaseReader(object):
-
- def __init__(self, base_path):
- self.base_path = base_path
-
- @abc.abstractmethod
- def _path_join(self, base, *add_ons):
- pass
-
- @abc.abstractmethod
- def _path_read(self, path, decode=False):
- pass
-
- @abc.abstractmethod
- def _fetch_available_versions(self):
- pass
-
- @abc.abstractmethod
- def _read_ec2_metadata(self):
- pass
-
- def _find_working_version(self):
- try:
- versions_available = self._fetch_available_versions()
- except Exception as e:
- LOG.debug("Unable to read openstack versions from %s due to: %s",
- self.base_path, e)
- versions_available = []
-
- # openstack.OS_VERSIONS is stored in chronological order, so
- # reverse it to check newest first.
- supported = [v for v in reversed(list(OS_VERSIONS))]
- selected_version = OS_LATEST
-
- for potential_version in supported:
- if potential_version not in versions_available:
- continue
- selected_version = potential_version
- break
-
- LOG.debug("Selected version '%s' from %s", selected_version,
- versions_available)
- return selected_version
-
- def _read_content_path(self, item, decode=False):
- path = item.get('content_path', '').lstrip("/")
- path_pieces = path.split("/")
- valid_pieces = [p for p in path_pieces if len(p)]
- if not valid_pieces:
- raise BrokenMetadata("Item %s has no valid content path" % (item))
- path = self._path_join(self.base_path, "openstack", *path_pieces)
- return self._path_read(path, decode=decode)
-
- def read_v2(self):
- """Reads a version 2 formatted location.
-
- Return a dict with metadata, userdata, ec2-metadata, dsmode,
- network_config, files and version (2).
-
- If not a valid location, raise a NonReadable exception.
- """
-
- load_json_anytype = functools.partial(
- util.load_json, root_types=(dict, list) + six.string_types)
-
- def datafiles(version):
- files = {}
- files['metadata'] = (
- # File path to read
- self._path_join("openstack", version, 'meta_data.json'),
- # Is it required?
- True,
- # Translator function (applied after loading)
- util.load_json,
- )
- files['userdata'] = (
- self._path_join("openstack", version, 'user_data'),
- False,
- lambda x: x,
- )
- files['vendordata'] = (
- self._path_join("openstack", version, 'vendor_data.json'),
- False,
- load_json_anytype,
- )
- files['networkdata'] = (
- self._path_join("openstack", version, 'network_data.json'),
- False,
- load_json_anytype,
- )
- return files
-
- results = {
- 'userdata': '',
- 'version': 2,
- }
- data = datafiles(self._find_working_version())
- for (name, (path, required, translator)) in data.items():
- path = self._path_join(self.base_path, path)
- data = None
- found = False
- try:
- data = self._path_read(path)
- except IOError as e:
- if not required:
- LOG.debug("Failed reading optional path %s due"
- " to: %s", path, e)
- else:
- LOG.debug("Failed reading mandatory path %s due"
- " to: %s", path, e)
- else:
- found = True
- if required and not found:
- raise NonReadable("Missing mandatory path: %s" % path)
- if found and translator:
- try:
- data = translator(data)
- except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
- if found:
- results[name] = data
-
- metadata = results['metadata']
- if 'random_seed' in metadata:
- random_seed = metadata['random_seed']
- try:
- metadata['random_seed'] = base64.b64decode(random_seed)
- except (ValueError, TypeError) as e:
- raise BrokenMetadata("Badly formatted metadata"
- " random_seed entry: %s" % e)
-
- # load any files that were provided
- files = {}
- metadata_files = metadata.get('files', [])
- for item in metadata_files:
- if 'path' not in item:
- continue
- path = item['path']
- try:
- files[path] = self._read_content_path(item)
- except Exception as e:
- raise BrokenMetadata("Failed to read provided "
- "file %s: %s" % (path, e))
- results['files'] = files
-
- # The 'network_config' item in metadata is a content pointer
- # to the network config that should be applied. It is just a
- # ubuntu/debian '/etc/network/interfaces' file.
- net_item = metadata.get("network_config", None)
- if net_item:
- try:
- content = self._read_content_path(net_item, decode=True)
- results['network_config'] = content
- except IOError as e:
- raise BrokenMetadata("Failed to read network"
- " configuration: %s" % (e))
-
- # To openstack, user can specify meta ('nova boot --meta=key=value')
- # and those will appear under metadata['meta'].
- # if they specify 'dsmode' they're indicating the mode that they intend
- # for this datasource to operate in.
- try:
- results['dsmode'] = metadata['meta']['dsmode']
- except KeyError:
- pass
-
- # Read any ec2-metadata (if applicable)
- results['ec2-metadata'] = self._read_ec2_metadata()
-
- # Perform some misc. metadata key renames...
- for (target_key, source_key, is_required) in KEY_COPIES:
- if is_required and source_key not in metadata:
- raise BrokenMetadata("No '%s' entry in metadata" % source_key)
- if source_key in metadata:
- metadata[target_key] = metadata.get(source_key)
- return results
-
-
-class ConfigDriveReader(BaseReader):
- def __init__(self, base_path):
- super(ConfigDriveReader, self).__init__(base_path)
- self._versions = None
-
- def _path_join(self, base, *add_ons):
- components = [base] + list(add_ons)
- return os.path.join(*components)
-
- def _path_read(self, path, decode=False):
- return util.load_file(path, decode=decode)
-
- def _fetch_available_versions(self):
- if self._versions is None:
- path = self._path_join(self.base_path, 'openstack')
- found = [d for d in os.listdir(path)
- if os.path.isdir(os.path.join(path))]
- self._versions = sorted(found)
- return self._versions
-
- def _read_ec2_metadata(self):
- path = self._path_join(self.base_path,
- 'ec2', 'latest', 'meta-data.json')
- if not os.path.exists(path):
- return {}
- else:
- try:
- return util.load_json(self._path_read(path))
- except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
-
- def read_v1(self):
- """Reads a version 1 formatted location.
-
- Return a dict with metadata, userdata, dsmode, files and version (1).
-
- If not a valid path, raise a NonReadable exception.
- """
-
- found = {}
- for name in FILES_V1.keys():
- path = self._path_join(self.base_path, name)
- if os.path.exists(path):
- found[name] = path
- if len(found) == 0:
- raise NonReadable("%s: no files found" % (self.base_path))
-
- md = {}
- for (name, (key, translator, default)) in FILES_V1.items():
- if name in found:
- path = found[name]
- try:
- contents = self._path_read(path)
- except IOError:
- raise BrokenMetadata("Failed to read: %s" % path)
- try:
- md[key] = translator(contents)
- except Exception as e:
- raise BrokenMetadata("Failed to process "
- "path %s: %s" % (path, e))
- else:
- md[key] = copy.deepcopy(default)
-
- keydata = md['authorized_keys']
- meta_js = md['meta_js']
-
- # keydata in meta_js is preferred over "injected"
- keydata = meta_js.get('public-keys', keydata)
- if keydata:
- lines = keydata.splitlines()
- md['public-keys'] = [l for l in lines
- if len(l) and not l.startswith("#")]
-
- # config-drive-v1 has no way for openstack to provide the instance-id
- # so we copy that into metadata from the user input
- if 'instance-id' in meta_js:
- md['instance-id'] = meta_js['instance-id']
-
- results = {
- 'version': 1,
- 'metadata': md,
- }
-
- # allow the user to specify 'dsmode' in a meta tag
- if 'dsmode' in meta_js:
- results['dsmode'] = meta_js['dsmode']
-
- # config-drive-v1 has no way of specifying user-data, so the user has
- # to cheat and stuff it in a meta tag also.
- results['userdata'] = meta_js.get('user-data', '')
-
- # this implementation does not support files other than
- # network/interfaces and authorized_keys...
- results['files'] = {}
-
- return results
-
-
-class MetadataReader(BaseReader):
- def __init__(self, base_url, ssl_details=None, timeout=5, retries=5):
- super(MetadataReader, self).__init__(base_url)
- self.ssl_details = ssl_details
- self.timeout = float(timeout)
- self.retries = int(retries)
- self._versions = None
-
- def _fetch_available_versions(self):
- # <baseurl>/openstack/ returns a newline separated list of versions
- if self._versions is not None:
- return self._versions
- found = []
- version_path = self._path_join(self.base_path, "openstack")
- content = self._path_read(version_path)
- for line in content.splitlines():
- line = line.strip()
- if not line:
- continue
- found.append(line)
- self._versions = found
- return self._versions
-
- def _path_read(self, path, decode=False):
-
- def should_retry_cb(_request_args, cause):
- try:
- code = int(cause.code)
- if code >= 400:
- return False
- except (TypeError, ValueError):
- # Older versions of requests didn't have a code.
- pass
- return True
-
- response = url_helper.readurl(path,
- retries=self.retries,
- ssl_details=self.ssl_details,
- timeout=self.timeout,
- exception_cb=should_retry_cb)
- if decode:
- return response.contents.decode()
- else:
- return response.contents
-
- def _path_join(self, base, *add_ons):
- return url_helper.combine_url(base, *add_ons)
-
- def _read_ec2_metadata(self):
- return ec2_utils.get_instance_metadata(ssl_details=self.ssl_details,
- timeout=self.timeout,
- retries=self.retries)
-
-
-# Convert OpenStack ConfigDrive NetworkData json to network_config yaml
-def convert_net_json(network_json=None, known_macs=None):
- """Return a dictionary of network_config by parsing provided
- OpenStack ConfigDrive NetworkData json format
-
- OpenStack network_data.json provides a 3 element dictionary
- - "links" (links are network devices, physical or virtual)
- - "networks" (networks are ip network configurations for one or more
- links)
- - services (non-ip services, like dns)
-
- networks and links are combined via network items referencing specific
- links via a 'link_id' which maps to a links 'id' field.
-
- To convert this format to network_config yaml, we first iterate over the
- links and then walk the network list to determine if any of the networks
- utilize the current link; if so we generate a subnet entry for the device
-
- We also need to map network_data.json fields to network_config fields. For
- example, the network_data links 'id' field is equivalent to network_config
- 'name' field for devices. We apply more of this mapping to the various
- link types that we encounter.
-
- There are additional fields that are populated in the network_data.json
- from OpenStack that are not relevant to network_config yaml, so we
- enumerate a dictionary of valid keys for network_yaml and apply filtering
- to drop these superflous keys from the network_config yaml.
- """
- if network_json is None:
- return None
-
- # dict of network_config key for filtering network_json
- valid_keys = {
- 'physical': [
- 'name',
- 'type',
- 'mac_address',
- 'subnets',
- 'params',
- 'mtu',
- ],
- 'subnet': [
- 'type',
- 'address',
- 'netmask',
- 'broadcast',
- 'metric',
- 'gateway',
- 'pointopoint',
- 'scope',
- 'dns_nameservers',
- 'dns_search',
- 'routes',
- ],
- }
-
- links = network_json.get('links', [])
- networks = network_json.get('networks', [])
- services = network_json.get('services', [])
-
- config = []
- for link in links:
- subnets = []
- cfg = dict((k, v) for k, v in link.items()
- if k in valid_keys['physical'])
- # 'name' is not in openstack spec yet, but we will support it if it is
- # present. The 'id' in the spec is currently implemented as the host
- # nic's name, meaning something like 'tap-adfasdffd'. We do not want
- # to name guest devices with such ugly names.
- if 'name' in link:
- cfg['name'] = link['name']
-
- for network in [n for n in networks
- if n['link'] == link['id']]:
- subnet = dict((k, v) for k, v in network.items()
- if k in valid_keys['subnet'])
- if 'dhcp' in network['type']:
- t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4'
- subnet.update({
- 'type': t,
- })
- else:
- subnet.update({
- 'type': 'static',
- 'address': network.get('ip_address'),
- })
- if network['type'] == 'ipv4':
- subnet['ipv4'] = True
- if network['type'] == 'ipv6':
- subnet['ipv6'] = True
- subnets.append(subnet)
- cfg.update({'subnets': subnets})
- if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge']:
- cfg.update({
- 'type': 'physical',
- 'mac_address': link['ethernet_mac_address']})
- elif link['type'] in ['bond']:
- params = {}
- for k, v in link.items():
- if k == 'bond_links':
- continue
- elif k.startswith('bond'):
- params.update({k: v})
- cfg.update({
- 'bond_interfaces': copy.deepcopy(link['bond_links']),
- 'params': params,
- })
- elif link['type'] in ['vlan']:
- cfg.update({
- 'name': "%s.%s" % (link['vlan_link'],
- link['vlan_id']),
- 'vlan_link': link['vlan_link'],
- 'vlan_id': link['vlan_id'],
- 'mac_address': link['vlan_mac_address'],
- })
- else:
- raise ValueError(
- 'Unknown network_data link type: %s' % link['type'])
-
- config.append(cfg)
-
- need_names = [d for d in config
- if d.get('type') == 'physical' and 'name' not in d]
-
- if need_names:
- if known_macs is None:
- known_macs = net.get_interfaces_by_mac()
-
- for d in need_names:
- mac = d.get('mac_address')
- if not mac:
- raise ValueError("No mac_address or name entry for %s" % d)
- if mac not in known_macs:
- raise ValueError("Unable to find a system nic for %s" % d)
- d['name'] = known_macs[mac]
-
- for service in services:
- cfg = service
- cfg.update({'type': 'nameserver'})
- config.append(cfg)
-
- return {'version': 1, 'config': config}
-
-
-def convert_vendordata_json(data, recurse=True):
- """data: a loaded json *object* (strings, arrays, dicts).
- return something suitable for cloudinit vendordata_raw.
-
- if data is:
- None: return None
- string: return string
- list: return data
- the list is then processed in UserDataProcessor
- dict: return convert_vendordata_json(data.get('cloud-init'))
- """
- if not data:
- return None
- if isinstance(data, six.string_types):
- return data
- if isinstance(data, list):
- return copy.deepcopy(data)
- if isinstance(data, dict):
- if recurse is True:
- return convert_vendordata_json(data.get('cloud-init'),
- recurse=False)
- raise ValueError("vendordata['cloud-init'] cannot be dict")
- raise ValueError("Unknown data type for vendordata: %s" % type(data))
diff --git a/cloudinit/sources/helpers/vmware/__init__.py b/cloudinit/sources/helpers/vmware/__init__.py
deleted file mode 100644
index 386225d5..00000000
--- a/cloudinit/sources/helpers/vmware/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# vi: ts=4 expandtab
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/sources/helpers/vmware/imc/__init__.py b/cloudinit/sources/helpers/vmware/imc/__init__.py
deleted file mode 100644
index 386225d5..00000000
--- a/cloudinit/sources/helpers/vmware/imc/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# vi: ts=4 expandtab
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
deleted file mode 100644
index fb53ec1d..00000000
--- a/cloudinit/sources/helpers/vmware/imc/boot_proto.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-class BootProtoEnum(object):
- """Specifies the NIC Boot Settings."""
-
- DHCP = 'dhcp'
- STATIC = 'static'
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
deleted file mode 100644
index d645c497..00000000
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from .nic import Nic
-
-
-class Config(object):
- """
- Stores the Contents specified in the Customization
- Specification file.
- """
-
- DNS = 'DNS|NAMESERVER|'
- SUFFIX = 'DNS|SUFFIX|'
- PASS = 'PASSWORD|-PASS'
- TIMEZONE = 'DATETIME|TIMEZONE'
- UTC = 'DATETIME|UTC'
- HOSTNAME = 'NETWORK|HOSTNAME'
- DOMAINNAME = 'NETWORK|DOMAINNAME'
-
- def __init__(self, configFile):
- self._configFile = configFile
-
- @property
- def host_name(self):
- """Return the hostname."""
- return self._configFile.get(Config.HOSTNAME, None)
-
- @property
- def domain_name(self):
- """Return the domain name."""
- return self._configFile.get(Config.DOMAINNAME, None)
-
- @property
- def timezone(self):
- """Return the timezone."""
- return self._configFile.get(Config.TIMEZONE, None)
-
- @property
- def utc(self):
- """Retrieves whether to set time to UTC or Local."""
- return self._configFile.get(Config.UTC, None)
-
- @property
- def admin_password(self):
- """Return the root password to be set."""
- return self._configFile.get(Config.PASS, None)
-
- @property
- def name_servers(self):
- """Return the list of DNS servers."""
- res = []
- cnt = self._configFile.get_count_with_prefix(Config.DNS)
- for i in range(1, cnt + 1):
- key = Config.DNS + str(i)
- res.append(self._configFile[key])
-
- return res
-
- @property
- def dns_suffixes(self):
- """Return the list of DNS Suffixes."""
- res = []
- cnt = self._configFile.get_count_with_prefix(Config.SUFFIX)
- for i in range(1, cnt + 1):
- key = Config.SUFFIX + str(i)
- res.append(self._configFile[key])
-
- return res
-
- @property
- def nics(self):
- """Return the list of associated NICs."""
- res = []
- nics = self._configFile['NIC-CONFIG|NICS']
- for nic in nics.split(','):
- res.append(Nic(nic, self._configFile))
-
- return res
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
deleted file mode 100644
index bb9fb7dc..00000000
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-
-try:
- import configparser
-except ImportError:
- import ConfigParser as configparser
-
-from .config_source import ConfigSource
-
-logger = logging.getLogger(__name__)
-
-
class ConfigFile(ConfigSource, dict):
    """ConfigSource that loads settings from an INI-style config file.

    Every option is flattened to a '<category>|<key>' entry stored
    directly in the dict base class.
    """

    def __init__(self, filename):
        # (dropped a stray 'pass' that followed this call)
        self._loadConfigFile(filename)

    def _insertKey(self, key, val):
        """
        Inserts a Key Value pair.

        Keys starting with '-' (or containing '|-') are considered
        sensitive and their values are masked in the debug log.

        Keyword arguments:
        key -- The key to insert
        val -- The value to insert for the key

        """
        key = key.strip()
        val = val.strip()

        if key.startswith('-') or '|-' in key:
            canLog = False
        else:
            canLog = True

        # "sensitive" settings shall not be logged
        # (logging uses lazy %-args so the message is only built when
        # the DEBUG level is enabled)
        if canLog:
            logger.debug("ADDED KEY-VAL :: '%s' = '%s'", key, val)
        else:
            logger.debug("ADDED KEY-VAL :: '%s' = '*****************'", key)

        self[key] = val

    def _loadConfigFile(self, filename):
        """
        Parses properties from the specified config file.

        Any previously available properties will be removed.
        Sensitive data will not be logged in case the key starts
        from '-'.

        Keyword arguments:
        filename - The full path to the config file.
        """
        logger.info('Parsing the config file %s.', filename)

        config = configparser.ConfigParser()
        # Keep option names case-sensitive; ConfigParser lower-cases
        # them by default and the spec keys are case-significant.
        config.optionxform = str
        config.read(filename)

        self.clear()

        for category in config.sections():
            logger.debug("FOUND CATEGORY = '%s'", category)

            for (key, value) in config.items(category):
                self._insertKey(category + '|' + key, value)

    def should_keep_current_value(self, key):
        """
        Determines whether a value for a property must be kept.

        If the propery is missing, it is treated as it should be not
        changed by the engine.

        Keyword arguments:
        key -- The key to search for.
        """
        # helps to distinguish from "empty" value which is used to indicate
        # "removal"
        return key not in self

    def should_remove_current_value(self, key):
        """
        Determines whether a value for the property must be removed.

        If the specified key is empty, it is treated as it should be
        removed by the engine.

        Return true if the value can be removed, false otherwise.

        Keyword arguments:
        key -- The key to search for.
        """
        # helps to distinguish from "missing" value which is used to indicate
        # "keeping unchanged"
        if key in self:
            return not bool(self[key])
        else:
            return False

    def get_count_with_prefix(self, prefix):
        """
        Return the total count of keys that start with the specified prefix.

        Keyword arguments:
        prefix -- prefix of the key
        """
        return len([key for key in self if key.startswith(prefix)])
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
deleted file mode 100644
index b28830f5..00000000
--- a/cloudinit/sources/helpers/vmware/imc/config_namespace.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from .config_source import ConfigSource
-
-
class ConfigNamespace(ConfigSource):
    """Marker class representing the Config Namespace source."""
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
deleted file mode 100644
index 511cc918..00000000
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2016 VMware INC.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-import os
-import re
-
-from cloudinit import util
-
-logger = logging.getLogger(__name__)
-
-
class NicConfigurator(object):
    """Generates and applies /etc/network/interfaces configuration for
    the nics described in a customization specification.
    """

    def __init__(self, nics):
        """
        Initialize the Nic Configurator
        @param nics (list) an array of nics to configure
        """
        self.nics = nics
        self.mac2Name = {}
        self.ipv4PrimaryGateway = None
        self.ipv6PrimaryGateway = None
        self.find_devices()
        self._primaryNic = self.get_primary_nic()

    def get_primary_nic(self):
        """
        Retrieve the primary nic if it exists
        @return (NicBase): the primary nic if exists, None otherwise
        @raise Exception: if more than one nic is flagged primary
        """
        primary_nics = [nic for nic in self.nics if nic.primary]
        if not primary_nics:
            return None
        elif len(primary_nics) > 1:
            raise Exception('There can only be one primary nic',
                            [nic.mac for nic in primary_nics])
        else:
            return primary_nics[0]

    def find_devices(self):
        """
        Create the mac2Name dictionary
        The mac address(es) are in the lower case
        """
        cmd = ['ip', 'addr', 'show']
        (output, err) = util.subp(cmd)
        # Each interface section in the output starts with '<idx>: <name>:'.
        sections = re.split(r'\n\d+: ', '\n' + output)[1:]

        macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
        for section in sections:
            match = re.search(macPat, section)
            if not match:  # Only keep info about nics
                continue
            mac = match.group(1).lower()
            name = section.split(':', 1)[0]
            self.mac2Name[mac] = name

    def gen_one_nic(self, nic):
        """
        Return the lines needed to configure a nic
        @return (str list): the string list to configure the nic
        @param nic (NicBase): the nic to configure
        @raise ValueError: if the nic's MAC matches no known device
        """
        lines = []
        name = self.mac2Name.get(nic.mac.lower())
        if not name:
            raise ValueError('No known device has MACADDR: %s' % nic.mac)

        if nic.onboot:
            lines.append('auto %s' % name)

        # Customize IPv4
        lines.extend(self.gen_ipv4(name, nic))

        # Customize IPv6
        lines.extend(self.gen_ipv6(name, nic))

        lines.append('')

        return lines

    def gen_ipv4(self, name, nic):
        """
        Return the lines needed to configure the IPv4 setting of a nic
        @return (str list): the string list to configure the gateways
        @param name (str): name of the nic
        @param nic (NicBase): the nic to configure
        """
        lines = []

        bootproto = nic.bootProto.lower()
        # A 'disabled' IPv4 mode overrides the boot protocol.
        if nic.ipv4_mode.lower() == 'disabled':
            bootproto = 'manual'
        lines.append('iface %s inet %s' % (name, bootproto))

        if bootproto != 'static':
            return lines

        # Static Ipv4
        v4 = nic.staticIpv4
        if v4.ip:
            lines.append('    address %s' % v4.ip)
        if v4.netmask:
            lines.append('    netmask %s' % v4.netmask)

        # Add the primary gateway
        if nic.primary and v4.gateways:
            self.ipv4PrimaryGateway = v4.gateways[0]
            lines.append('    gateway %s metric 0' % self.ipv4PrimaryGateway)
            return lines

        # Add routes if there is no primary nic
        if not self._primaryNic:
            lines.extend(self.gen_ipv4_route(nic, v4.gateways))

        return lines

    def gen_ipv4_route(self, nic, gateways):
        """
        Return the lines needed to configure additional Ipv4 route
        @return (str list): the string list to configure the gateways
        @param nic (NicBase): the nic to configure
        @param gateways (str list): the list of gateways
        """
        lines = []

        for gateway in gateways:
            lines.append('    up route add default gw %s metric 10000' %
                         gateway)

        return lines

    def gen_ipv6(self, name, nic):
        """
        Return the lines needed to configure the gateways for a nic
        @return (str list): the string list to configure the gateways
        @param name (str): name of the nic
        @param nic (NicBase): the nic to configure
        """
        lines = []

        if not nic.staticIpv6:
            return lines

        # Static Ipv6: first address goes on the iface stanza, the rest
        # are added with ifconfig on 'up'.
        addrs = nic.staticIpv6
        lines.append('iface %s inet6 static' % name)
        lines.append('    address %s' % addrs[0].ip)
        lines.append('    netmask %s' % addrs[0].netmask)

        for addr in addrs[1:]:
            lines.append('    up ifconfig %s inet6 add %s/%s' % (name, addr.ip,
                                                                 addr.netmask))
        # Add the primary gateway
        if nic.primary:
            for addr in addrs:
                if addr.gateway:
                    self.ipv6PrimaryGateway = addr.gateway
                    lines.append('    gateway %s' % self.ipv6PrimaryGateway)
                    return lines

        # Add routes if there is no primary nic
        if not self._primaryNic:
            lines.extend(self._genIpv6Route(name, nic, addrs))

        return lines

    def _genIpv6Route(self, name, nic, addrs):
        """Return 'up route' lines adding a default route per address."""
        lines = []

        for addr in addrs:
            lines.append('    up route -A inet6 add default gw '
                         '%s metric 10000' % addr.gateway)

        return lines

    def generate(self):
        """Return the lines that is needed to configure the nics"""
        lines = []
        lines.append('iface lo inet loopback')
        lines.append('auto lo')
        lines.append('')

        for nic in self.nics:
            lines.extend(self.gen_one_nic(nic))

        return lines

    def clear_dhcp(self):
        """Stop dhclient and delete any stored DHCP lease files."""
        logger.info('Clearing DHCP leases')

        # Ignore the return code 1.
        util.subp(["pkill", "dhclient"], rcs=[0, 1])
        # BUG FIX: the previous 'rm -f /var/lib/dhcp/*' was executed
        # without a shell, so the glob was never expanded and no lease
        # file was ever removed.  Expand and delete them in Python.
        lease_dir = "/var/lib/dhcp"
        if os.path.isdir(lease_dir):
            for entry in os.listdir(lease_dir):
                path = os.path.join(lease_dir, entry)
                if os.path.isfile(path):
                    os.remove(path)

    def if_down_up(self):
        """Bounce every configured interface, clearing leases in between."""
        names = []
        for nic in self.nics:
            name = self.mac2Name.get(nic.mac.lower())
            names.append(name)

        for name in names:
            logger.info('Bring down interface %s', name)
            util.subp(["ifdown", "%s" % name])

        self.clear_dhcp()

        for name in names:
            logger.info('Bring up interface %s', name)
            util.subp(["ifup", "%s" % name])

    def configure(self):
        """
        Configure the /etc/network/intefaces
        Make a back up of the original
        """
        containingDir = '/etc/network'

        interfaceFile = os.path.join(containingDir, 'interfaces')
        originalFile = os.path.join(containingDir,
                                    'interfaces.before_vmware_customization')

        # Back up the stock file only once (never overwrite the backup).
        if not os.path.exists(originalFile) and os.path.exists(interfaceFile):
            os.rename(interfaceFile, originalFile)

        lines = self.generate()
        with open(interfaceFile, 'w') as fp:
            for line in lines:
                fp.write('%s\n' % line)

        self.if_down_up()
diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py
deleted file mode 100644
index 28ef306a..00000000
--- a/cloudinit/sources/helpers/vmware/imc/config_source.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
class ConfigSource(object):
    """Marker base class for anything that can supply Config content."""
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
deleted file mode 100644
index d1546852..00000000
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2016 Canonical Ltd.
-# Copyright (C) 2016 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
class GuestCustErrorEnum(object):
    """Error codes reported by the Guest Customization engine."""

    # Customization completed without error.
    GUESTCUST_ERROR_SUCCESS = 0
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
deleted file mode 100644
index ce90c898..00000000
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2016 Canonical Ltd.
-# Copyright (C) 2016 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
class GuestCustEventEnum(object):
    """Event codes sent to the platform by the Guest Customization engine."""

    GUESTCUST_EVENT_CUSTOMIZE_FAILED = 100
    GUESTCUST_EVENT_NETWORK_SETUP_FAILED = 101
    # NOTE: 102 is not defined here; the gap is preserved as-is.
    GUESTCUST_EVENT_ENABLE_NICS = 103
    GUESTCUST_EVENT_QUERY_NICS = 104
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
deleted file mode 100644
index 422a096d..00000000
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2016 Canonical Ltd.
-# Copyright (C) 2016 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
class GuestCustStateEnum(object):
    """State codes of the Guest Customization engine."""

    GUESTCUST_STATE_RUNNING = 4
    GUESTCUST_STATE_DONE = 5
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
deleted file mode 100644
index c07c5949..00000000
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2016 Canonical Ltd.
-# Copyright (C) 2016 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-import os
-import time
-
-from cloudinit import util
-
-from .guestcust_event import GuestCustEventEnum
-from .guestcust_state import GuestCustStateEnum
-
-logger = logging.getLogger(__name__)
-
-
# Log file path reported back to the platform inside status RPC messages.
CLOUDINIT_LOG_FILE = "/var/log/cloud-init.log"
# Marker returned by the platform when NIC status queries are supported.
QUERY_NICS_SUPPORTED = "queryNicsSupported"
# Status string returned once the NICs are connected.
NICS_STATUS_CONNECTED = "connected"
-
-
-# This will send a RPC command to the underlying
-# VMware Virtualization Platform.
# This will send a RPC command to the underlying
# VMware Virtualization Platform.
def send_rpc(rpc):
    """Send the given RPC string via vmware-rpctool.

    Returns None for an empty command, otherwise an (out, err) tuple.
    On failure the preset error message is returned as err.
    """
    if not rpc:
        return None

    stdout = ""
    stderr = "Error sending the RPC command"

    try:
        logger.debug("Sending RPC command: %s", rpc)
        (stdout, stderr) = util.subp(["vmware-rpctool", rpc], rcs=[0])
        # Remove the trailing newline in the output.
        stdout = stdout.rstrip() if stdout else stdout
    except Exception as exc:
        logger.debug("Failed to send RPC command")
        logger.exception(exc)

    return (stdout, stderr)
-
-
-# This will send the customization status to the
-# underlying VMware Virtualization Platform.
# This will send the customization status to the
# underlying VMware Virtualization Platform.
def set_customization_status(custstate, custerror, errormessage=None):
    """Report customization state/error to the platform.

    The cloud-init log file path (optionally suffixed with '@<message>')
    is attached so the platform can locate diagnostics.
    Returns the (out, err) tuple from send_rpc.
    """
    if errormessage:
        message = "%s@%s" % (CLOUDINIT_LOG_FILE, errormessage)
    else:
        message = CLOUDINIT_LOG_FILE

    rpc = "deployPkg.update.state %d %d %s" % (custstate, custerror, message)
    return send_rpc(rpc)
-
-
-# This will read the file nics.txt in the specified directory
-# and return the content
# This will read the file nics.txt in the specified directory
# and return the content
def get_nics_to_enable(dirpath):
    """Return up to 1024 bytes of <dirpath>/nics.txt, or None.

    None is returned when dirpath is falsy or the file does not exist.
    """
    if not dirpath:
        return None

    nics_path = os.path.join(dirpath, "nics.txt")
    if not os.path.exists(nics_path):
        return None

    # Cap the read so a malformed file cannot blow up memory.
    with open(nics_path, 'r') as fp:
        return fp.read(1024)
-
-
-# This will send a RPC command to the underlying VMware Virtualization platform
-# and enable nics.
# This will send a RPC command to the underlying VMware Virtualization platform
# and enable nics.
def enable_nics(nics):
    """Ask the platform to connect the given nics, retrying a few times.

    Gives up silently (with a warning) when the platform never answers
    or when NIC status queries are unsupported.
    """
    if not nics:
        logger.warning("No Nics found")
        return

    retries = 5
    queries_per_retry = 5
    wait_seconds = 1

    for attempt in range(retries):
        logger.debug("Trying to connect interfaces, attempt %d", attempt)
        (out, _err) = set_customization_status(
            GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
            GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS,
            nics)
        if not out:
            # No answer yet; wait one full query window and retry.
            time.sleep(queries_per_retry * wait_seconds)
            continue

        if out != QUERY_NICS_SUPPORTED:
            logger.warning("NICS connection status query is not supported")
            return

        # Poll until the platform reports the NICs as connected.
        for count in range(queries_per_retry):
            (out, _err) = set_customization_status(
                GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
                GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS,
                nics)
            if out and out == NICS_STATUS_CONNECTED:
                logger.info("NICS are connected on %d second", count)
                return

            time.sleep(wait_seconds)

    logger.warning("Can't connect network interfaces after %d attempts",
                   retries)
diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
deleted file mode 100644
index 873ddc3b..00000000
--- a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
class Ipv4ModeEnum(object):
    """
    The IPv4 configuration mode which directly represents the user's goal.

    This mode effectively acts as a contract of the in-guest customization
    engine. It must be set based on what the user has requested and should
    not be changed by those layers. It's up to the in-guest engine to
    interpret and materialize the user's request.
    """

    # Legacy behavior: dhcp vs static is chosen purely by whether the
    # IPv4 address list is empty or not.
    IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE'

    # IPv4 must use static address. Reserved for future use
    IPV4_MODE_STATIC = 'STATIC'

    # IPv4 must use DHCPv4. Reserved for future use
    IPV4_MODE_DHCP = 'DHCP'

    # IPv4 must be disabled
    IPV4_MODE_DISABLED = 'DISABLED'

    # IPv4 settings should be left untouched. Reserved for future use
    IPV4_MODE_AS_IS = 'AS_IS'
diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py
deleted file mode 100644
index b5d704ea..00000000
--- a/cloudinit/sources/helpers/vmware/imc/nic.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from .boot_proto import BootProtoEnum
-from .nic_base import NicBase, StaticIpv4Base, StaticIpv6Base
-
-
class Nic(NicBase):
    """
    Concrete NicBase backed by the entries of the customization
    specification file for a single named nic.
    """

    def __init__(self, name, configFile):
        self._name = name
        self._configFile = configFile

    def _get(self, what):
        # Every setting of this nic lives under the '<name>|<setting>' key.
        return self._configFile.get(self.name + '|' + what, None)

    def _get_count_with_prefix(self, prefix):
        return self._configFile.get_count_with_prefix(self.name + prefix)

    @property
    def name(self):
        return self._name

    @property
    def mac(self):
        return self._get('MACADDR').lower()

    @property
    def primary(self):
        value = self._get('PRIMARY')
        if not value:
            return False
        return value.lower() in ('yes', 'true')

    @property
    def onboot(self):
        value = self._get('ONBOOT')
        if not value:
            return False
        return value.lower() in ('yes', 'true')

    @property
    def bootProto(self):
        value = self._get('BOOTPROTO')
        return value.lower() if value else ""

    @property
    def ipv4_mode(self):
        value = self._get('IPv4_MODE')
        return value.lower() if value else ""

    @property
    def staticIpv4(self):
        """
        Checks the BOOTPROTO property and returns StaticIPv4Addr
        configuration object if STATIC configuration is set.
        """
        if self.bootProto != BootProtoEnum.STATIC:
            return None
        return [StaticIpv4Addr(self)]

    @property
    def staticIpv6(self):
        count = self._get_count_with_prefix('|IPv6ADDR|')
        if not count:
            return None
        return [StaticIpv6Addr(self, idx) for idx in range(1, count + 1)]
-
-
class StaticIpv4Addr(StaticIpv4Base):
    """Static IPv4 settings read from a single nic's entries."""

    def __init__(self, nic):
        self._nic = nic

    @property
    def ip(self):
        return self._nic._get('IPADDR')

    @property
    def netmask(self):
        return self._nic._get('NETMASK')

    @property
    def gateways(self):
        # GATEWAY is a comma-separated list; None when absent.
        value = self._nic._get('GATEWAY')
        if not value:
            return None
        return [gw.strip() for gw in value.split(',')]
-
-
class StaticIpv6Addr(StaticIpv6Base):
    """Static IPv6 settings for one indexed address entry of a nic."""

    def __init__(self, nic, index):
        self._nic = nic
        self._index = index

    def _lookup(self, setting):
        # IPv6 keys carry a 1-based address index suffix.
        return self._nic._get('%s|%d' % (setting, self._index))

    @property
    def ip(self):
        return self._lookup('IPv6ADDR')

    @property
    def netmask(self):
        return self._lookup('IPv6NETMASK')

    @property
    def gateway(self):
        return self._lookup('IPv6GATEWAY')
diff --git a/cloudinit/sources/helpers/vmware/imc/nic_base.py b/cloudinit/sources/helpers/vmware/imc/nic_base.py
deleted file mode 100644
index 3c892db0..00000000
--- a/cloudinit/sources/helpers/vmware/imc/nic_base.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2015 Canonical Ltd.
-# Copyright (C) 2015 VMware Inc.
-#
-# Author: Sankar Tanguturi <stanguturi@vmware.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
class NicBase(object):
    """
    Abstract interface every nic implementation must satisfy.

    Each property below must be overridden by a subclass; accessing it
    on this base class raises NotImplementedError.
    """

    @property
    def mac(self):
        """The MACADDR setting of the nic (str)."""
        raise NotImplementedError('MACADDR')

    @property
    def primary(self):
        """The PRIMARY setting (bool).

        A primary NIC defines the default gateway; if no NIC is
        primary, no default gateway is configured.
        """
        raise NotImplementedError('PRIMARY')

    @property
    def onboot(self):
        """The ONBOOT setting (bool): bring the nic up at boot time."""
        raise NotImplementedError('ONBOOT')

    @property
    def bootProto(self):
        """The BOOTPROTO setting (str); valid values: dhcp, static."""
        raise NotImplementedError('BOOTPROTO')

    @property
    def ipv4_mode(self):
        """The IPv4_MODE setting (str); valid values:
        backwards_compatible, static, dhcp, disabled, as_is.
        """
        raise NotImplementedError('IPv4_MODE')

    @property
    def staticIpv4(self):
        """The static IPv4 configuration (StaticIpv4Base list)."""
        raise NotImplementedError('Static IPv4')

    @property
    def staticIpv6(self):
        """The static IPv6 configuration (StaticIpv6Base list)."""
        raise NotImplementedError('Static Ipv6')

    def validate(self):
        """Check constraints across properties.

        For example, staticIpv4 is required and must be non-empty when
        ipv4Mode is STATIC.
        """
        raise NotImplementedError('Check constraints on properties')
-
-
class StaticIpv4Base(object):
    """
    Abstract interface of a static IPv4 setting.

    Subclasses must override every property; accessing one here raises
    NotImplementedError.
    """

    @property
    def ip(self):
        """The IPADDR setting (str)."""
        raise NotImplementedError('Ipv4 Address')

    @property
    def netmask(self):
        """The NETMASK setting (str)."""
        raise NotImplementedError('Ipv4 NETMASK')

    @property
    def gateways(self):
        """The GATEWAY setting (str list): gateways on this subnet."""
        raise NotImplementedError('Ipv4 GATEWAY')
-
-
class StaticIpv6Base(object):
    """
    Abstract interface of a static IPv6 setting.

    Subclasses must override every property; accessing one here raises
    NotImplementedError.
    """

    @property
    def ip(self):
        """The IPv6ADDR setting (str)."""
        raise NotImplementedError('Ipv6 Address')

    @property
    def netmask(self):
        """The IPv6NETMASK setting (str)."""
        raise NotImplementedError('Ipv6 NETMASK')

    @property
    def gateway(self):
        """The IPv6GATEWAY setting (str)."""
        raise NotImplementedError('Ipv6 GATEWAY')