summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorScott Moser <smoser@brickies.net>2016-08-23 16:48:31 -0400
committerScott Moser <smoser@brickies.net>2016-08-23 16:48:31 -0400
commita54e05069b4d7e393166c0f880bbe374c15cada4 (patch)
treeffded0c6a2c2c6936a8689fe19b2d01f82308664
parent4b09e0ff5ae61f9bbd1a66cd6feda6f65b2a5a0b (diff)
downloadcloud-init-git-ubuntu/0.6.3-0ubuntu1.5.tar.gz
Import version 0.6.3-0ubuntu1.5ubuntu/0.6.3-0ubuntu1.5
Imported using git-dsc-commit.
-rw-r--r--debian/changelog19
-rw-r--r--debian/control1
-rw-r--r--debian/patches/lp-1031065-nonet-not-start-networking.patch78
-rw-r--r--debian/patches/lp-1037567-add-config-drive-v2-support.conf1113
-rw-r--r--debian/patches/lp-1077020-fix-ca-certificates-blanklines.patch163
-rw-r--r--debian/patches/series3
-rwxr-xr-xdebian/update-grub-legacy-ec217
7 files changed, 1388 insertions, 6 deletions
diff --git a/debian/changelog b/debian/changelog
index 089c8c6b..c650a307 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,22 @@
+cloud-init (0.6.3-0ubuntu1.5) precise-proposed; urgency=low
+
+ * debian/update-grub-legacy-ec2: consider kernels bootable on ec2
+ that are named -generic, in addition to -virtual. This fixes a problem
+ where the kernels installed by linux-lts-quantal were not added to
+ /boot/grub/menu.lst (LP: #1005551)
+ * debian/patches/lp-1077020-fix-ca-certificates-blanklines.patch: fix
+ adding of empty lines in ca-certificates file (LP: #1077020)
+ * debian/patches/lp-1031065-nonet-not-start-networking.patch: do not 'start
+ networking' in cloud-init-nonet upstart job. Doing so can cause networking
+ to be started earlier than it should be. Instead, add a
+ cloud-init-container job that runs only in a container and emits
+ net-device-added (LP: #1031065).
+ * debian/patches/lp-1037567-add-config-drive-v2-support.conf:
+ backport support for config-drive-v2 which is part of Openstack Nova in
+ Folsom and later. (LP: #1037567) (LP: #1100545)
+
+ -- Scott Moser <smoser@ubuntu.com> Wed, 16 Jan 2013 19:37:57 -0500
+
cloud-init (0.6.3-0ubuntu1.4) precise-proposed; urgency=high
* lp-1100491-fix-broken-add-sources.patch: fix use of
diff --git a/debian/control b/debian/control
index 6e0320d2..fdf36565 100644
--- a/debian/control
+++ b/debian/control
@@ -17,6 +17,7 @@ Package: cloud-init
Architecture: all
Depends: cloud-utils,
ifupdown (>= 0.6.10ubuntu5),
+ mountall (>= 2.36.3),
procps,
python,
python-cheetah,
diff --git a/debian/patches/lp-1031065-nonet-not-start-networking.patch b/debian/patches/lp-1031065-nonet-not-start-networking.patch
new file mode 100644
index 00000000..ae6c845b
--- /dev/null
+++ b/debian/patches/lp-1031065-nonet-not-start-networking.patch
@@ -0,0 +1,78 @@
+Author: Scott Moser <smoser@brickies.net>
+Bug: https://launchpad.net/bugs/1031065
+Applied-Upstream: revno 647
+Description: remove 'start networking' from cloud-init-nonet
+ do not 'start networking' in cloud-init-nonet, but add
+ cloud-init-container job that runs only if in container and emits
+ net-device-added (LP: #1031065)
+--- /dev/null
++++ b/upstart/cloud-init-container.conf
+@@ -0,0 +1,57 @@
++# in a lxc container, events for network interfaces do not
++# get created or may be missed. This helps cloud-init-nonet along
++# by emitting those events if they have not been emitted.
++
++start on container
++stop on static-network-up
++task
++
++emits net-device-added
++
++console output
++
++script
++ # if we are inside a container, then we may have to emit the ifup
++ # events for 'auto' network devices.
++ set -f
++
++ # from /etc/network/if-up.d/upstart
++ MARK_DEV_PREFIX="/run/network/ifup."
++ MARK_STATIC_NETWORK_EMITTED="/run/network/static-network-up-emitted"
++ # if the all static network interfaces are already up, nothing to do
++ [ -f "$MARK_STATIC_NETWORK_EMITTED" ] && exit 0
++
++ # ifquery will exit failure if there is no /run/network directory
++ # normally that would get created by one of network-interface.conf
++ # or networking.conf. But, it is possible that we're running
++ # before either of those have.
++ mkdir -p /run/network
++
++ # get list of all 'auto' interfaces. if there are none, nothing to do.
++ auto_list=$(ifquery --list --allow auto 2>/dev/null) || :
++ [ -z "$auto_list" ] && exit 0
++ set -- ${auto_list}
++ [ "$*" = "lo" ] && exit 0
++
++ # we only want to emit for interfaces that do not exist, so filter
++ # out anything that does not exist.
++ for iface in "$@"; do
++ [ "$iface" = "lo" ] && continue
++ # skip interfaces that are already up
++ [ -f "${MARK_DEV_PREFIX}${iface}" ] && continue
++
++ if [ -d /sys/net ]; then
++ # if /sys is mounted, and there is no /sys/net/iface, then no device
++ [ -e "/sys/net/$iface" ] && continue
++ else
++ # sys wasn't mounted, so just check via 'ifconfig'
++ ifconfig "$iface" >/dev/null 2>&1 || continue
++ fi
++ initctl emit --no-wait net-device-added "INTERFACE=$iface" &&
++ emitted="$emitted $iface" ||
++ echo "warn: ${UPSTART_JOB} failed to emit net-device-added INTERFACE=$iface"
++ done
++
++ [ -z "${emitted# }" ] ||
++ echo "${UPSTART_JOB}: emitted ifup for ${emitted# }"
++end script
+--- a/upstart/cloud-init-nonet.conf
++++ b/upstart/cloud-init-nonet.conf
+@@ -18,8 +18,6 @@ script
+
+ [ -f /var/lib/cloud/instance/obj.pkl ] && exit 0
+
+- start networking >/dev/null
+-
+ short=10; long=120;
+ sleep ${short}
+ echo $UPSTART_JOB "waiting ${long} seconds for a network device."
diff --git a/debian/patches/lp-1037567-add-config-drive-v2-support.conf b/debian/patches/lp-1037567-add-config-drive-v2-support.conf
new file mode 100644
index 00000000..93dedcb6
--- /dev/null
+++ b/debian/patches/lp-1037567-add-config-drive-v2-support.conf
@@ -0,0 +1,1113 @@
+Author: Scott Moser <smoser@brickies.net>
+Bug: https://launchpad.net/bugs/1037567
+Applied-Upstream: yes
+Description: add support for config-drive-v2
+ Openstack Nova in Folsom released with a a different format of config-drive
+ (config-drive-v2) from previous releases.
+ .
+ This pulls back cloudinit/sources/DataSourceConfigDrive.py from trunk
+ and necessary other code. It modifies that file only where necessary
+ to remain closer to trunk's version.
+ .
+ This patch also includes the trunk fix for LP: #1100545.
+--- a/cloudinit/DataSourceConfigDrive.py
++++ b/cloudinit/DataSourceConfigDrive.py
+@@ -1,6 +1,10 @@
++# vi: ts=4 expandtab
++#
+ # Copyright (C) 2012 Canonical Ltd.
++# Copyright (C) 2012 Yahoo! Inc.
+ #
+ # Author: Scott Moser <scott.moser@canonical.com>
++# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+ #
+ # This program is free software: you can redistribute it and/or modify
+ # it under the terms of the GNU General Public License version 3, as
+@@ -14,218 +18,511 @@
+ # You should have received a copy of the GNU General Public License
+ # along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+-import cloudinit.DataSource as DataSource
+-
+-from cloudinit import seeddir as base_seeddir
+-from cloudinit import log
+-import cloudinit.util as util
+-import os.path
+-import os
+ import json
++import logging
++import os
+ import subprocess
+
+-DEFAULT_IID = "iid-dsconfigdrive"
++from cloudinit import get_cpath
++from cloudinit import DataSource as sources
++from cloudinit import seeddir as base_seeddir
++from cloudinit import util
+
++LOG = logging.getLogger(__name__)
+
+-class DataSourceConfigDrive(DataSource.DataSource):
+- seed = None
+- seeddir = base_seeddir + '/config_drive'
+- cfg = {}
+- userdata_raw = None
+- metadata = None
+- dsmode = "local"
++# Various defaults/constants...
++DEFAULT_IID = "iid-dsconfigdrive"
++DEFAULT_MODE = 'pass'
++CFG_DRIVE_FILES_V1 = [
++ "etc/network/interfaces",
++ "root/.ssh/authorized_keys",
++ "meta.js",
++]
++DEFAULT_METADATA = {
++ "instance-id": DEFAULT_IID,
++}
++VALID_DSMODES = ("local", "net", "pass", "disabled")
++
++
++class DataSourceConfigDrive(sources.DataSource):
++ seed_dir = base_seeddir + '/config_drive'
++
++ def __init__(self, sys_cfg=None):
++ sources.DataSource.__init__(self, sys_cfg)
++ self.source = None
++ self.dsmode = 'local'
++ self.version = None
++ self.ec2_metadata = None
+
+ def __str__(self):
+- mstr = "DataSourceConfigDrive[%s]" % self.dsmode
+- mstr = mstr + " [seed=%s]" % self.seed
+- return(mstr)
++ mstr = "DataSourceConfigDrive [%s,ver=%s]" % (self.dsmode,
++ self.version)
++ mstr += "[source=%s]" % (self.source)
++ return mstr
++
++ def _ec2_name_to_device(self, name):
++ if not self.ec2_metadata:
++ return None
++ bdm = self.ec2_metadata.get('block-device-mapping', {})
++ for (ent_name, device) in bdm.items():
++ if name == ent_name:
++ return device
++ return None
++
++ def _os_name_to_device(self, name):
++ device = None
++ try:
++ criteria = 'LABEL=%s' % (name)
++ if name in ['swap']:
++ criteria = 'TYPE=%s' % (name)
++ dev_entries = util.find_devs_with(criteria)
++ if dev_entries:
++ device = dev_entries[0]
++ except subprocess.CalledProcessError:
++ pass
++ return device
++
++ def _validate_device_name(self, device):
++ if not device:
++ return None
++ if not device.startswith("/"):
++ device = "/dev/%s" % device
++ if os.path.exists(device):
++ return device
++ # Durn, try adjusting the mapping
++ remapped = self._remap_device(os.path.basename(device))
++ if remapped:
++ LOG.debug("Remapped device name %s => %s", device, remapped)
++ return remapped
++ return None
++
++ def device_name_to_device(self, name):
++ # Translate a 'name' to a 'physical' device
++ if not name:
++ return None
++ # Try the ec2 mapping first
++ names = [name]
++ if name == 'root':
++ names.insert(0, 'ami')
++ if name == 'ami':
++ names.append('root')
++ device = None
++ LOG.debug("Using ec2 metadata lookup to find device %s", names)
++ for n in names:
++ device = self._ec2_name_to_device(n)
++ device = self._validate_device_name(device)
++ if device:
++ break
++ # Try the openstack way second
++ if not device:
++ LOG.debug("Using os lookup to find device %s", names)
++ for n in names:
++ device = self._os_name_to_device(n)
++ device = self._validate_device_name(device)
++ if device:
++ break
++ # Ok give up...
++ if not device:
++ return None
++ else:
++ LOG.debug("Using cfg drive lookup mapped to device %s", device)
++ return device
+
+ def get_data(self):
+ found = None
+ md = {}
+- ud = ""
+-
+- defaults = {"instance-id": DEFAULT_IID, "dsmode": "pass"}
+
+- if os.path.isdir(self.seeddir):
++ results = {}
++ if os.path.isdir(self.seed_dir):
+ try:
+- (md, ud) = read_config_drive_dir(self.seeddir)
+- found = self.seeddir
+- except nonConfigDriveDir:
+- pass
+-
++ results = read_config_drive_dir(self.seed_dir)
++ found = self.seed_dir
++ except NonConfigDriveDir:
++ LOG.debug("Failed reading config drive from %s", self.seed_dir)
++ util.logexc(LOG)
+ if not found:
+- dev = cfg_drive_device()
+- if dev:
++ devlist = find_candidate_devs()
++ for dev in devlist:
+ try:
+- (md, ud) = util.mount_callback_umount(dev,
+- read_config_drive_dir)
++ results = util.mount_callback_umount(dev,
++ read_config_drive_dir)
+ found = dev
+- except (nonConfigDriveDir, util.mountFailedError):
++ break
++ except (NonConfigDriveDir, util.mountFailedError):
+ pass
++ except BrokenConfigDriveDir:
++ LOG.debug("broken config drive: %s" % dev)
++ util.logexc(LOG)
+
+ if not found:
+ return False
+
+- if 'dsconfig' in md:
+- self.cfg = md['dscfg']
++ md = results['metadata']
++ md = util.mergedict(md, DEFAULT_METADATA)
++
++ # Perform some metadata 'fixups'
++ #
++ # OpenStack uses the 'hostname' key
++ # while most of cloud-init uses the metadata
++ # 'local-hostname' key instead so if it doesn't
++ # exist we need to make sure its copied over.
++ for (tgt, src) in [('local-hostname', 'hostname')]:
++ if tgt not in md and src in md:
++ md[tgt] = md[src]
++
++ user_dsmode = results.get('dsmode', None)
++ if user_dsmode not in VALID_DSMODES + (None,):
++ LOG.warn("user specified invalid mode: %s" % user_dsmode)
++ user_dsmode = None
++
++ dsmode = get_ds_mode(cfgdrv_ver=results['cfgdrive_ver'],
++ ds_cfg=self.ds_cfg.get('dsmode'),
++ user=user_dsmode)
++
++ if dsmode == "disabled":
++ # most likely user specified
++ return False
+
+- md = util.mergedict(md, defaults)
++ # TODO(smoser): fix this, its dirty.
++ # we want to do some things (writing files and network config)
++ # only on first boot, and even then, we want to do so in the
++ # local datasource (so they happen earlier) even if the configured
++ # dsmode is 'net' or 'pass'. To do this, we check the previous
++ # instance-id
++ prev_iid = get_previous_iid()
++ cur_iid = md['instance-id']
+
+ # update interfaces and ifup only on the local datasource
+ # this way the DataSourceConfigDriveNet doesn't do it also.
+- if 'network-interfaces' in md and self.dsmode == "local":
+- if md['dsmode'] == "pass":
+- log.info("updating network interfaces from configdrive")
+- else:
+- log.debug("updating network interfaces from configdrive")
++ if 'network_config' in results and self.dsmode == "local":
++ LOG.debug("Updating network interfaces from config drive (%s)",
++ dsmode)
+
+ util.write_file("/etc/network/interfaces",
+ md['network-interfaces'])
+ try:
+ (out, err) = util.subp(['ifup', '--all'])
+ if len(out) or len(err):
+- log.warn("ifup --all had stderr: %s" % err)
++ LOG.warn("ifup --all had stderr: %s" % err)
+
+ except subprocess.CalledProcessError as exc:
+- log.warn("ifup --all failed: %s" % (exc.output[1]))
++ LOG.warn("ifup --all failed: %s" % (exc.output[1]))
+
+- self.seed = found
+- self.metadata = md
+- self.userdata_raw = ud
++ # file writing occurs in local mode (to be as early as possible)
++ if self.dsmode == "local" and prev_iid != cur_iid and results['files']:
++ LOG.debug("writing injected files")
++ try:
++ write_files(results['files'])
++ except:
++ util.logexc(LOG, "Failed writing files")
++
++ # dsmode != self.dsmode here if:
++ # * dsmode = "pass", pass means it should only copy files and then
++ # pass to another datasource
++ # * dsmode = "net" and self.dsmode = "local"
++ # so that user boothooks would be applied with network, the
++ # local datasource just gets out of the way, and lets the net claim
++ if dsmode != self.dsmode:
++ LOG.debug("%s: not claiming datasource, dsmode=%s", self, dsmode)
++ return False
+
+- if md['dsmode'] == self.dsmode:
+- return True
++ self.source = found
++ self.metadata = md
++ self.ec2_metadata = results.get('ec2-metadata')
++ self.userdata_raw = results.get('userdata')
++ self.version = results['cfgdrive_ver']
++
++ # current trunk handles userdata_raw == None in other places, mostly
++ # by converting it to a empty string. If we do not have user-data
++ # other parts of this version will have issues. So just make None
++ # into an empty string here.
++ if self.userdata_raw is None:
++ self.userdata_raw = ''
+
+- log.debug("%s: not claiming datasource, dsmode=%s" %
+- (self, md['dsmode']))
+- return False
++ return True
+
+ def get_public_ssh_keys(self):
+- if not 'public-keys' in self.metadata:
+- return([])
+- return(self.metadata['public-keys'])
+-
+- # the data sources' config_obj is a cloud-config formated
+- # object that came to it from ways other than cloud-config
+- # because cloud-config content would be handled elsewhere
+- def get_config_obj(self):
+- return(self.cfg)
++ name = "public_keys"
++ if self.version == 1:
++ name = "public-keys"
++ return sources.normalize_pubkey_data(self.metadata.get(name))
++
++ def _remap_device(self, short_name):
++ # LP: #611137
++ # the metadata service may believe that devices are named 'sda'
++ # when the kernel named them 'vda' or 'xvda'
++ # we want to return the correct value for what will actually
++ # exist in this instance
++ mappings = {"sd": ("vd", "xvd")}
++ for (nfrom, tlist) in mappings.iteritems():
++ if not short_name.startswith(nfrom):
++ continue
++ for nto in tlist:
++ cand = "/dev/%s%s" % (nto, short_name[len(nfrom):])
++ if os.path.exists(cand):
++ return cand
++ return None
+
+
+ class DataSourceConfigDriveNet(DataSourceConfigDrive):
+- dsmode = "net"
++ def __init__(self, sys_cfg=None):
++ DataSourceConfigDrive.__init__(self, sys_cfg)
++ self.dsmode = 'net'
+
+
+-class nonConfigDriveDir(Exception):
++class NonConfigDriveDir(Exception):
+ pass
+
+
+-def cfg_drive_device():
+- """ get the config drive device. return a string like '/dev/vdb'
+- or None (if there is no non-root device attached). This does not
+- check the contents, only reports that if there *were* a config_drive
+- attached, it would be this device.
+- per config_drive documentation, this is
+- "associated as the last available disk on the instance"
+- """
++class BrokenConfigDriveDir(Exception):
++ pass
+
+- if 'CLOUD_INIT_CONFIG_DRIVE_DEVICE' in os.environ:
+- return(os.environ['CLOUD_INIT_CONFIG_DRIVE_DEVICE'])
+
+- # we are looking for a raw block device (sda, not sda1) with a vfat
+- # filesystem on it.
++def find_candidate_devs():
++ """Return a list of devices that may contain the config drive.
+
+- letters = "abcdefghijklmnopqrstuvwxyz"
+- devs = util.find_devs_with("TYPE=vfat")
++ The returned list is sorted by search order where the first item has
++ should be searched first (highest priority)
+
+- # filter out anything not ending in a letter (ignore partitions)
+- devs = [f for f in devs if f[-1] in letters]
++ config drive v1:
++ Per documentation, this is "associated as the last available disk on the
++ instance", and should be VFAT.
++ Currently, we do not restrict search list to "last available disk"
++
++ config drive v2:
++ Disk should be:
++ * either vfat or iso9660 formated
++ * labeled with 'config-2'
++ """
+
+- # sort them in reverse so "last" device is first
+- devs.sort(reverse=True)
++ by_fstype = (util.find_devs_with("TYPE=vfat") +
++ util.find_devs_with("TYPE=iso9660"))
++ by_label = util.find_devs_with("LABEL=config-2")
++
++ # give preference to "last available disk" (vdb over vda)
++ # note, this is not a perfect rendition of that.
++ by_fstype.sort(reverse=True)
++ by_label.sort(reverse=True)
++
++ # combine list of items by putting by-label items first
++ # followed by fstype items, but with dupes removed
++ combined = (by_label + [d for d in by_fstype if d not in by_label])
+
+- if len(devs):
+- return(devs[0])
++ # We are looking for block device (sda, not sda1), ignore partitions
++ combined = [d for d in combined if not util.is_partition(d)]
+
+- return(None)
++ return combined
+
+
+ def read_config_drive_dir(source_dir):
++ last_e = NonConfigDriveDir("Not found")
++ for finder in (read_config_drive_dir_v2, read_config_drive_dir_v1):
++ try:
++ data = finder(source_dir)
++ return data
++ except NonConfigDriveDir as exc:
++ last_e = exc
++ raise last_e
++
++
++def read_config_drive_dir_v2(source_dir, version="2012-08-10"):
++
++ if (not os.path.isdir(os.path.join(source_dir, "openstack", version)) and
++ os.path.isdir(os.path.join(source_dir, "openstack", "latest"))):
++ LOG.warn("version '%s' not available, attempting to use 'latest'" %
++ version)
++ version = "latest"
++
++ datafiles = (
++ ('metadata',
++ "openstack/%s/meta_data.json" % version, True, json.loads),
++ ('userdata', "openstack/%s/user_data" % version, False, None),
++ ('ec2-metadata', "ec2/latest/meta-data.json", False, json.loads),
++ )
++
++ results = {'userdata': None}
++ for (name, path, required, process) in datafiles:
++ fpath = os.path.join(source_dir, path)
++ data = None
++ found = False
++ if os.path.isfile(fpath):
++ try:
++ data = util.load_file(fpath)
++ except IOError:
++ raise BrokenConfigDriveDir("Failed to read: %s" % fpath)
++ found = True
++ elif required:
++ raise NonConfigDriveDir("Missing mandatory path: %s" % fpath)
++
++ if found and process:
++ try:
++ data = process(data)
++ except Exception as exc:
++ raise BrokenConfigDriveDir(("Failed to process "
++ "path: %s") % fpath)
++
++ if found:
++ results[name] = data
++
++ # instance-id is 'uuid' for openstack. just copy it to instance-id.
++ if 'instance-id' not in results['metadata']:
++ try:
++ results['metadata']['instance-id'] = results['metadata']['uuid']
++ except KeyError:
++ raise BrokenConfigDriveDir("No uuid entry in metadata")
++
++ def read_content_path(item):
++ # do not use os.path.join here, as content_path starts with /
++ cpath = os.path.sep.join((source_dir, "openstack",
++ "./%s" % item['content_path']))
++ return util.load_file(cpath)
++
++ files = {}
++ try:
++ for item in results['metadata'].get('files', {}):
++ files[item['path']] = read_content_path(item)
++
++ # the 'network_config' item in metadata is a content pointer
++ # to the network config that should be applied.
++ # in folsom, it is just a '/etc/network/interfaces' file.
++ item = results['metadata'].get("network_config", None)
++ if item:
++ results['network_config'] = read_content_path(item)
++ except Exception as exc:
++ raise BrokenConfigDriveDir("Failed to read file %s: %s" % (item, exc))
++
++ # to openstack, user can specify meta ('nova boot --meta=key=value') and
++ # those will appear under metadata['meta'].
++ # if they specify 'dsmode' they're indicating the mode that they intend
++ # for this datasource to operate in.
++ try:
++ results['dsmode'] = results['metadata']['meta']['dsmode']
++ except KeyError:
++ pass
++
++ results['files'] = files
++ results['cfgdrive_ver'] = 2
++ return results
++
++
++def read_config_drive_dir_v1(source_dir):
+ """
+- read_config_drive_dir(source_dir):
+- read source_dir, and return a tuple with metadata dict and user-data
+- string populated. If not a valid dir, raise a nonConfigDriveDir
++ read source_dir, and return a tuple with metadata dict, user-data,
++ files and version (1). If not a valid dir, raise a NonConfigDriveDir
+ """
+- md = {}
+- ud = ""
+
+- flist = ("etc/network/interfaces", "root/.ssh/authorized_keys", "meta.js")
+- found = [f for f in flist if os.path.isfile("%s/%s" % (source_dir, f))]
+- keydata = ""
++ found = {}
++ for af in CFG_DRIVE_FILES_V1:
++ fn = os.path.join(source_dir, af)
++ if os.path.isfile(fn):
++ found[af] = fn
+
+ if len(found) == 0:
+- raise nonConfigDriveDir("%s: %s" % (source_dir, "no files found"))
++ raise NonConfigDriveDir("%s: %s" % (source_dir, "no files found"))
+
++ md = {}
++ keydata = ""
+ if "etc/network/interfaces" in found:
+- with open("%s/%s" % (source_dir, "/etc/network/interfaces")) as fp:
+- md['network-interfaces'] = fp.read()
++ fn = found["etc/network/interfaces"]
++ md['network_config'] = util.load_file(fn)
+
+ if "root/.ssh/authorized_keys" in found:
+- with open("%s/%s" % (source_dir, "root/.ssh/authorized_keys")) as fp:
+- keydata = fp.read()
++ fn = found["root/.ssh/authorized_keys"]
++ keydata = util.load_file(fn)
+
+ meta_js = {}
+-
+ if "meta.js" in found:
+- content = ''
+- with open("%s/%s" % (source_dir, "meta.js")) as fp:
+- content = fp.read()
+- md['meta_js'] = content
++ fn = found['meta.js']
++ content = util.load_file(fn)
+ try:
++ # Just check if its really json...
+ meta_js = json.loads(content)
+- except ValueError:
+- raise nonConfigDriveDir("%s: %s" %
+- (source_dir, "invalid json in meta.js"))
++ if not isinstance(meta_js, (dict)):
++ raise TypeError("Dict expected for meta.js root node")
++ except (ValueError, TypeError) as e:
++ raise NonConfigDriveDir("%s: %s, %s" %
++ (source_dir, "invalid json in meta.js", e))
++ md['meta_js'] = content
+
++ # keydata in meta_js is preferred over "injected"
+ keydata = meta_js.get('public-keys', keydata)
+-
+ if keydata:
+ lines = keydata.splitlines()
+ md['public-keys'] = [l for l in lines
+ if len(l) and not l.startswith("#")]
+
+- for copy in ('dsmode', 'instance-id', 'dscfg'):
+- if copy in meta_js:
+- md[copy] = meta_js[copy]
++ # config-drive-v1 has no way for openstack to provide the instance-id
++ # so we copy that into metadata from the user input
++ if 'instance-id' in meta_js:
++ md['instance-id'] = meta_js['instance-id']
+
+- if 'user-data' in meta_js:
+- ud = meta_js['user-data']
++ results = {'cfgdrive_ver': 1, 'metadata': md}
+
+- return(md, ud)
++ # allow the user to specify 'dsmode' in a meta tag
++ if 'dsmode' in meta_js:
++ results['dsmode'] = meta_js['dsmode']
+
+-datasources = (
+- (DataSourceConfigDrive, (DataSource.DEP_FILESYSTEM, )),
+- (DataSourceConfigDriveNet,
+- (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
+-)
++ # config-drive-v1 has no way of specifying user-data, so the user has
++ # to cheat and stuff it in a meta tag also.
++ results['userdata'] = meta_js.get('user-data')
+
++ # this implementation does not support files
++ # (other than network/interfaces and authorized_keys)
++ results['files'] = []
+
+-# return a list of data sources that match this set of dependencies
+-def get_datasource_list(depends):
+- return(DataSource.list_from_depends(depends, datasources))
++ return results
+
+-if __name__ == "__main__":
+- def main():
+- import sys
+- import pprint
+- print cfg_drive_device()
+- (md, ud) = read_config_drive_dir(sys.argv[1])
+- print "=== md ==="
+- pprint.pprint(md)
+- print "=== ud ==="
+- print(ud)
+
+- main()
++def get_ds_mode(cfgdrv_ver, ds_cfg=None, user=None):
++ """Determine what mode should be used.
++ valid values are 'pass', 'disabled', 'local', 'net'
++ """
++ # user passed data trumps everything
++ if user is not None:
++ return user
++
++ if ds_cfg is not None:
++ return ds_cfg
++
++ # at config-drive version 1, the default behavior was pass. That
++ # meant to not use use it as primary data source, but expect a ec2 metadata
++ # source. for version 2, we default to 'net', which means
++ # the DataSourceConfigDriveNet, would be used.
++ #
++ # this could change in the future. If there was definitive metadata
++ # that indicated presense of an openstack metadata service, then
++ # we could change to 'pass' by default also. The motivation for that
++ # would be 'cloud-init query' as the web service could be more dynamic
++ if cfgdrv_ver == 1:
++ return "pass"
++ return "net"
++
++
++def get_previous_iid():
++ # interestingly, for this purpose the "previous" instance-id is the current
++ # instance-id. cloud-init hasn't moved them over yet as this datasource
++ # hasn't declared itself found.
++ fname = os.path.join(get_cpath('data'), 'instance-id')
++ try:
++ return util.load_file(fname)
++ except IOError:
++ return None
++
++
++def write_files(files):
++ for (name, content) in files.iteritems():
++ if name[0] != os.sep:
++ name = os.sep + name
++ util.write_file(name, content, mode=0660)
++
++
++# Used to match classes to dependencies
++datasources = [
++ (DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )),
++ (DataSourceConfigDriveNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
++]
+
+-# vi: ts=4 expandtab
++
++# Return a list of data sources that match this set of dependencies
++def get_datasource_list(depends):
++ return sources.list_from_depends(depends, datasources)
+--- /dev/null
++++ b/tests/unittests/test_datasource/test_configdrive.py
+@@ -0,0 +1,325 @@
++from copy import copy
++import json
++import os
++import os.path
++
++import mocker
++from mocker import MockerTestCase
++
++from cloudinit import cfg_builtin
++from cloudinit import DataSourceConfigDrive as ds
++from cloudinit import util
++
++
++PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
++EC2_META = {
++ 'ami-id': 'ami-00000001',
++ 'ami-launch-index': 0,
++ 'ami-manifest-path': 'FIXME',
++ 'block-device-mapping': {
++ 'ami': 'sda1',
++ 'ephemeral0': 'sda2',
++ 'root': '/dev/sda1',
++ 'swap': 'sda3'},
++ 'hostname': 'sm-foo-test.novalocal',
++ 'instance-action': 'none',
++ 'instance-id': 'i-00000001',
++ 'instance-type': 'm1.tiny',
++ 'local-hostname': 'sm-foo-test.novalocal',
++ 'local-ipv4': None,
++ 'placement': {'availability-zone': 'nova'},
++ 'public-hostname': 'sm-foo-test.novalocal',
++ 'public-ipv4': '',
++ 'public-keys': {'0': {'openssh-key': PUBKEY}},
++ 'reservation-id': 'r-iru5qm4m',
++ 'security-groups': ['default']
++}
++USER_DATA = '#!/bin/sh\necho This is user data\n'
++OSTACK_META = {
++ 'availability_zone': 'nova',
++ 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
++ {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}],
++ 'hostname': 'sm-foo-test.novalocal',
++ 'meta': {'dsmode': 'local', 'my-meta': 'my-value'},
++ 'name': 'sm-foo-test',
++ 'public_keys': {'mykey': PUBKEY},
++ 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'}
++
++CONTENT_0 = 'This is contents of /etc/foo.cfg\n'
++CONTENT_1 = '# this is /etc/bar/bar.cfg\n'
++
++CFG_DRIVE_FILES_V2 = {
++ 'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META),
++ 'ec2/2009-04-04/user-data': USER_DATA,
++ 'ec2/latest/meta-data.json': json.dumps(EC2_META),
++ 'ec2/latest/user-data': USER_DATA,
++ 'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META),
++ 'openstack/2012-08-10/user_data': USER_DATA,
++ 'openstack/content/0000': CONTENT_0,
++ 'openstack/content/0001': CONTENT_1,
++ 'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
++ 'openstack/latest/user_data': USER_DATA}
++
++
++class TestConfigDriveDataSource(MockerTestCase):
++
++ def setUp(self):
++ super(TestConfigDriveDataSource, self).setUp()
++ self.tmp = self.makeDir()
++
++ def test_ec2_metadata(self):
++ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
++ found = ds.read_config_drive_dir(self.tmp)
++ self.assertTrue('ec2-metadata' in found)
++ ec2_md = found['ec2-metadata']
++ self.assertEqual(EC2_META, ec2_md)
++
++ def test_dev_os_remap(self):
++ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
++ cfg_ds = ds.DataSourceConfigDrive()
++ found = ds.read_config_drive_dir(self.tmp)
++ cfg_ds.metadata = found['metadata']
++ name_tests = {
++ 'ami': '/dev/vda1',
++ 'root': '/dev/vda1',
++ 'ephemeral0': '/dev/vda2',
++ 'swap': '/dev/vda3',
++ }
++ for name, dev_name in name_tests.items():
++ my_mock = mocker.Mocker()
++ find_mock = my_mock.replace(util.find_devs_with,
++ spec=False, passthrough=False)
++ provided_name = dev_name[len('/dev/'):]
++ provided_name = "s" + provided_name[1:]
++ find_mock(mocker.ARGS)
++ my_mock.result([provided_name])
++ exists_mock = my_mock.replace(os.path.exists,
++ spec=False, passthrough=False)
++ exists_mock(mocker.ARGS)
++ my_mock.result(False)
++ exists_mock(mocker.ARGS)
++ my_mock.result(True)
++ my_mock.replay()
++ device = cfg_ds.device_name_to_device(name)
++ my_mock.restore()
++ self.assertEquals(dev_name, device)
++
++ def test_dev_os_map(self):
++ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
++ cfg_ds = ds.DataSourceConfigDrive()
++ found = ds.read_config_drive_dir(self.tmp)
++ os_md = found['metadata']
++ cfg_ds.metadata = os_md
++ name_tests = {
++ 'ami': '/dev/vda1',
++ 'root': '/dev/vda1',
++ 'ephemeral0': '/dev/vda2',
++ 'swap': '/dev/vda3',
++ }
++ for name, dev_name in name_tests.items():
++ my_mock = mocker.Mocker()
++ find_mock = my_mock.replace(util.find_devs_with,
++ spec=False, passthrough=False)
++ find_mock(mocker.ARGS)
++ my_mock.result([dev_name])
++ exists_mock = my_mock.replace(os.path.exists,
++ spec=False, passthrough=False)
++ exists_mock(mocker.ARGS)
++ my_mock.result(True)
++ my_mock.replay()
++ device = cfg_ds.device_name_to_device(name)
++ my_mock.restore()
++ self.assertEquals(dev_name, device)
++
++ def test_dev_ec2_remap(self):
++ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
++ cfg_ds = ds.DataSourceConfigDrive()
++ found = ds.read_config_drive_dir(self.tmp)
++ ec2_md = found['ec2-metadata']
++ os_md = found['metadata']
++ cfg_ds.ec2_metadata = ec2_md
++ cfg_ds.metadata = os_md
++ name_tests = {
++ 'ami': '/dev/vda1',
++ 'root': '/dev/vda1',
++ 'ephemeral0': '/dev/vda2',
++ 'swap': '/dev/vda3',
++ None: None,
++ 'bob': None,
++ 'root2k': None,
++ }
++ for name, dev_name in name_tests.items():
++ my_mock = mocker.Mocker()
++ exists_mock = my_mock.replace(os.path.exists,
++ spec=False, passthrough=False)
++ exists_mock(mocker.ARGS)
++ my_mock.result(False)
++ exists_mock(mocker.ARGS)
++ my_mock.result(True)
++ my_mock.replay()
++ device = cfg_ds.device_name_to_device(name)
++ self.assertEquals(dev_name, device)
++ my_mock.restore()
++
++ def test_dev_ec2_map(self):
++ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
++ cfg_ds = ds.DataSourceConfigDrive()
++ found = ds.read_config_drive_dir(self.tmp)
++ exists_mock = self.mocker.replace(os.path.exists,
++ spec=False, passthrough=False)
++ exists_mock(mocker.ARGS)
++ self.mocker.count(0, None)
++ self.mocker.result(True)
++ self.mocker.replay()
++ ec2_md = found['ec2-metadata']
++ os_md = found['metadata']
++ cfg_ds.ec2_metadata = ec2_md
++ cfg_ds.metadata = os_md
++ name_tests = {
++ 'ami': '/dev/sda1',
++ 'root': '/dev/sda1',
++ 'ephemeral0': '/dev/sda2',
++ 'swap': '/dev/sda3',
++ None: None,
++ 'bob': None,
++ 'root2k': None,
++ }
++ for name, dev_name in name_tests.items():
++ device = cfg_ds.device_name_to_device(name)
++ self.assertEquals(dev_name, device)
++
++ def test_dir_valid(self):
++ """Verify a dir is read as such."""
++
++ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
++
++ found = ds.read_config_drive_dir(self.tmp)
++
++ expected_md = copy(OSTACK_META)
++ expected_md['instance-id'] = expected_md['uuid']
++
++ self.assertEqual(USER_DATA, found['userdata'])
++ self.assertEqual(expected_md, found['metadata'])
++ self.assertEqual(found['files']['/etc/foo.cfg'], CONTENT_0)
++ self.assertEqual(found['files']['/etc/bar/bar.cfg'], CONTENT_1)
++
++ def test_seed_dir_valid_extra(self):
++ """Verify extra files do not affect datasource validity."""
++
++ data = copy(CFG_DRIVE_FILES_V2)
++ data["myfoofile.txt"] = "myfoocontent"
++ data["openstack/latest/random-file.txt"] = "random-content"
++
++ populate_dir(self.tmp, data)
++
++ found = ds.read_config_drive_dir(self.tmp)
++
++ expected_md = copy(OSTACK_META)
++ expected_md['instance-id'] = expected_md['uuid']
++
++ self.assertEqual(expected_md, found['metadata'])
++
++ def test_seed_dir_bad_json_metadata(self):
++ """Verify that bad json in metadata raises BrokenConfigDriveDir."""
++ data = copy(CFG_DRIVE_FILES_V2)
++
++ data["openstack/2012-08-10/meta_data.json"] = "non-json garbage {}"
++ data["openstack/latest/meta_data.json"] = "non-json garbage {}"
++
++ populate_dir(self.tmp, data)
++
++ self.assertRaises(ds.BrokenConfigDriveDir,
++ ds.read_config_drive_dir, self.tmp)
++
++ def test_seed_dir_no_configdrive(self):
++ """Verify that no metadata raises NonConfigDriveDir."""
++
++ my_d = os.path.join(self.tmp, "non-configdrive")
++ data = copy(CFG_DRIVE_FILES_V2)
++ data["myfoofile.txt"] = "myfoocontent"
++ data["openstack/latest/random-file.txt"] = "random-content"
++ data["content/foo"] = "foocontent"
++
++ self.assertRaises(ds.NonConfigDriveDir,
++ ds.read_config_drive_dir, my_d)
++
++ def test_seed_dir_missing(self):
++ """Verify that missing seed_dir raises NonConfigDriveDir."""
++ my_d = os.path.join(self.tmp, "nonexistantdirectory")
++ self.assertRaises(ds.NonConfigDriveDir,
++ ds.read_config_drive_dir, my_d)
++
++ def test_find_candidates(self):
++ devs_with_answers = {}
++
++ def my_devs_with(criteria):
++ return devs_with_answers[criteria]
++
++ def my_is_partition(dev):
++ return dev[-1] in "0123456789" and not dev.startswith("sr")
++
++ try:
++ orig_find_devs_with = util.find_devs_with
++ util.find_devs_with = my_devs_with
++
++ orig_is_partition = util.is_partition
++ util.is_partition = my_is_partition
++
++ devs_with_answers = {"TYPE=vfat": [],
++ "TYPE=iso9660": ["/dev/vdb"],
++ "LABEL=config-2": ["/dev/vdb"],
++ }
++ self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
++
++ # add a vfat item
++ # zdd reverse sorts after vdb, but config-2 label is preferred
++ devs_with_answers['TYPE=vfat'] = ["/dev/zdd"]
++ self.assertEqual(["/dev/vdb", "/dev/zdd"],
++ ds.find_candidate_devs())
++
++ # verify that partitions are not considered
++ devs_with_answers = {"TYPE=vfat": ["/dev/sda1"],
++ "TYPE=iso9660": [], "LABEL=config-2": ["/dev/vdb3"]}
++ self.assertEqual([], ds.find_candidate_devs())
++
++ finally:
++ util.find_devs_with = orig_find_devs_with
++ util.is_partition = orig_is_partition
++
++ def test_pubkeys_v2(self):
++ """Verify that public-keys work in config-drive-v2."""
++ populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
++ myds = cfg_ds_from_dir(self.tmp)
++ self.assertEqual(myds.get_public_ssh_keys(),
++ [OSTACK_META['public_keys']['mykey']])
++
++
++def cfg_ds_from_dir(seed_d):
++ found = ds.read_config_drive_dir(seed_d)
++ cfg_ds = ds.DataSourceConfigDrive()
++ populate_ds_from_read_config(cfg_ds, seed_d, found)
++ return cfg_ds
++
++
++def populate_ds_from_read_config(cfg_ds, source, results):
++ """Patch the DataSourceConfigDrive from the results of
++ read_config_drive_dir hopefully in line with what it would have
++ if cfg_ds.get_data had been successfully called"""
++ cfg_ds.source = source
++ cfg_ds.metadata = results.get('metadata')
++ cfg_ds.ec2_metadata = results.get('ec2-metadata')
++ cfg_ds.userdata_raw = results.get('userdata')
++ cfg_ds.version = results.get('cfgdrive_ver')
++
++
++def populate_dir(seed_dir, files):
++ for (name, content) in files.iteritems():
++ path = os.path.join(seed_dir, name)
++ dirname = os.path.dirname(path)
++ if not os.path.isdir(dirname):
++ os.makedirs(dirname)
++ with open(path, "w") as fp:
++ fp.write(content)
++ fp.close()
++
++# vi: ts=4 expandtab
+--- a/cloudinit/DataSource.py
++++ b/cloudinit/DataSource.py
+@@ -215,3 +215,32 @@ def is_ipv4(instr):
+ return False
+
+ return (len(toks) == 4)
++
++
++def normalize_pubkey_data(pubkey_data):
++ keys = []
++
++ if not pubkey_data:
++ return keys
++
++ if isinstance(pubkey_data, (basestring, str)):
++ return str(pubkey_data).splitlines()
++
++ if isinstance(pubkey_data, (list, set)):
++ return list(pubkey_data)
++
++ if isinstance(pubkey_data, (dict)):
++ for (_keyname, klist) in pubkey_data.iteritems():
++ # lp:506332 uec metadata service responds with
++ # data that makes boto populate a string for 'klist' rather
++ # than a list.
++ if isinstance(klist, (str, basestring)):
++ klist = [klist]
++ if isinstance(klist, (list, set)):
++ for pkey in klist:
++ # There is an empty string at
++ # the end of the keylist, trim it
++ if pkey:
++ keys.append(pkey)
++
++ return keys
+--- a/cloudinit/util.py
++++ b/cloudinit/util.py
+@@ -38,6 +38,7 @@ import tempfile
+ import traceback
+ import urlparse
+ from StringIO import StringIO
++import types
+
+ try:
+ import selinux
+@@ -718,7 +719,8 @@ def close_stdin():
+ os.dup2(fp.fileno(), sys.stdin.fileno())
+
+
+-def find_devs_with(criteria):
++def find_devs_with(criteria=None, oformat='device',
++ tag=None, no_cache=False, path=None):
+ """
+ find devices matching given criteria (via blkid)
+ criteria can be *one* of:
+@@ -726,11 +728,46 @@ def find_devs_with(criteria):
+ LABEL=<label>
+ UUID=<uuid>
+ """
+- try:
+- (out, _err) = subp(['blkid', '-t%s' % criteria, '-odevice'])
+- except subprocess.CalledProcessError:
+- return([])
+- return(str(out).splitlines())
++ blk_id_cmd = ['blkid']
++ options = []
++ if criteria:
++ # Search for block devices with tokens named NAME that
++ # have the value 'value' and display any devices which are found.
++ # Common values for NAME include TYPE, LABEL, and UUID.
++ # If there are no devices specified on the command line,
++ # all block devices will be searched; otherwise,
++ # only search the devices specified by the user.
++ options.append("-t%s" % (criteria))
++ if tag:
++ # For each (specified) device, show only the tags that match tag.
++ options.append("-s%s" % (tag))
++ if no_cache:
++ # If you want to start with a clean cache
++ # (i.e. don't report devices previously scanned
++ # but not necessarily available at this time), specify /dev/null.
++ options.extend(["-c", "/dev/null"])
++ if oformat:
++ # Display blkid's output using the specified format.
++ # The format parameter may be:
++ # full, value, list, device, udev, export
++ options.append('-o%s' % (oformat))
++ if path:
++ options.append(path)
++ cmd = blk_id_cmd + options
++ try:
++ (out, _err) = subp(cmd)
++ except subprocess.CalledProcessError as e:
++ if e.returncode == 2:
++ # See man blkid (no matching devices were found)
++ return []
++ else:
++ raise
++ entries = []
++ for line in str(out).splitlines():
++ line = line.strip()
++ if line:
++ entries.append(line)
++ return entries
+
+
+ class mountFailedError(Exception):
+@@ -938,3 +975,10 @@ def pipe_in_out(in_fh, out_fh, chunk_siz
+ chunk_cb(bytes_piped)
+ out_fh.flush()
+ return bytes_piped
++
++def is_partition(device):
++ if device.startswith("/dev/"):
++ device = device[5:]
++
++ return os.path.isfile("/sys/class/block/%s/partition" % device)
++
diff --git a/debian/patches/lp-1077020-fix-ca-certificates-blanklines.patch b/debian/patches/lp-1077020-fix-ca-certificates-blanklines.patch
new file mode 100644
index 00000000..3ee2bd7e
--- /dev/null
+++ b/debian/patches/lp-1077020-fix-ca-certificates-blanklines.patch
@@ -0,0 +1,163 @@
+Author: Scott Moser <smoser@brickies.net>
+Bug: https://launchpad.net/bugs/1077020
+Applied-Upstream: revno 744
+Description: make sure no blank lines before cloud-init entry in ca-certificates.conf
+ When /etc/ca-certificates.conf is read by update-ca-certificates,
+ lines after a blank line are ignored. This change ensures that
+ there are no blank lines, and that no duplicate entries for
+ cloud-init are added.
+--- a/cloudinit/CloudConfig/cc_ca_certs.py
++++ b/cloudinit/CloudConfig/cc_ca_certs.py
+@@ -16,7 +16,7 @@
+ import os
+ from subprocess import check_call
+ from cloudinit.util import (write_file, get_cfg_option_list_or_str,
+- delete_dir_contents, subp)
++ delete_dir_contents, subp, load_file)
+
+ CA_CERT_PATH = "/usr/share/ca-certificates/"
+ CA_CERT_FILENAME = "cloud-init-ca-certs.crt"
+@@ -42,8 +42,14 @@ def add_ca_certs(certs):
+ cert_file_contents = "\n".join(certs)
+ cert_file_fullpath = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
+ write_file(cert_file_fullpath, cert_file_contents, mode=0644)
+- # Append cert filename to CA_CERT_CONFIG file.
+- write_file(CA_CERT_CONFIG, "\n%s" % CA_CERT_FILENAME, omode="a")
++
++ # We have to strip the content because blank lines in the file
++ # causes subsequent entries to be ignored. (LP: #1077020)
++ orig = load_file(CA_CERT_CONFIG)
++ cur_cont = '\n'.join([l for l in orig.splitlines()
++ if l != CA_CERT_FILENAME])
++ out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME)
++ write_file(CA_CERT_CONFIG, out, omode="wb")
+
+
+ def remove_default_ca_certs():
+--- a/cloudinit/util.py
++++ b/cloudinit/util.py
+@@ -37,6 +37,7 @@ import time
+ import tempfile
+ import traceback
+ import urlparse
++from StringIO import StringIO
+
+ try:
+ import selinux
+@@ -906,3 +907,34 @@ def keyval_str_to_dict(kvstring):
+ ret[key] = val
+
+ return(ret)
++
++
++def load_file(fname, read_cb=None, quiet=False):
++ LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
++ ofh = StringIO()
++ try:
++ with open(fname, 'rb') as ifh:
++ pipe_in_out(ifh, ofh, chunk_cb=read_cb)
++ except IOError as e:
++ if not quiet:
++ raise
++ if e.errno != errno.ENOENT:
++ raise
++ contents = ofh.getvalue()
++ LOG.debug("Read %s bytes from %s", len(contents), fname)
++ return contents
++
++
++def pipe_in_out(in_fh, out_fh, chunk_size=1024, chunk_cb=None):
++ bytes_piped = 0
++ while True:
++ data = in_fh.read(chunk_size)
++ if data == '':
++ break
++ else:
++ out_fh.write(data)
++ bytes_piped += len(data)
++ if chunk_cb:
++ chunk_cb(bytes_piped)
++ out_fh.flush()
++ return bytes_piped
+--- a/tests/unittests/test_handler/test_handler_ca_certs.py
++++ b/tests/unittests/test_handler/test_handler_ca_certs.py
+@@ -1,6 +1,6 @@
+ from mocker import MockerTestCase
+
+-from cloudinit.util import write_file, delete_dir_contents
++from cloudinit.util import write_file, load_file, delete_dir_contents
+ from cloudinit.CloudConfig.cc_ca_certs import (
+ handle, update_ca_certs, add_ca_certs, remove_default_ca_certs)
+ from logging import getLogger
+@@ -126,15 +126,47 @@ class TestAddCaCerts(MockerTestCase):
+
+ add_ca_certs([])
+
+- def test_single_cert(self):
+- """Test adding a single certificate to the trusted CAs"""
++ def test_single_cert_trailing_cr(self):
++ """Test adding a single certificate to the trusted CAs
++ when existing ca-certificates has trailing newline"""
+ cert = "CERT1\nLINE2\nLINE3"
+
++ ca_certs_content = "line1\nline2\ncloud-init-ca-certs.crt\nline3\n"
++ expected = "line1\nline2\nline3\ncloud-init-ca-certs.crt\n"
++
++ mock_write = self.mocker.replace(write_file, passthrough=False)
++ mock_load = self.mocker.replace(load_file, passthrough=False)
++
++ mock_write("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
++ cert, mode=0644)
++
++ mock_load("/etc/ca-certificates.conf")
++ self.mocker.result(ca_certs_content)
++
++ mock_write("/etc/ca-certificates.conf", expected, omode="wb")
++ self.mocker.replay()
++
++ add_ca_certs([cert])
++
++ def test_single_cert_no_trailing_cr(self):
++ """Test adding a single certificate to the trusted CAs
++ when existing ca-certificates has no trailing newline"""
++ cert = "CERT1\nLINE2\nLINE3"
++
++ ca_certs_content = "line1\nline2\nline3"
++
+ mock_write = self.mocker.replace(write_file, passthrough=False)
++ mock_load = self.mocker.replace(load_file, passthrough=False)
++
+ mock_write("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
+ cert, mode=0644)
++
++ mock_load("/etc/ca-certificates.conf")
++ self.mocker.result(ca_certs_content)
++
+ mock_write("/etc/ca-certificates.conf",
+- "\ncloud-init-ca-certs.crt", omode="a")
++ "%s\n%s\n" % (ca_certs_content, "cloud-init-ca-certs.crt"),
++ omode="wb")
+ self.mocker.replay()
+
+ add_ca_certs([cert])
+@@ -145,10 +177,18 @@ class TestAddCaCerts(MockerTestCase):
+ expected_cert_file = "\n".join(certs)
+
+ mock_write = self.mocker.replace(write_file, passthrough=False)
++ mock_load = self.mocker.replace(load_file, passthrough=False)
++
+ mock_write("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
+ expected_cert_file, mode=0644)
+- mock_write("/etc/ca-certificates.conf",
+- "\ncloud-init-ca-certs.crt", omode="a")
++
++ ca_certs_content = "line1\nline2\nline3"
++ mock_load("/etc/ca-certificates.conf")
++ self.mocker.result(ca_certs_content)
++
++ out = "%s\n%s\n" % (ca_certs_content, "cloud-init-ca-certs.crt")
++ mock_write("/etc/ca-certificates.conf", out, omode="wb")
++
+ self.mocker.replay()
+
+ add_ca_certs(certs)
diff --git a/debian/patches/series b/debian/patches/series
index 59a12729..e72d8134 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -7,3 +7,6 @@ lp-1018554-shutdown-message-to-console.patch
lp-1073077-zsh-workaround-for-locale_warn.patch
rework-mirror-selection.patch
lp-1100491-fix-broken-add-sources.patch
+lp-1077020-fix-ca-certificates-blanklines.patch
+lp-1031065-nonet-not-start-networking.patch
+lp-1037567-add-config-drive-v2-support.conf
diff --git a/debian/update-grub-legacy-ec2 b/debian/update-grub-legacy-ec2
index f9d327c8..19648f6d 100755
--- a/debian/update-grub-legacy-ec2
+++ b/debian/update-grub-legacy-ec2
@@ -1399,14 +1399,19 @@ fi
if ! type is_xen_kernel >/dev/null 2>&1; then
is_xen_kernel() {
- case "${1}" in
+ # input is like /boot/vmlinuz-2.6.35-13-virtual
+ # get the version string out of it.
+ local ver_flavor="";
+ ver_flavor="${1##*vmlinuz-}"
+
+ case "${ver_flavor}" in
*-ec2) return 0;;
*-virtual)
- # input is like /boot/vmlinuz-2.6.35-13-virtual
- # get the version string out of it.
- local ver=""
- ver=${1##*/}; ver=${ver#vmlinuz-}; ver=${ver%-virtual};
- dpkg --compare-versions ${ver} gt 2.6.35-13 && return 0
+ # 10.04 LTS through 12.04 LTS -virtual is the EC2/Xen kernel
+ dpkg --compare-versions ${ver_flavor%-virtual} gt 2.6.35-13 && return 0;;
+ *-generic)
+ # Starting with 12.10, -virtual was merged into -generic
+ dpkg --compare-versions ${ver_flavor%-generic} ge 3.4.0-3 && return 0;;
esac
return 1;
}