author     Scott Moser <smoser@brickies.net>  2016-08-23 16:48:30 -0400
committer  Scott Moser <smoser@brickies.net>  2016-08-23 16:48:30 -0400
commit     75385015039b092ce74cd99a061f6e77716c487a (patch)
tree       56b23a71611fc805bc8c8683060472dd9708d510
parent     731b57b91cb202f07b236d6ed4041c43c2511fc5 (diff)
download   cloud-init-git-ubuntu/0.6.3-0ubuntu1.2.tar.gz
Import version 0.6.3-0ubuntu1.2 (tag: ubuntu/0.6.3-0ubuntu1.2)
Imported using git-dsc-commit.
-rw-r--r--  debian/changelog                                                  25
-rw-r--r--  debian/cloud-init.preinst                                          5
-rw-r--r--  debian/patches/lp-1018554-shutdown-message-to-console.patch       28
-rw-r--r--  debian/patches/lp-1066115-landscape-install-fix-perms.patch       34
-rw-r--r--  debian/patches/lp-1070345-landscape-restart-after-change.patch    17
-rw-r--r--  debian/patches/lp-1073077-zsh-workaround-for-locale_warn.patch    45
-rw-r--r--  debian/patches/lp-974509-detect-dns-server-redirection.patch     152
-rw-r--r--  debian/patches/lp-978127-maas-oauth-fix-bad-clock.patch          165
-rw-r--r--  debian/patches/rework-mirror-selection.patch                     520
-rw-r--r--  debian/patches/series                                              7
10 files changed, 998 insertions, 0 deletions
diff --git a/debian/changelog b/debian/changelog
index 324af417..2186d270 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,28 @@
+cloud-init (0.6.3-0ubuntu1.2) precise-proposed; urgency=low
+
+ * debian/patches/lp-978127-maas-oauth-fix-bad-clock.patch: fix usage of
+ oauth in maas data source if local system has a bad clock (LP: #978127)
+ * debian/cloud-init.preinst: fix bug where user data scripts re-ran on
+ upgrade from 10.04 versions (LP: #1049146)
+ * debian/patches/lp-974509-detect-dns-server-redirection.patch: detect dns
+ server redirection and disable searching dns for a mirror named
+ 'ubuntu-mirror' (LP: #974509)
+ * debian/patches/lp-1018554-shutdown-message-to-console.patch: write a
+ message to the console on system shutdown. (LP: #1018554)
+ * debian/patches/lp-1066115-landscape-install-fix-perms.patch: install
+ landscape package if needed which will ensure proper permissions on config
+ file (LP: #1066115).
+ * debian/patches/lp-1070345-landscape-restart-after-change.patch: restart
+ landscape after modifying config (LP: #1070345)
+ * debian/patches/lp-1073077-zsh-workaround-for-locale_warn.patch: avoid
+ warning when user's shell is zsh (LP: #1073077)
+ * debian/patches/rework-mirror-selection.patch: improve mirror selection by:
+ * allowing region/availability-zone to be part of mirror (LP: #1037727)
+ * making mirror selection arch aware (LP: #1028501)
+ * allow specification of a security mirror (LP: #1006963)
+
+ -- Scott Moser <smoser@ubuntu.com> Mon, 12 Nov 2012 17:01:54 -0500
+
cloud-init (0.6.3-0ubuntu1.1) precise-proposed; urgency=low
[Thomas Hervé]
diff --git a/debian/cloud-init.preinst b/debian/cloud-init.preinst
index ac04f29d..42e84f7a 100644
--- a/debian/cloud-init.preinst
+++ b/debian/cloud-init.preinst
@@ -155,6 +155,11 @@ case "$1" in
convert_varlib_05x_06x
fi
+ # 0.6.0 changed 'user-scripts' to config-scripts-user (LP: #1049146)
+ if [ -e /var/lib/cloud/instance/sem/user-scripts ]; then
+ ln -sf user-scripts /var/lib/cloud/instance/sem/config-scripts-user
+ fi
+
d=/etc/cloud/
if [ -f "$d/distro.cfg" ] && [ ! -f "$d/cloud.cfg.d/90_dpkg.cfg" ]; then
echo "moving $d/distro.cfg -> $d/cloud.cfg.d/90_dpkg.cfg"
diff --git a/debian/patches/lp-1018554-shutdown-message-to-console.patch b/debian/patches/lp-1018554-shutdown-message-to-console.patch
new file mode 100644
index 00000000..d5006867
--- /dev/null
+++ b/debian/patches/lp-1018554-shutdown-message-to-console.patch
@@ -0,0 +1,28 @@
+Author: Scott Moser <smoser@ubuntu.com>
+Bug: https://launchpad.net/bugs/1018554
+Applied-Upstream: revno 609 and 613
+Description: add an upstart init job that logs shutdowns and reboots
+ This simply writes a "shutting down" message to the console
+ when the system is shut down. It's useful for logs.
+--- /dev/null
++++ b/upstart/cloud-log-shutdown.conf
+@@ -0,0 +1,19 @@
++# log shutdowns and reboots to the console (/dev/console)
++# this is useful for correlating logs
++start on runlevel PREVLEVEL=2
++
++task
++console output
++
++script
++ # runlevel(7) says INIT_HALT will be set to HALT or POWEROFF
++ date=$(date --utc)
++ case "$RUNLEVEL:$INIT_HALT" in
++ 6:*) mode="reboot";;
++ 0:HALT) mode="halt";;
++ 0:POWEROFF) mode="poweroff";;
++ 0:*) mode="shutdown-unknown";;
++ esac
++ { read seconds idle < /proc/uptime; } 2>/dev/null || :
++ echo "$date: shutting down for $mode${seconds:+ [up ${seconds%.*}s]}."
++end script
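The console line the upstart job writes is built from the runlevel, INIT_HALT, and /proc/uptime. A
rough Python restatement of that formatting logic, useful for seeing the mode mapping at a glance
(function name and defaults are illustrative only):

    import time

    def shutdown_message(runlevel, init_halt, uptime_seconds=None):
        # runlevel(7): INIT_HALT is HALT or POWEROFF when the runlevel is 0
        if runlevel == "6":
            mode = "reboot"
        elif runlevel == "0":
            mode = {"HALT": "halt", "POWEROFF": "poweroff"}.get(
                init_halt, "shutdown-unknown")
        else:
            mode = "shutdown-unknown"
        up = ""
        if uptime_seconds is not None:
            up = " [up %ds]" % int(uptime_seconds)
        date = time.strftime("%a %b %d %H:%M:%S UTC %Y", time.gmtime())
        return "%s: shutting down for %s%s." % (date, mode, up)

    # shutdown_message("0", "POWEROFF", 123.4)
    # -> "<date>: shutting down for poweroff [up 123s]."
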
diff --git a/debian/patches/lp-1066115-landscape-install-fix-perms.patch b/debian/patches/lp-1066115-landscape-install-fix-perms.patch
new file mode 100644
index 00000000..5708e212
--- /dev/null
+++ b/debian/patches/lp-1066115-landscape-install-fix-perms.patch
@@ -0,0 +1,34 @@
+Author: Scott Moser <smoser@ubuntu.com>
+Origin: upstream
+Bug: https://launchpad.net/bugs/1066115
+Applied-Upstream: revno 687
+Description: do nothing if config not given. install landscape-client.
+ Skip all the work when there is no config section.
+ Then enforce the package installation, which will create the config file with
+ the proper permissions.
+--- a/cloudinit/CloudConfig/cc_landscape.py
++++ b/cloudinit/CloudConfig/cc_landscape.py
+@@ -51,6 +51,11 @@ def handle(_name, cfg, _cloud, log, _arg
+ if not isinstance(ls_cloudcfg, dict):
+ raise(Exception("'landscape' existed in config, but not a dict"))
+
++ if not ls_cloudcfg:
++ return
++
++ util.install_packages(["landscape-client"])
++
+ merged = mergeTogether([lsc_builtincfg, lsc_client_cfg_file, ls_cloudcfg])
+
+ if not os.path.isdir(os.path.dirname(lsc_client_cfg_file)):
+@@ -59,9 +64,8 @@ def handle(_name, cfg, _cloud, log, _arg
+ with open(lsc_client_cfg_file, "w") as fp:
+ merged.write(fp)
+
+- if ls_cloudcfg:
+- with open(lsc_client_default_file, "w") as fp:
+- fp.write("RUN=1\n")
++ with open(lsc_client_default_file, "w") as fp:
++ fp.write("RUN=1\n")
+
+ log.debug("updated %s" % lsc_client_cfg_file)
+
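The shape of the fix is: bail out early when there is no 'landscape' section, and install the package
before writing the merged config so the config file inherits the permissions set by the packaging
(the point of LP: #1066115). A condensed, hypothetical sketch of that flow, with install_packages and
write_config standing in for cloud-init's real helpers:

    def handle_landscape(cfg, install_packages, write_config,
                         default_file="/etc/default/landscape-client"):
        ls_cfg = cfg.get("landscape", {})
        if not isinstance(ls_cfg, dict):
            raise TypeError("'landscape' existed in config, but is not a dict")
        if not ls_cfg:
            return  # nothing requested: do not install or touch any files

        # install first so the package creates its config file with the
        # permissions it chose; only then merge and write our settings
        install_packages(["landscape-client"])
        write_config(ls_cfg)

        # once config was provided, enable the service
        with open(default_file, "w") as fp:
            fp.write("RUN=1\n")
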
diff --git a/debian/patches/lp-1070345-landscape-restart-after-change.patch b/debian/patches/lp-1070345-landscape-restart-after-change.patch
new file mode 100644
index 00000000..90b7864b
--- /dev/null
+++ b/debian/patches/lp-1070345-landscape-restart-after-change.patch
@@ -0,0 +1,17 @@
+Author: Scott Moser <smoser@ubuntu.com>
+Origin: upstream
+Bug: https://launchpad.net/bugs/1070345
+Applied-Upstream: revno 692
+Description: restart the landscape-client service if a change was made
+ After modifying the landscape-client configuration file, we need to
+ restart the service to make sure the change was picked up.
+--- a/cloudinit/CloudConfig/cc_landscape.py
++++ b/cloudinit/CloudConfig/cc_landscape.py
+@@ -68,6 +68,7 @@ def handle(_name, cfg, _cloud, log, _arg
+ fp.write("RUN=1\n")
+
+ log.debug("updated %s" % lsc_client_cfg_file)
++ util.subp(["service", "landscape-client", "restart"])
+
+
+ def mergeTogether(objs):
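The patch itself is a one-liner, but the idea generalizes: after rewriting a service's configuration,
restart the service so the daemon actually picks the change up. A hedged sketch using subprocess
(the patch calls cloud-init's own util.subp wrapper instead):

    import subprocess

    def restart_service(name):
        # equivalent of util.subp(["service", name, "restart"]);
        # check=True raises on a non-zero exit instead of silently
        # leaving a stale daemon running
        subprocess.run(["service", name, "restart"], check=True)

    # restart_service("landscape-client")
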
diff --git a/debian/patches/lp-1073077-zsh-workaround-for-locale_warn.patch b/debian/patches/lp-1073077-zsh-workaround-for-locale_warn.patch
new file mode 100644
index 00000000..014b0e35
--- /dev/null
+++ b/debian/patches/lp-1073077-zsh-workaround-for-locale_warn.patch
@@ -0,0 +1,45 @@
+Author: Scott Moser <smoser@ubuntu.com>
+Bug: https://launchpad.net/bugs/1073077
+Applied-Upstream: revno 702
+Description: avoid warning when user's shell is zsh (LP: #1073077)
+ improve zsh support in tools/Z99-cloud-locale-test.sh
+ .
+ This change does 2 things:
+ * minor clean up some local variable usage and definition
+ * make Z99-cloud-locale-test.sh actually work when shell is zsh
+ .
+ the last is done by detecting if this is zsh, and if so, changing the zsh
+ behavior for this function to emulate sh. As a result of the '-L' in
+ 'emulate -L sh', we have no negative effect on the user's shell.
+--- a/tools/Z99-cloud-locale-test.sh
++++ b/tools/Z99-cloud-locale-test.sh
+@@ -10,10 +10,13 @@
+ #
+
+ locale_warn() {
+- local cr="
+-"
+- local bad_names="" bad_lcs="" key="" value="" var=""
++ local bad_names="" bad_lcs="" key="" val="" var="" vars=""
+ local w1 w2 w3 w4 remain
++
++ # if shell is zsh, act like sh only for this function (-L).
++ # The behavior change will not permanently affect the user's shell.
++ [ "${ZSH_NAME+zsh}" = "zsh" ] && emulate -L sh
++
+ # locale is expected to output either:
+ # VARIABLE=
+ # VARIABLE="value"
+@@ -32,9 +35,9 @@ locale_warn() {
+ for bad in $bad_names; do
+ for var in ${vars}; do
+ [ "${bad}" = "${var%=*}" ] || continue
+- value=${var#*=}
+- [ "${bad_lcs#* ${value}}" = "${bad_lcs}" ] &&
+- bad_lcs="${bad_lcs} ${value}"
++ val=${var#*=}
++ [ "${bad_lcs#* ${val}}" = "${bad_lcs}" ] &&
++ bad_lcs="${bad_lcs} ${val}"
+ break
+ done
+ done
diff --git a/debian/patches/lp-974509-detect-dns-server-redirection.patch b/debian/patches/lp-974509-detect-dns-server-redirection.patch
new file mode 100644
index 00000000..a2fe5873
--- /dev/null
+++ b/debian/patches/lp-974509-detect-dns-server-redirection.patch
@@ -0,0 +1,152 @@
+Author: Scott Moser <smoser@ubuntu.com>
+Bug: https://launchpad.net/bugs/974509
+Applied-Upstream: revno 612 and 616
+Description: protect against dns-redirection; disable default mirror search
+ revno 612: add protection against dns-redirection to is_resolvable
+ .
+ In an effort to make the EC2 Datasource's search under ec2.archive.ubuntu.com
+ resilient against dns redirection, we add some code to is_resolvable.
+ .
+ One future enhancement for this would be to protect against server side
+ round robin results. Ie, if 'bogus-entry' returned 10.0.1.1 one time, and then
+ 10.0.1.2 a second time. We could check if results where within the same 3
+ octets, and assume invalid if they were.
+ .
+ revno 616: do not search for mirror named '<distro>-mirror' in dns by default
+ .
+ As described in the bug, enough non-cloud users experienced issues with
+ cloud-init selecting a mirror due to consumer level network providers using
+ dns server redirection.
+ .
+ We're turning this off by default.
+--- a/cloudinit/util.py
++++ b/cloudinit/util.py
+@@ -28,8 +28,10 @@ from Cheetah.Template import Template
+ import urllib2
+ import urllib
+ import logging
++import random
+ import re
+ import socket
++import string
+ import sys
+ import time
+ import tempfile
+@@ -42,6 +44,8 @@ try:
+ except ImportError:
+ HAVE_LIBSELINUX = False
+
++_DNS_REDIRECT_IP = None
++LOG = logging.getLogger("cloudinit")
+
+ def read_conf(fname):
+ try:
+@@ -510,12 +514,12 @@ def shellify(cmdlist):
+ return content
+
+
+-def dos2unix(string):
++def dos2unix(contents):
+ # find first end of line
+- pos = string.find('\n')
+- if pos <= 0 or string[pos - 1] != '\r':
+- return(string)
+- return(string.replace('\r\n', '\n'))
++ pos = contents.find('\n')
++ if pos <= 0 or contents[pos - 1] != '\r':
++ return(contents)
++ return(contents.replace('\r\n', '\n'))
+
+
+ def is_container():
+@@ -634,11 +638,49 @@ def get_fqdn_from_hosts(hostname, filena
+
+ return fqdn
+
++def rand_str(strlen=32, select_from=None):
++ if not select_from:
++ select_from = string.letters + string.digits
++ return "".join([random.choice(select_from) for _x in range(0, strlen)])
+
+ def is_resolvable(name):
+- """ determine if a url is resolvable, return a boolean """
++ """ determine if a url is resolvable, return a boolean
++ This also attempts to be resilient against dns redirection.
++
++ Note, that normal nsswitch resolution is used here. So in order
++ to avoid any utilization of 'search' entries in /etc/resolv.conf
++ we have to append '.'.
++
++ The top level 'invalid' domain is invalid per RFC. And example.com
++ should also not exist. The random entry will be resolved inside
++ the search list.
++ """
++ global _DNS_REDIRECT_IP # pylint: disable=W0603
++ if _DNS_REDIRECT_IP is None:
++ badips = set()
++ badnames = ("does-not-exist.example.com.", "example.invalid.",
++ rand_str())
++ badresults = {}
++ for iname in badnames:
++ try:
++ result = socket.getaddrinfo(iname, None, 0, 0,
++ socket.SOCK_STREAM, socket.AI_CANONNAME)
++ badresults[iname] = []
++ for (_fam, _stype, _proto, cname, sockaddr) in result:
++ badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
++ badips.add(sockaddr[0])
++ except socket.gaierror:
++ pass
++ _DNS_REDIRECT_IP = badips
++ if badresults:
++ LOG.debug("detected dns redirection: %s" % badresults)
++
+ try:
+- socket.getaddrinfo(name, None)
++ result = socket.getaddrinfo(name, None)
++ # check first result's sockaddr field
++ addr = result[0][4][0]
++ if addr in _DNS_REDIRECT_IP:
++ return False
+ return True
+ except socket.gaierror:
+ return False
+--- a/cloudinit/CloudConfig/cc_apt_update_upgrade.py
++++ b/cloudinit/CloudConfig/cc_apt_update_upgrade.py
+@@ -225,7 +225,7 @@ def find_apt_mirror(cloud, cfg):
+ if mydom:
+ doms.append(".%s" % mydom)
+
+- if not mirror:
++ if not mirror and cfg.get(cfg, "apt_mirror_search_dns", False):
+ doms.extend((".localdomain", "",))
+
+ mirror_list = []
+--- a/doc/examples/cloud-config.txt
++++ b/doc/examples/cloud-config.txt
+@@ -28,11 +28,14 @@ apt_upgrade: true
+ # then use the mirror provided by the DataSource found.
+ # In EC2, that means using <region>.ec2.archive.ubuntu.com
+ #
+-# if no mirror is provided by the DataSource, then search
+-# for dns names '<distro>-mirror' in each of
++# if no mirror is provided by the DataSource, and 'apt_mirror_search_dns' is
++# true, then search for dns names '<distro>-mirror' in each of
+ # - fqdn of this host per cloud metadata
+ # - localdomain
+ # - no domain (which would search domains listed in /etc/resolv.conf)
++# If there is a dns entry for <distro>-mirror, then it is assumed that there
++# is a distro mirror at http://<distro>-mirror.<domain>/<distro>
++#
+ # That gives the cloud provider the opportunity to set mirrors of a distro
+ # up and expose them only by creating dns entries.
+ #
+@@ -42,6 +45,8 @@ apt_mirror_search:
+ - http://local-mirror.mydomain
+ - http://archive.ubuntu.com
+
++apt_mirror_search_dns: False
++
+ # apt_proxy (configure Acquire::HTTP::Proxy)
+ apt_proxy: http://my.apt.proxy:3128
+
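The core trick in the patch above is to probe names that should never resolve; if the resolver returns
addresses for them anyway, those addresses identify a redirecting ("helpful") DNS server, and any later
lookup that lands on one of them is treated as unresolvable. A standalone sketch of that probe,
independent of cloud-init's internals:

    import random
    import socket
    import string

    def _bogus_names():
        # names an honest resolver must fail: the reserved .invalid TLD,
        # a host that should not exist under example.com, and a random
        # label that exercises the resolv.conf search list
        rand = "".join(random.choice(string.ascii_lowercase) for _ in range(32))
        return ("does-not-exist.example.com.", "example.invalid.", rand)

    def detect_redirect_ips():
        """Collect the addresses a redirecting resolver returns for bogus names."""
        bad_ips = set()
        for name in _bogus_names():
            try:
                for _fam, _stype, _proto, _canon, sockaddr in socket.getaddrinfo(
                        name, None, 0, socket.SOCK_STREAM):
                    bad_ips.add(sockaddr[0])
            except socket.gaierror:
                pass  # expected: an honest resolver reports NXDOMAIN
        return bad_ips

    REDIRECT_IPS = None

    def is_resolvable(name):
        """True if 'name' resolves to something other than a redirect target."""
        global REDIRECT_IPS
        if REDIRECT_IPS is None:
            REDIRECT_IPS = detect_redirect_ips()
        try:
            addr = socket.getaddrinfo(name, None)[0][4][0]
            return addr not in REDIRECT_IPS
        except socket.gaierror:
            return False

The future enhancement mentioned in the description (treating probe results that fall in the same first
three octets as suspicious) would slot into detect_redirect_ips().
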
diff --git a/debian/patches/lp-978127-maas-oauth-fix-bad-clock.patch b/debian/patches/lp-978127-maas-oauth-fix-bad-clock.patch
new file mode 100644
index 00000000..11d1e5a5
--- /dev/null
+++ b/debian/patches/lp-978127-maas-oauth-fix-bad-clock.patch
@@ -0,0 +1,165 @@
+Author: Scott Moser <smoser@ubuntu.com>
+Bug: https://launchpad.net/bugs/978127
+Applied-Upstream: revno 666 and 678
+Description: DataSourceMAAS: adjust oauth request timestamps on 401 or 403
+ In the event of a 401 or 403 (Unauthorized) in oauth, try to set an
+ 'oauth_clockskew' variable. In future headers, use a time created by
+ 'time.time() + self.oauth_clockskew'. The idea here is that if the local time
+ is bad (or even if the server time is bad) we will essentially use something
+ that should be similar to the remote clock.
+--- a/cloudinit/DataSourceMAAS.py
++++ b/cloudinit/DataSourceMAAS.py
+@@ -20,6 +20,7 @@ import cloudinit.DataSource as DataSourc
+
+ from cloudinit import seeddir as base_seeddir
+ from cloudinit import log
++from email.utils import parsedate
+ import cloudinit.util as util
+ import errno
+ import oauth.oauth as oauth
+@@ -29,6 +30,7 @@ import time
+
+
+ MD_VERSION = "2012-03-01"
++LOG = log
+
+
+ class DataSourceMAAS(DataSource.DataSource):
+@@ -42,6 +44,7 @@ class DataSourceMAAS(DataSource.DataSour
+ """
+ seeddir = base_seeddir + '/maas'
+ baseurl = None
++ oauth_clockskew = None
+
+ def __str__(self):
+ return("DataSourceMAAS[%s]" % self.baseurl)
+@@ -92,9 +95,14 @@ class DataSourceMAAS(DataSource.DataSour
+
+ consumer_secret = mcfg.get('consumer_secret', "")
+
++ timestamp = None
++ if self.oauth_clockskew:
++ timestamp = int(time.time()) + self.oauth_clockskew
++
+ return(oauth_headers(url=url, consumer_key=mcfg['consumer_key'],
+ token_key=mcfg['token_key'], token_secret=mcfg['token_secret'],
+- consumer_secret=consumer_secret))
++ consumer_secret=consumer_secret,
++ timestamp=timestamp))
+
+ def wait_for_metadata_service(self, url):
+ mcfg = self.ds_cfg
+@@ -119,7 +127,8 @@ class DataSourceMAAS(DataSource.DataSour
+ starttime = time.time()
+ check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
+ url = util.wait_for_url(urls=[check_url], max_wait=max_wait,
+- timeout=timeout, status_cb=log.warn,
++ timeout=timeout, status_cb=LOG.warn,
++ exception_cb=self._except_cb,
+ headers_cb=self.md_headers)
+
+ if url:
+@@ -130,6 +139,26 @@ class DataSourceMAAS(DataSource.DataSour
+
+ return (bool(url))
+
++ def _except_cb(self, msg, exception):
++ if not (isinstance(exception, urllib2.HTTPError) and
++ (exception.code == 403 or exception.code == 401)):
++ return
++ if 'date' not in exception.headers:
++ LOG.warn("date field not in %d headers" % exception.code)
++ return
++
++ date = exception.headers['date']
++
++ try:
++ ret_time = time.mktime(parsedate(date))
++ except:
++ LOG.warn("failed to convert datetime '%s'")
++ return
++
++ self.oauth_clockskew = int(ret_time - time.time())
++ LOG.warn("set oauth clockskew to %d" % self.oauth_clockskew)
++ return
++
+
+ def read_maas_seed_dir(seed_d):
+ """
+@@ -220,13 +249,20 @@ def check_seed_contents(content, seed):
+ return(userdata, md)
+
+
+-def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret):
++def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
++ timestamp=None):
+ consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
+ token = oauth.OAuthToken(token_key, token_secret)
++
++ if timestamp is None:
++ ts = int(time.time())
++ else:
++ ts = timestamp
++
+ params = {
+ 'oauth_version': "1.0",
+ 'oauth_nonce': oauth.generate_nonce(),
+- 'oauth_timestamp': int(time.time()),
++ 'oauth_timestamp': ts,
+ 'oauth_token': token.key,
+ 'oauth_consumer_key': consumer.key,
+ }
+--- a/cloudinit/util.py
++++ b/cloudinit/util.py
+@@ -756,7 +756,7 @@ def mount_callback_umount(device, callba
+
+
+ def wait_for_url(urls, max_wait=None, timeout=None,
+- status_cb=None, headers_cb=None):
++ status_cb=None, headers_cb=None, exception_cb=None):
+ """
+ urls: a list of urls to try
+ max_wait: roughly the maximum time to wait before giving up
+@@ -766,6 +766,8 @@ def wait_for_url(urls, max_wait=None, ti
+ status_cb: call method with string message when a url is not available
+ headers_cb: call method with single argument of url to get headers
+ for request.
++ exception_cb: call method with 2 arguments 'msg' (per status_cb) and
++ 'exception', the exception that occurred.
+
+ the idea of this routine is to wait for the EC2 metdata service to
+ come up. On both Eucalyptus and EC2 we have seen the case where
+@@ -817,9 +819,15 @@ def wait_for_url(urls, max_wait=None, ti
+
+ req = urllib2.Request(url, data=None, headers=headers)
+ resp = urllib2.urlopen(req, timeout=timeout)
+- if resp.read() != "":
++ contents = resp.read()
++ if not contents:
++ reason = "empty data [%s]" % (resp.code)
++ e = ValueError(reason)
++ elif not (resp.code >= 200 and resp.code < 400):
++ reason = "bad status code [%s]" % (resp.code)
++ e = ValueError(reason)
++ else:
+ return url
+- reason = "empty data [%s]" % resp.getcode()
+ except urllib2.HTTPError as e:
+ reason = "http error [%s]" % e.code
+ except urllib2.URLError as e:
+@@ -829,9 +837,12 @@ def wait_for_url(urls, max_wait=None, ti
+ except Exception as e:
+ reason = "unexpected error [%s]" % e
+
+- status_cb("'%s' failed [%s/%ss]: %s" %
+- (url, int(time.time() - starttime), max_wait,
+- reason))
++ status_msg = ("'%s' failed [%s/%ss]: %s" %
++ (url, int(time.time() - starttime), max_wait,
++ reason))
++ status_cb(status_msg)
++ if exception_cb:
++ exception_cb(msg=status_msg, exception=e)
+
+ if timeup(max_wait, starttime):
+ break
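The clock-skew handling above boils down to: when the server rejects the OAuth signature with 401/403,
trust the HTTP Date header it sent back, remember the offset from local time, and build later
oauth_timestamp values from "local time + offset". A minimal sketch of that bookkeeping (class and
method names are illustrative):

    import time
    from email.utils import parsedate

    class OauthClockSkew:
        """Track the local-vs-server clock offset learned from 401/403 responses."""

        def __init__(self):
            self.skew = 0  # seconds to add to the local clock

        def note_unauthorized(self, date_header):
            # date_header is the HTTP 'Date' header from the failed response,
            # e.g. "Mon, 12 Nov 2012 22:01:54 GMT"
            parsed = parsedate(date_header)
            if parsed is None:
                return  # unparseable date: keep the current skew
            self.skew = int(time.mktime(parsed) - time.time())

        def oauth_timestamp(self):
            # value to place in the 'oauth_timestamp' request parameter
            return int(time.time()) + self.skew
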
diff --git a/debian/patches/rework-mirror-selection.patch b/debian/patches/rework-mirror-selection.patch
new file mode 100644
index 00000000..37e94acf
--- /dev/null
+++ b/debian/patches/rework-mirror-selection.patch
@@ -0,0 +1,520 @@
+Author: Scott Moser <smoser@ubuntu.com>
+Bug: https://launchpad.net/bugs/1028501
+Bug: https://launchpad.net/bugs/1037727
+Bug: https://launchpad.net/bugs/1006963
+Applied-Upstream: revno 630
+Description: rework package mirror selection
+ There are several changes here. They were pulled from revno 630 on trunk.
+ In order to cherry-pick, most changes are kept inside of
+ cloudinit/CloudConfig/cc_apt_update_upgrade.py. As a result, these
+ changes are Ubuntu specific, while still generally in keeping with upstream.
+ * Default DataSource has get_availability_zone
+ * get_package_mirror_info
+ * get_package_mirror_info returns a dict of name:mirror
+ this is to facilitate use of 'security' and 'primary' archive.
+ * this supports searching based on templates. Any template
+ that references undefined values is skipped. These templates
+ can contain 'availability_zone' (LP: #1037727)
+ * distro's mirrors can be arch specific (LP: #1028501)
+ * 'ec2_region' substitution variable is present only if this
+ appears (by availability-zone naming convention) to be in EC2.
+ * rename_apt_lists supports the "mirror_info" rather than single mirror
+ * generate_sources_list supports mirror_info, and as a result, the
+ ubuntu mirrors reference '$security' rather than security (LP: #1006963)
+ * remove the DataSourceEc2 specific mirror selection, but instead
+ rely on the above filtering, and the fact that 'ec2_region' is only
+ defined if the availability_zone looks like a ec2 az.
+ * package_mirrors is now required in the system_info config, a dict like:
+ package_mirrors:
+ - arches: [i386, amd64]
+ failsafe:
+ primary: http://archive.ubuntu.com/ubuntu
+ security: http://security.ubuntu.com/ubuntu
+ search:
+ primary:
+ - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/
+ - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/
+ security: []
+ - arches: [armhf, armel, default]
+ failsafe:
+ primary: http://ports.ubuntu.com/ubuntu
+ security: http://ports.ubuntu.com/ubuntu
+--- a/templates/sources.list.tmpl
++++ b/templates/sources.list.tmpl
+@@ -52,9 +52,9 @@ deb-src $mirror $codename-updates univer
+ # deb http://archive.canonical.com/ubuntu $codename partner
+ # deb-src http://archive.canonical.com/ubuntu $codename partner
+
+-deb http://security.ubuntu.com/ubuntu $codename-security main
+-deb-src http://security.ubuntu.com/ubuntu $codename-security main
+-deb http://security.ubuntu.com/ubuntu $codename-security universe
+-deb-src http://security.ubuntu.com/ubuntu $codename-security universe
+-# deb http://security.ubuntu.com/ubuntu $codename-security multiverse
+-# deb-src http://security.ubuntu.com/ubuntu $codename-security multiverse
++deb $security $codename-security main
++deb-src $security $codename-security main
++deb $security $codename-security universe
++deb-src $security $codename-security universe
++# deb $security $codename-security multiverse
++# deb-src $security $codename-security multiverse
+--- a/cloudinit/DataSource.py
++++ b/cloudinit/DataSource.py
+@@ -151,6 +151,9 @@ class DataSource:
+ else:
+ return hostname
+
++ def get_availability_zone(self):
++ return(None)
++
+
+ # return a list of classes that have the same depends as 'depends'
+ # iterate through cfg_list, loading "DataSourceCollections" modules
+--- a/config/cloud.cfg
++++ b/config/cloud.cfg
+@@ -40,3 +40,19 @@ cloud_final_modules:
+ - keys-to-console
+ - phone-home
+ - final-message
++
++system_info:
++ package_mirrors:
++ - arches: [i386, amd64]
++ failsafe:
++ primary: http://archive.ubuntu.com/ubuntu
++ security: http://security.ubuntu.com/ubuntu
++ search:
++ primary:
++ - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/
++ - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/
++ security: []
++ - arches: [armhf, armel, default]
++ failsafe:
++ primary: http://ports.ubuntu.com/ubuntu-ports
++ security: http://ports.ubuntu.com/ubuntu-ports
+--- /dev/null
++++ b/tests/unittests/test_archive_mirrors.py
+@@ -0,0 +1,117 @@
++from mocker import MockerTestCase
++
++from cloudinit.CloudConfig import cc_apt_update_upgrade
++
++unknown_arch_info = {
++ 'arches': ['default'],
++ 'failsafe': {'primary': 'http://fs-primary-default',
++ 'security': 'http://fs-security-default'}
++}
++
++package_mirrors = [
++ {'arches': ['i386', 'amd64'],
++ 'failsafe': {'primary': 'http://fs-primary-intel',
++ 'security': 'http://fs-security-intel'},
++ 'search': {
++ 'primary': ['http://%(ec2_region)s.ec2/',
++ 'http://%(availability_zone)s.clouds/'],
++ 'security': ['http://security-mirror1-intel',
++ 'http://security-mirror2-intel']}},
++ {'arches': ['armhf', 'armel'],
++ 'failsafe': {'primary': 'http://fs-primary-arm',
++ 'security': 'http://fs-security-arm'}},
++ unknown_arch_info
++]
++
++gpmi = cc_apt_update_upgrade._get_package_mirror_info # pylint: disable=W0212
++gapmi = cc_apt_update_upgrade._get_arch_package_mirror_info # pylint: disable=W0212
++
++
++class TestGenericDistro(MockerTestCase):
++
++ def return_first(self, mlist):
++ if not mlist:
++ return None
++ return mlist[0]
++
++ def return_second(self, mlist):
++ if not mlist:
++ return None
++ return mlist[1]
++
++ def return_none(self, _mlist):
++ return None
++
++ def return_last(self, mlist):
++ if not mlist:
++ return None
++ return(mlist[-1])
++
++ def setUp(self):
++ super(TestGenericDistro, self).setUp()
++ # Make a temp directoy for tests to use.
++ self.tmp = self.makeDir()
++
++ def test_arch_package_mirror_info_unknown(self):
++ """for an unknown arch, we should get back that with arch 'default'."""
++ arch_mirrors = gapmi(package_mirrors, arch="unknown")
++ self.assertEqual(unknown_arch_info, arch_mirrors)
++
++ def test_arch_package_mirror_info_known(self):
++ arch_mirrors = gapmi(package_mirrors, arch="amd64")
++ self.assertEqual(package_mirrors[0], arch_mirrors)
++
++ def test_get_package_mirror_info_az_ec2(self):
++ arch_mirrors = gapmi(package_mirrors, arch="amd64")
++
++ results = gpmi(arch_mirrors, availability_zone="us-east-1a",
++ mirror_filter=self.return_first)
++ self.assertEqual(results,
++ {'primary': 'http://us-east-1.ec2/',
++ 'security': 'http://security-mirror1-intel'})
++
++ results = gpmi(arch_mirrors, availability_zone="us-east-1a",
++ mirror_filter=self.return_second)
++ self.assertEqual(results,
++ {'primary': 'http://us-east-1a.clouds/',
++ 'security': 'http://security-mirror2-intel'})
++
++ results = gpmi(arch_mirrors, availability_zone="us-east-1a",
++ mirror_filter=self.return_none)
++ self.assertEqual(results, package_mirrors[0]['failsafe'])
++
++ def test_get_package_mirror_info_az_non_ec2(self):
++ arch_mirrors = gapmi(package_mirrors, arch="amd64")
++
++ results = gpmi(arch_mirrors, availability_zone="nova.cloudvendor",
++ mirror_filter=self.return_first)
++ self.assertEqual(results,
++ {'primary': 'http://nova.cloudvendor.clouds/',
++ 'security': 'http://security-mirror1-intel'})
++
++ results = gpmi(arch_mirrors, availability_zone="nova.cloudvendor",
++ mirror_filter=self.return_last)
++ self.assertEqual(results,
++ {'primary': 'http://nova.cloudvendor.clouds/',
++ 'security': 'http://security-mirror2-intel'})
++
++ def test_get_package_mirror_info_none(self):
++ arch_mirrors = gapmi(package_mirrors, arch="amd64")
++
++ # because both search entries here replacement based on
++ # availability-zone, the filter will be called with an empty list and
++ # failsafe should be taken.
++ results = gpmi(arch_mirrors, availability_zone=None,
++ mirror_filter=self.return_first)
++ self.assertEqual(results,
++ {'primary': 'http://fs-primary-intel',
++ 'security': 'http://security-mirror1-intel'})
++
++ results = gpmi(arch_mirrors, availability_zone=None,
++ mirror_filter=self.return_last)
++ self.assertEqual(results,
++ {'primary': 'http://fs-primary-intel',
++ 'security': 'http://security-mirror2-intel'})
++
++
++# vi: ts=4 expandtab
+--- a/cloudinit/CloudConfig/cc_apt_update_upgrade.py
++++ b/cloudinit/CloudConfig/cc_apt_update_upgrade.py
+@@ -24,24 +24,35 @@ import traceback
+ import os
+ import glob
+ import cloudinit.CloudConfig as cc
++import re
+
+
+-def handle(_name, cfg, cloud, log, _args):
++def handle(name, cfg, cloud, log, _args):
+ update = util.get_cfg_option_bool(cfg, 'apt_update', False)
+ upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False)
+
+ release = get_release()
+
+- mirror = find_apt_mirror(cloud, cfg)
++ mirrors = find_apt_mirror_info(cloud, cfg, log)
+
+- log.debug("selected mirror at: %s" % mirror)
++ if not mirrors or "primary" not in mirrors:
++ log.debug(("Skipping module named %s,"
++ " no package 'mirror' located"), name)
++ return
++
++ # backwards compatibility
++ mirror = mirrors["primary"]
++ mirrors["mirror"] = mirror
++
++ log.debug("mirror info: %s" % mirrors)
+
+ if not util.get_cfg_option_bool(cfg, \
+ 'apt_preserve_sources_list', False):
+- generate_sources_list(release, mirror)
+- old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror', \
+- "archive.ubuntu.com/ubuntu")
+- rename_apt_lists(old_mir, mirror)
++ generate_sources_list(release, mirrors)
++ old_mirrors = cfg.get('apt_old_mirrors',
++ {"primary": "archive.ubuntu.com/ubuntu",
++ "security": "security.ubuntu.com/ubuntu"})
++ rename_apt_lists(old_mirrors, mirrors)
+
+ # set up proxy
+ proxy = cfg.get("apt_proxy", None)
+@@ -58,8 +69,10 @@ def handle(_name, cfg, cloud, log, _args
+
+ # process 'apt_sources'
+ if 'apt_sources' in cfg:
+- errors = add_sources(cfg['apt_sources'],
+- {'MIRROR': mirror, 'RELEASE': release})
++ params = mirrors
++ params['RELEASE'] = release
++ params['MIRROR'] = mirror
++ errors = add_sources(cloud, cfg['apt_sources'], params)
+ for e in errors:
+ log.warn("Source Error: %s\n" % ':'.join(e))
+
+@@ -117,14 +130,18 @@ def mirror2lists_fileprefix(mirror):
+ return string
+
+
+-def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"):
+- oprefix = "%s/%s" % (lists_d, mirror2lists_fileprefix(omirror))
+- nprefix = "%s/%s" % (lists_d, mirror2lists_fileprefix(new_mirror))
+- if(oprefix == nprefix):
+- return
+- olen = len(oprefix)
+- for filename in glob.glob("%s_*" % oprefix):
+- os.rename(filename, "%s%s" % (nprefix, filename[olen:]))
++def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"):
++ for (name, omirror) in old_mirrors.iteritems():
++ nmirror = new_mirrors.get(name)
++ if not nmirror:
++ continue
++ oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror))
++ nprefix = os.path.join(lists_d, mirror2lists_fileprefix(nmirror))
++ if oprefix == nprefix:
++ continue
++ olen = len(oprefix)
++ for filename in glob.glob("%s_*" % oprefix):
++ os.rename(filename, "%s%s" % (nprefix, filename[olen:]))
+
+
+ def get_release():
+@@ -133,9 +150,11 @@ def get_release():
+ return(str(stdout).strip())
+
+
+-def generate_sources_list(codename, mirror):
+- util.render_to_file('sources.list', '/etc/apt/sources.list', \
+- {'mirror': mirror, 'codename': codename})
++def generate_sources_list(codename, mirrors):
++ params = {'codename': codename}
++ for k in mirrors:
++ params[k] = mirrors[k]
++ util.render_to_file('sources.list', '/etc/apt/sources.list', params)
+
+
+ def add_sources(srclist, searchList=None):
+@@ -194,48 +213,132 @@ def add_sources(srclist, searchList=None
+ return(elst)
+
+
+-def find_apt_mirror(cloud, cfg):
++def find_apt_mirror_info(cloud, cfg, log):
+ """ find an apt_mirror given the cloud and cfg provided """
+
+ # TODO: distro and defaults should be configurable
+ distro = "ubuntu"
+- defaults = {
+- 'ubuntu': "http://archive.ubuntu.com/ubuntu",
+- 'debian': "http://archive.debian.org/debian",
++
++ # this is used if cfg['system_info']['package_mirrors'] is not present
++ def_mirror_info = {
++ 'ubuntu': {
++ 'primary': "http://archive.ubuntu.com/ubuntu",
++ 'security': "http://security.ubuntu.com/ubuntu"
++ }
+ }
+ mirror = None
+
+- cfg_mirror = cfg.get("apt_mirror", None)
+- if cfg_mirror:
+- mirror = cfg["apt_mirror"]
+- elif "apt_mirror_search" in cfg:
+- mirror = util.search_for_mirror(cfg['apt_mirror_search'])
+- else:
+- if cloud:
+- mirror = cloud.get_mirror()
++ # this is less preferred way of specifying mirror preferred would be to
++ # use the distro's search or package_mirror.
++ mirror = cfg.get("apt_mirror", None)
++
++ search = cfg.get("apt_mirror_search", None)
++ if not mirror and search:
++ mirror = util.search_for_mirror(search)
+
++ if (not mirror and
++ util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)):
+ mydom = ""
+
+ doms = []
+
+- if not mirror and cloud:
+- # if we have a fqdn, then search its domain portion first
+- (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
+- mydom = ".".join(fqdn.split(".")[1:])
+- if mydom:
+- doms.append(".%s" % mydom)
+-
+- if not mirror and cfg.get(cfg, "apt_mirror_search_dns", False):
+- doms.extend((".localdomain", "",))
+-
+- mirror_list = []
+- mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
+- for post in doms:
+- mirror_list.append(mirrorfmt % post)
+-
+- mirror = util.search_for_mirror(mirror_list)
+-
+- if not mirror:
+- mirror = defaults[distro]
+-
+- return mirror
++ # if we have a fqdn, then search its domain portion first
++ (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
++ mydom = ".".join(fqdn.split(".")[1:])
++ if mydom:
++ doms.append(".%s" % mydom)
++
++ doms.extend((".localdomain", "",))
++
++ mirror_list = []
++ mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
++ for post in doms:
++ mirror_list.append(mirrorfmt % (post))
++
++ mirror = util.search_for_mirror(mirror_list)
++
++ try:
++ pmirrors = cfg['system_info']['package_mirrors']
++ az = cloud.datasource.get_availability_zone()
++ mirror_info = get_package_mirror_info(package_mirrors=pmirrors,
++ availability_zone=az)
++ except Exception as e:
++ util.logexc(log)
++ log.warn("Failed to get mirror info, falling back to default" %
++ def_mirror_info)
++ mirror_info = def_mirror_info
++
++ # this is a bit strange.
++ # if mirror is set, then one of the legacy options above set it
++ # but they do not cover security. so we need to get that from
++ # get_package_mirror_info
++ if mirror:
++ mirror_info.update({'primary': mirror})
++
++ return mirror_info
++
++## put together from trunk's cloudinit/distros/__init__.py and
++## cloudinit/sources/__init__.py
++def get_package_mirror_info(package_mirrors,
++ availability_zone=None, arch=None):
++ if arch == None:
++ arch = get_primary_arch()
++ arch_info = _get_arch_package_mirror_info(package_mirrors, arch)
++
++ info = _get_package_mirror_info(mirror_info=arch_info,
++ availability_zone=availability_zone)
++ return info
++
++## taken from trunk's cloudinit/distros/debian.py (Distro)
++def get_primary_arch():
++ (arch, _err) = util.subp(['dpkg', '--print-architecture'])
++ return str(arch).strip()
++
++## taken from trunk's cloudinit/distros/__init__.py ##
++def _get_package_mirror_info(mirror_info, availability_zone=None,
++ mirror_filter=util.search_for_mirror):
++ # given a arch specific 'mirror_info' entry (from package_mirrors)
++ # search through the 'search' entries, and fallback appropriately
++ # return a dict with only {name: mirror} entries.
++
++ ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" %
++ "north|northeast|east|southeast|south|southwest|west|northwest")
++
++ subst = {}
++ if availability_zone:
++ subst['availability_zone'] = availability_zone
++
++ if availability_zone and re.match(ec2_az_re, availability_zone):
++ subst['ec2_region'] = "%s" % availability_zone[0:-1]
++
++ results = {}
++ for (name, mirror) in mirror_info.get('failsafe', {}).iteritems():
++ results[name] = mirror
++
++ for (name, searchlist) in mirror_info.get('search', {}).iteritems():
++ mirrors = []
++ for tmpl in searchlist:
++ try:
++ mirrors.append(tmpl % subst)
++ except KeyError:
++ pass
++
++ found = mirror_filter(mirrors)
++ if found:
++ results[name] = found
++
++ #LOG.debug("filtered distro mirror info: %s" % results)
++
++ return results
++
++## taken from trunk's cloudinit/distros/__init__.py
++def _get_arch_package_mirror_info(package_mirrors, arch):
++ # pull out the specific arch from a 'package_mirrors' config option
++ default = None
++ for item in package_mirrors:
++ arches = item.get("arches")
++ if arch in arches:
++ return item
++ if "default" in arches:
++ default = item
++ return default
+--- a/cloudinit/__init__.py
++++ b/cloudinit/__init__.py
+@@ -479,6 +479,9 @@ class CloudInit:
+ def get_cpath(self, name=None):
+ return(get_cpath(name))
+
++ def get_mirror_info(self, mirror_info):
++ return(self.datasource.get_mirror_info(mirror_info))
++
+
+ def initfs():
+ subds = ['scripts/per-instance', 'scripts/per-once', 'scripts/per-boot',
+--- a/cloudinit/DataSourceEc2.py
++++ b/cloudinit/DataSourceEc2.py
+@@ -66,26 +66,6 @@ class DataSourceEc2(DataSource.DataSourc
+ def get_availability_zone(self):
+ return(self.metadata['placement']['availability-zone'])
+
+- def get_local_mirror(self):
+- return(self.get_mirror_from_availability_zone())
+-
+- def get_mirror_from_availability_zone(self, availability_zone=None):
+- # availability is like 'us-west-1b' or 'eu-west-1a'
+- if availability_zone == None:
+- availability_zone = self.get_availability_zone()
+-
+- fallback = None
+-
+- if self.is_vpc():
+- return fallback
+-
+- try:
+- host = "%s.ec2.archive.ubuntu.com" % availability_zone[:-1]
+- socket.getaddrinfo(host, None, 0, socket.SOCK_STREAM)
+- return 'http://%s/ubuntu/' % host
+- except:
+- return fallback
+-
+ def wait_for_metadata_service(self):
+ mcfg = self.ds_cfg
+
diff --git a/debian/patches/series b/debian/patches/series
index 4dd7fc8f..be916d13 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1 +1,8 @@
fix-landscape-config.patch
+lp-1066115-landscape-install-fix-perms.patch
+lp-1070345-landscape-restart-after-change.patch
+lp-978127-maas-oauth-fix-bad-clock.patch
+lp-974509-detect-dns-server-redirection.patch
+lp-1018554-shutdown-message-to-console.patch
+lp-1073077-zsh-workaround-for-locale_warn.patch
+rework-mirror-selection.patch