author     Scott Moser <smoser@ubuntu.com>  2013-09-27 19:38:05 -0400
committer  Scott Moser <smoser@ubuntu.com>  2013-09-27 19:38:05 -0400
commit     f1b87f5bc5b83cbe9b9bfec99317dc1b7a0b865c (patch)
tree       25596b8dc52d4e355805dfdff2032763e57d73cc
parent     b1f3228052dfcdd57963e43b72d46a3a65c2fa2f (diff)
parent     ba7cc2b9fc5ff12b7eb613d1f1516fa35ec5ec03 (diff)
download   cloud-init-f1b87f5bc5b83cbe9b9bfec99317dc1b7a0b865c.tar.gz
Enable filesystem creation on Azure, many disk_setup cleanups
There are a lot of cleanups here around Azure, SmartOS and disk_setup. disk_setup now correctly resolves disk "aliases" (block device mappings, as EC2 provides them) anywhere a device name can be used. You can also specify these mappings to the Azure or SmartOS datasource in their datasource config (device_aliases). Also, stop the Azure tests from calling blkid repeatedly, which really pounded my laptop.
-rw-r--r--ChangeLog1
-rw-r--r--cloudinit/config/cc_disk_setup.py155
-rw-r--r--cloudinit/sources/DataSourceAzure.py26
-rw-r--r--cloudinit/sources/DataSourceSmartOS.py16
-rw-r--r--doc/examples/cloud-config-disk-setup.txt62
-rw-r--r--doc/sources/smartos/README.rst28
-rw-r--r--tests/unittests/test_datasource/test_azure.py60
-rw-r--r--tests/unittests/test_datasource/test_smartos.py28
8 files changed, 238 insertions, 138 deletions
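
As a quick orientation before the diffs: the core of this merge is translating device "aliases" (like 'ephemeral0') into real block devices before disk_setup runs. The following is a minimal standalone sketch of that translation, modeled on the new update_disk_setup_devices() helper in cc_disk_setup.py below; the sample alias mapping and disk_setup dict are illustrative only.

    # Simplified sketch of the alias translation introduced in cc_disk_setup.py.
    # 'tformer' stands in for cloud.device_name_to_device.
    def update_disk_setup_devices(disk_setup, tformer):
        for origname in list(disk_setup.keys()):
            transformed = tformer(origname)
            if transformed is None or transformed == origname:
                continue
            # re-key the entry under the real device, remembering the alias
            disk_setup[transformed] = disk_setup.pop(origname)
            disk_setup[transformed]['_origname'] = origname

    aliases = {'ephemeral0': '/dev/sdb'}   # e.g. what Azure's disk_aliases provides
    disk_setup = {'ephemeral0': {'table_type': 'mbr', 'layout': True}}
    update_disk_setup_devices(disk_setup, aliases.get)
    # disk_setup is now keyed by '/dev/sdb' and carries '_origname': 'ephemeral0'
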
diff --git a/ChangeLog b/ChangeLog
index 8222e2b7..880ace21 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -3,6 +3,7 @@
- small fix for OVF datasource for iso transport on non-iso9660 filesystem
- determine if upstart version is suitable for
'initctl reload-configuration' (LP: #1124384). If so, then invoke it.
+ supports setting up instance-store disk with partition table and filesystem.
- add Azure datasource.
- add support for SuSE / SLES [Juerg Haefliger]
- add a trailing carriage return to chpasswd input, which reportedly
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index fb404c5d..d274f81a 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -41,7 +41,8 @@ def handle(_name, cfg, cloud, log, _args):
"""
disk_setup = cfg.get("disk_setup")
if isinstance(disk_setup, dict):
- log.info("Partitioning disks.")
+ update_disk_setup_devices(disk_setup, cloud.device_name_to_device)
+ log.debug("Partitioning disks: %s", str(disk_setup))
for disk, definition in disk_setup.items():
if not isinstance(definition, dict):
log.warn("Invalid disk definition for %s" % disk)
@@ -51,13 +52,14 @@ def handle(_name, cfg, cloud, log, _args):
log.debug("Creating new partition table/disk")
util.log_time(logfunc=LOG.debug,
msg="Creating partition on %s" % disk,
- func=mkpart, args=(disk, cloud, definition))
+ func=mkpart, args=(disk, definition))
except Exception as e:
util.logexc(LOG, "Failed partitioning operation\n%s" % e)
fs_setup = cfg.get("fs_setup")
if isinstance(fs_setup, list):
- log.info("Creating file systems.")
+ log.debug("setting up filesystems: %s", str(fs_setup))
+ update_fs_setup_devices(fs_setup, cloud.device_name_to_device)
for definition in fs_setup:
if not isinstance(definition, dict):
log.warn("Invalid file system definition: %s" % definition)
@@ -68,31 +70,48 @@ def handle(_name, cfg, cloud, log, _args):
device = definition.get('device')
util.log_time(logfunc=LOG.debug,
msg="Creating fs for %s" % device,
- func=mkfs, args=(cloud, definition))
+ func=mkfs, args=(definition,))
except Exception as e:
util.logexc(LOG, "Failed during filesystem operation\n%s" % e)
-def is_default_device(name, cloud, fallback=None):
- """
- Ask the cloud datasource if the 'name' maps to a default
- device. If so, return that value, otherwise return 'name', or
- fallback if so defined.
- """
-
- _dev = None
- try:
- _dev = cloud.device_name_to_device(name)
- except Exception as e:
- util.logexc(LOG, "Failed to find mapping for %s" % e)
+def update_disk_setup_devices(disk_setup, tformer):
+ # update 'disk_setup' dictionary anywhere where a device may occur
+ # update it with the response from 'tformer'
+ for origname in disk_setup.keys():
+ transformed = tformer(origname)
+ if transformed is None or transformed == origname:
+ continue
+ if transformed in disk_setup:
+ LOG.info("Replacing %s in disk_setup for translation of %s",
+ origname, transformed)
+ del disk_setup[transformed]
+
+ disk_setup[transformed] = disk_setup[origname]
+ disk_setup[transformed]['_origname'] = origname
+ del disk_setup[origname]
+ LOG.debug("updated disk_setup device entry '%s' to '%s'",
+ origname, transformed)
+
+
+def update_fs_setup_devices(disk_setup, tformer):
+ # update 'fs_setup' dictionary anywhere where a device may occur
+ # update it with the response from 'tformer'
+ for definition in disk_setup:
+ if not isinstance(definition, dict):
+ LOG.warn("entry in disk_setup not a dict: %s", definition)
+ continue
- if _dev:
- return _dev
+ origname = definition.get('device')
+ if origname is None:
+ continue
- if fallback:
- return fallback
+ transformed = tformer(origname)
+ if transformed is None or transformed == origname:
+ continue
- return name
+ definition['_origname'] = origname
+ definition['device'] = transformed
def value_splitter(values, start=None):
@@ -195,6 +214,10 @@ def find_device_node(device, fs_type=None, label=None, valid_targets=None,
Note: This works with GPT partition tables!
"""
+ # label of None is same as no label
+ if label is None:
+ label = ""
+
if not valid_targets:
valid_targets = ['disk', 'part']
@@ -219,8 +242,8 @@ def find_device_node(device, fs_type=None, label=None, valid_targets=None,
for key, value in value_splitter(part):
d[key.lower()] = value
- if d['fstype'] == fs_type and \
- ((label_match and d['label'] == label) or not label_match):
+ if (d['fstype'] == fs_type and
+ ((label_match and d['label'] == label) or not label_match)):
# If we find a matching device, we return that
return ('/dev/%s' % d['name'], True)
@@ -397,8 +420,8 @@ def get_partition_mbr_layout(size, layout):
# Create a single partition
return "0,"
- if (len(layout) == 0 and isinstance(layout, list)) or \
- not isinstance(layout, list):
+ if ((len(layout) == 0 and isinstance(layout, list)) or
+ not isinstance(layout, list)):
raise Exception("Partition layout is invalid")
last_part_num = len(layout)
@@ -414,8 +437,7 @@ def get_partition_mbr_layout(size, layout):
if isinstance(part, list):
if len(part) != 2:
- raise Exception("Partition was incorrectly defined: %s" % \
- part)
+ raise Exception("Partition was incorrectly defined: %s" % part)
percent, part_type = part
part_size = int((float(size) * (float(percent) / 100)) / 1024)
@@ -488,12 +510,11 @@ def exec_mkpart(table_type, device, layout):
return get_dyn_func("exec_mkpart_%s", table_type, device, layout)
-def mkpart(device, cloud, definition):
+def mkpart(device, definition):
"""
Creates the partition table.
Parameters:
- cloud: the cloud object
definition: dictionary describing how to create the partition.
The following are supported values in the dict:
@@ -508,29 +529,18 @@ def mkpart(device, cloud, definition):
overwrite = definition.get('overwrite', False)
layout = definition.get('layout', False)
table_type = definition.get('table_type', 'mbr')
- _device = is_default_device(device, cloud)
# Check if the default device is a partition or not
LOG.debug("Checking against default devices")
- if _device and (_device != device):
- if not is_device_valid(_device):
- _device = _device[:-1]
-
- if not is_device_valid(_device):
- raise Exception("Unable to find backing block device for %s" % \
- device)
- else:
- LOG.debug("Mapped %s to physical device %s" % (device, _device))
- device = _device
if (isinstance(layout, bool) and not layout) or not layout:
LOG.debug("Device is not to be partitioned, skipping")
return # Device is not to be partitioned
# This prevents you from overwriting the device
- LOG.debug("Checking if device %s is a valid device" % device)
+ LOG.debug("Checking if device %s is a valid device", device)
if not is_device_valid(device):
- raise Exception("Device %s is not a disk device!" % device)
+ raise Exception("Device %s is not a disk device!" % device)
LOG.debug("Checking if device layout matches")
if check_partition_layout(table_type, device, layout):
@@ -549,13 +559,13 @@ def mkpart(device, cloud, definition):
part_definition = get_partition_layout(table_type, device_size, layout)
LOG.debug(" Layout is: %s" % part_definition)
- LOG.debug("Creating partition table on %s" % device)
+ LOG.debug("Creating partition table on %s", device)
exec_mkpart(table_type, device, part_definition)
- LOG.debug("Partition table created for %s" % device)
+ LOG.debug("Partition table created for %s", device)
-def mkfs(cloud, fs_cfg):
+def mkfs(fs_cfg):
"""
Create a file system on the device.
@@ -576,54 +586,45 @@ def mkfs(cloud, fs_cfg):
When 'cmd' is provided then no other parameter is required.
"""
- fs_cfg['partition'] = 'any'
label = fs_cfg.get('label')
device = fs_cfg.get('device')
- partition = str(fs_cfg.get('partition'))
+ partition = str(fs_cfg.get('partition', 'any'))
fs_type = fs_cfg.get('filesystem')
fs_cmd = fs_cfg.get('cmd', [])
fs_opts = fs_cfg.get('extra_opts', [])
overwrite = fs_cfg.get('overwrite', False)
# This allows you to define the default ephemeral or swap
- LOG.debug("Checking %s against default devices" % device)
- _device = is_default_device(label, cloud, fallback=device)
- if _device and (_device != device):
- if not is_device_valid(_device):
- raise Exception("Unable to find backing block device for %s" % \
- device)
- else:
- LOG.debug("Mapped %s to physical device %s" % (device, _device))
- device = _device
+ LOG.debug("Checking %s against default devices", device)
if not partition or partition.isdigit():
# Handle manual definition of partition
if partition.isdigit():
device = "%s%s" % (device, partition)
- LOG.debug("Manual request of partition %s for %s" % (
- partition, device))
+ LOG.debug("Manual request of partition %s for %s",
+ partition, device)
# Check to see if the fs already exists
- LOG.debug("Checking device %s" % device)
+ LOG.debug("Checking device %s", device)
check_label, check_fstype, _ = check_fs(device)
- LOG.debug("Device %s has %s %s" % (device, check_label, check_fstype))
+ LOG.debug("Device %s has %s %s", device, check_label, check_fstype)
if check_label == label and check_fstype == fs_type:
- LOG.debug("Existing file system found at %s" % device)
+ LOG.debug("Existing file system found at %s", device)
if not overwrite:
- LOG.warn("Device %s has required file system" % device)
+ LOG.debug("Device %s has required file system", device)
return
else:
- LOG.warn("Destroying filesystem on %s" % device)
+ LOG.warn("Destroying filesystem on %s", device)
else:
- LOG.debug("Device %s is cleared for formating" % device)
+ LOG.debug("Device %s is cleared for formatting", device)
elif partition and str(partition).lower() in ('auto', 'any'):
# For auto devices, we match if the filesystem does exist
odevice = device
- LOG.debug("Identifying device to create %s filesytem on" % label)
+ LOG.debug("Identifying device to create %s filesystem on", label)
# any mean pick the first match on the device with matching fs_type
label_match = True
@@ -632,33 +633,32 @@ def mkfs(cloud, fs_cfg):
device, reuse = find_device_node(device, fs_type=fs_type, label=label,
label_match=label_match)
- LOG.debug("Automatic device for %s identified as %s" % (
- odevice, device))
+ LOG.debug("Automatic device for %s identified as %s", odevice, device)
if reuse:
LOG.debug("Found filesystem match, skipping formating.")
return
if not device:
- LOG.debug("No device aviable that matches request.")
- LOG.debug("Skipping fs creation for %s" % fs_cfg)
+ LOG.debug("No device available that matches request. "
+ "Skipping fs creation for %s", fs_cfg)
return
else:
LOG.debug("Error in device identification handling.")
return
- LOG.debug("File system %s will be created on %s" % (label, device))
+ LOG.debug("File system %s will be created on %s", label, device)
# Make sure the device is defined
if not device:
- LOG.critical("Device is not known: %s" % fs_cfg)
+ LOG.warn("Device is not known: %s", device)
return
# Check that we can create the FS
- if not label or not fs_type:
- LOG.debug("Command to create filesystem %s is bad. Skipping." % \
- label)
+ if not (fs_type or fs_cmd):
+ raise Exception("No way to create filesystem '%s'. fs_type or fs_cmd "
+ "must be set." % label)
# Create the commands
if fs_cmd:
@@ -673,7 +673,8 @@ def mkfs(cloud, fs_cfg):
mkfs_cmd = util.which("mk%s" % fs_type)
if not mkfs_cmd:
- LOG.critical("Unable to locate command to create filesystem.")
+ LOG.warn("Cannot create fstype '%s'. No mkfs.%s command", fs_type,
+ fs_type)
return
fs_cmd = [mkfs_cmd, device]
@@ -685,8 +686,8 @@ def mkfs(cloud, fs_cfg):
if fs_opts:
fs_cmd.extend(fs_opts)
- LOG.debug("Creating file system %s on %s" % (label, device))
- LOG.debug(" Using cmd: %s" % "".join(fs_cmd))
+ LOG.debug("Creating file system %s on %s", label, device)
+ LOG.debug(" Using cmd: %s", "".join(fs_cmd))
try:
util.subp(fs_cmd)
except Exception as e:
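
For context on the shapes handle() now consumes: disk_setup is a dict keyed by device name (or alias) and fs_setup is a list of dicts, with aliases resolved by the update_*_devices helpers above. A minimal sketch of such a config, mirroring the built-in Azure cloud-config added in the next file (values illustrative):

    cfg = {
        'disk_setup': {
            'ephemeral0': {'table_type': 'mbr', 'layout': True, 'overwrite': False},
        },
        'fs_setup': [
            {'filesystem': 'ext4', 'device': 'ephemeral0', 'partition': 'auto'},
        ],
    }
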
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index a77c3d9a..7ba6cea8 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -44,8 +44,20 @@ BUILTIN_DS_CONFIG = {
'policy': True,
'command': BOUNCE_COMMAND,
'hostname_command': 'hostname',
- }
+ },
+ 'disk_aliases': {'ephemeral0': '/dev/sdb'},
}
+
+BUILTIN_CLOUD_CONFIG = {
+ 'disk_setup': {
+ 'ephemeral0': {'table_type': 'mbr',
+ 'layout': True,
+ 'overwrite': False}
+ },
+ 'fs_setup': [{'filesystem': 'ext4', 'device': 'ephemeral0',
+ 'partition': 'auto'}],
+}
+
DS_CFG_PATH = ['datasource', DS_NAME]
@@ -94,7 +106,7 @@ class DataSourceAzureNet(sources.DataSource):
(md, self.userdata_raw, cfg, files) = ret
self.seed = cdev
self.metadata = util.mergemanydict([md, DEFAULT_METADATA])
- self.cfg = cfg
+ self.cfg = util.mergemanydict([cfg, BUILTIN_CLOUD_CONFIG])
found = cdev
LOG.debug("found datasource in %s", cdev)
@@ -112,8 +124,8 @@ class DataSourceAzureNet(sources.DataSource):
self.metadata['random_seed'] = seed
# now update ds_cfg to reflect contents pass in config
- usercfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
- self.ds_cfg = util.mergemanydict([usercfg, self.ds_cfg])
+ user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
+ self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
mycfg = self.ds_cfg
# walinux agent writes files world readable, but expects
@@ -161,9 +173,11 @@ class DataSourceAzureNet(sources.DataSource):
pubkeys = pubkeys_from_crt_files(fp_files)
self.metadata['public-keys'] = pubkeys
-
return True
+ def device_name_to_device(self, name):
+ return self.ds_cfg['disk_aliases'].get(name)
+
def get_config_obj(self):
return self.cfg
@@ -349,7 +363,7 @@ def read_azure_ovf(contents):
try:
dom = minidom.parseString(contents)
except Exception as e:
- raise NonAzureDataSource("invalid xml: %s" % e)
+ raise BrokenAzureDataSource("invalid xml: %s" % e)
results = find_child(dom.documentElement,
lambda n: n.localName == "ProvisioningSection")
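
The new device_name_to_device() hook on the Azure datasource is just a lookup in its disk_aliases mapping; cc_disk_setup passes it as the 'tformer' shown earlier. A minimal sketch of that interaction (the FakeAzureDatasource class is hypothetical, for illustration only):

    class FakeAzureDatasource(object):
        # mirrors the built-in Azure mapping: disk_aliases = {'ephemeral0': '/dev/sdb'}
        ds_cfg = {'disk_aliases': {'ephemeral0': '/dev/sdb'}}

        def device_name_to_device(self, name):
            return self.ds_cfg['disk_aliases'].get(name)

    ds = FakeAzureDatasource()
    assert ds.device_name_to_device('ephemeral0') == '/dev/sdb'
    assert ds.device_name_to_device('unknown') is None
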
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index da1eec79..93b8b50b 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -72,14 +72,17 @@ BUILTIN_DS_CONFIG = {
'iptables_disable'],
'base64_keys': [],
'base64_all': False,
- 'ephemeral_disk': '/dev/vdb',
+ 'disk_aliases': {'ephemeral0': '/dev/vdb'},
+}
+
+BUILTIN_CLOUD_CONFIG = {
'disk_setup': {
'ephemeral0': {'table_type': 'mbr',
- 'layout': True,
+ 'layout': False,
'overwrite': False}
},
'fs_setup': [{'label': 'ephemeral0', 'filesystem': 'ext3',
- 'device': '/dev/xvdb', 'partition': 'auto'}],
+ 'device': 'ephemeral0', 'partition': 'auto'}],
}
@@ -94,9 +97,7 @@ class DataSourceSmartOS(sources.DataSource):
BUILTIN_DS_CONFIG])
self.metadata = {}
- self.cfg = {}
- self.cfg['disk_setup'] = self.ds_cfg.get('disk_setup')
- self.cfg['fs_setup'] = self.ds_cfg.get('fs_setup')
+ self.cfg = BUILTIN_CLOUD_CONFIG
self.seed = self.ds_cfg.get("serial_device")
self.seed_timeout = self.ds_cfg.get("serial_timeout")
@@ -154,8 +155,7 @@ class DataSourceSmartOS(sources.DataSource):
return True
def device_name_to_device(self, name):
- if 'ephemeral0' in name:
- return self.ds_cfg['ephemeral_disk']
+ return self.ds_cfg['disk_aliases'].get(name)
def get_config_obj(self):
return self.cfg
diff --git a/doc/examples/cloud-config-disk-setup.txt b/doc/examples/cloud-config-disk-setup.txt
index db2c52a7..3fc47699 100644
--- a/doc/examples/cloud-config-disk-setup.txt
+++ b/doc/examples/cloud-config-disk-setup.txt
@@ -19,36 +19,36 @@ Default disk definitions for AWS
Default disk definitions for Windows Azure
------------------------------------------
-(Not implemented yet due to conflict with WALinuxAgent in Ubuntu)
+device_aliases: {'ephemeral0': '/dev/sdb'}
disk_setup:
- /dev/sdb:
+ ephemeral0:
type: mbr
layout: True
overwrite: False
fs_setup:
- label: ephemeral0
- filesystem: ext3
+ filesystem: ext4
device: ephemeral0
- partition: any
+ partition: auto
Default disk definitions for SmartOS
------------------------------------
-ephemeral_disk: /dev/vdb
+device_aliases: {'ephemeral0': '/dev/vdb'}
disk_setup:
- /dev/vdb:
+ ephemeral0:
type: mbr
- layout: True
+ layout: False
overwrite: False
fs_setup:
- label: ephemeral0
filesystem: ext3
- device: /dev/vdb
- partition: 1
+ device: ephemeral0
+ partition: auto
Caveat for SmartOS: if the ephemeral disk is not defined, then the disk will
not be automatically added to the mounts.
@@ -188,13 +188,43 @@ Where:
of the ephemeral storage layer.
<PART_VALUE>: The valid options are:
- "auto": auto is a special in the sense that you are telling cloud-init
- not to care whether there is a partition or not. Auto will put the
- first partition that does not contain a file system already. In
- the absence of a partition table, it will put it directly on the
- disk.
-
- "none": Put the partition directly on the disk.
+ "auto|any": tell cloud-init not to care whether there is a partition
+ or not. Auto will use the first partition that does not contain a
+ file system already. In the absence of a partition table, it will
+ put it directly on the disk.
+
+ "auto": If a file system that matches the specification in terms of
+ label, type and device already exists, then cloud-init will skip
+ creating the file system.
+
+ "any": If a file system that matches the file system type and device
+ already exists, then cloud-init will skip creating the file system.
+
+ Devices are selected on a first-detected basis, starting with partitions
+ and then the raw disk. Consider the following:
+ NAME FSTYPE LABEL
+ xvdb
+ |-xvdb1 ext4
+ |-xvdb2
+ |-xvdb3 btrfs test
+ \-xvdb4 ext4 test
+
+ If you ask for 'auto', a label of 'test', and a file system of 'ext4',
+ then cloud-init will select the 2nd partition, even though there
+ is a partition match at the 4th partition.
+
+ If you ask for 'any' and a label of 'test', then cloud-init will
+ select the 1st partition.
+
+ If you ask for 'auto' and don't define label, then cloud-init will
+ select the 1st partition.
+
+ In general, if you have a specific partition configuration in mind,
+ you should define either the device or the partition number. 'auto'
+ and 'any' are specifically intended for formatting ephemeral storage or
+ for simple schemes.
+
+ "none": Put the file system directly on the device.
<NUM>: where NUM is the actual partition number.
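
To make the 'auto'/'any' selection rules above concrete, here is an illustrative-only sketch that reproduces the outcomes described for the xvdb example; it is not the real find_device_node() implementation, just the stated rules.

    # partitions as (name, fstype, label), in detection order
    parts = [('xvdb1', 'ext4', None), ('xvdb2', None, None),
             ('xvdb3', 'btrfs', 'test'), ('xvdb4', 'ext4', 'test')]

    def pick(mode, fs_type, label=None):
        for name, p_fs, p_label in parts:
            if mode == 'any' and p_fs == fs_type:
                return name                    # 'any': first fstype match
            if mode == 'auto' and p_fs == fs_type and p_label == label:
                return name                    # 'auto': reuse an exact match
            if mode == 'auto' and p_fs is None:
                return name                    # 'auto': else first empty partition
        return None

    assert pick('any', 'ext4', 'test') == 'xvdb1'    # label is ignored for 'any'
    assert pick('auto', 'ext4', 'test') == 'xvdb2'   # first empty partition wins
    assert pick('auto', 'ext4') == 'xvdb1'           # no label: xvdb1 already matches
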
diff --git a/doc/sources/smartos/README.rst b/doc/sources/smartos/README.rst
index e2d3312e..8b63e520 100644
--- a/doc/sources/smartos/README.rst
+++ b/doc/sources/smartos/README.rst
@@ -73,15 +73,21 @@ or not to base64 decode something:
(i.e. /etc/cloud/cloud.cfg.d) that sets which values should not be
base64 decoded.
-ephemeral_disk:
+disk_aliases and ephemeral disk:
---------------
-
-In order to instruct Cloud-init which disk to auto-mount. By default,
-SmartOS only supports a single ephemeral disk.
-
-The default SmartOS configuration will prepare the ephemeral disk and format
-it for you. SmartOS does not, by default, prepare the ephemeral disk for you.
-
-If you change ephemeral_disk, you should also consider changing
-the default disk formatting parameters. See
-doc/examples/cloud-config-disk-setup.txt for information on using this.
+By default, SmartOS only supports a single ephemeral disk. That disk is
+completely empty (un-partitioned with no filesystem).
+
+The SmartOS datasource has built-in cloud-config which instructs the
+'disk_setup' module to partition and format the ephemeral disk.
+
+You can then control disk_setup in two ways:
+ 1. Through the datasource config, you can change the 'alias' of
+ ephemeral0 to reference another device. The default is:
+ 'disk_aliases': {'ephemeral0': '/dev/vdb'},
+ which means that anywhere disk_setup sees a device named 'ephemeral0',
+ /dev/vdb will be substituted.
+ 2. You can provide disk_setup or fs_setup data in user-data to override
+ the datasource's built-in values.
+
+See doc/examples/cloud-config-disk-setup.txt for information on disk_setup.
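
A minimal sketch of the override behaviour this README describes: a user-supplied disk_aliases entry in the datasource config takes precedence over the built-in SmartOS default. The merge() helper here is a simplified stand-in for util.mergemanydict(); the '/dev/sdc' value is illustrative.

    BUILTIN_DS_CONFIG = {'disk_aliases': {'ephemeral0': '/dev/vdb'}}  # SmartOS default
    user_ds_cfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}}        # from datasource config

    def merge(user, builtin):
        # simplified stand-in for util.mergemanydict([user, builtin]): user values win
        merged = dict(builtin)
        merged.update(user)
        return merged

    ds_cfg = merge(user_ds_cfg, BUILTIN_DS_CONFIG)
    assert ds_cfg['disk_aliases']['ephemeral0'] == '/dev/sdc'
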
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 1ca6a79d..86e2ed8c 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -120,8 +120,7 @@ class TestAzureDataSource(MockerTestCase):
mod = DataSourceAzure
- if data.get('dsdevs'):
- self.apply_patches([(mod, 'list_possible_azure_ds_devs', dsdevs)])
+ self.apply_patches([(mod, 'list_possible_azure_ds_devs', dsdevs)])
self.apply_patches([(mod, 'invoke_agent', _invoke_agent),
(mod, 'write_files', _write_files),
@@ -154,9 +153,12 @@ class TestAzureDataSource(MockerTestCase):
def test_user_cfg_set_agent_command_plain(self):
# set dscfg in via plaintext
- cfg = {'agent_command': "my_command"}
+ # we must have friendly-to-xml formatted plaintext in yaml_cfg
+ # not all plaintext is expected to work.
+ yaml_cfg = "{agent_command: my_command}\n"
+ cfg = yaml.safe_load(yaml_cfg)
odata = {'HostName': "myhost", 'UserName': "myuser",
- 'dscfg': {'text': yaml.dump(cfg), 'encoding': 'plain'}}
+ 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}
data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
dsrc = self._get_ds(data)
@@ -290,11 +292,59 @@ class TestAzureDataSource(MockerTestCase):
self.assertEqual(data.get('apply_hostname_bounce', "N/A"), "N/A")
+ def test_default_ephemeral(self):
+ # make sure the ephemeral device works
+ odata = {}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata),
+ 'sys_cfg': {}}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ cfg = dsrc.get_config_obj()
+
+ self.assertEquals(dsrc.device_name_to_device("ephemeral0"),
+ "/dev/sdb")
+ assert 'disk_setup' in cfg
+ assert 'fs_setup' in cfg
+ self.assertIsInstance(cfg['disk_setup'], dict)
+ self.assertIsInstance(cfg['fs_setup'], list)
+
+ def test_provide_disk_aliases(self):
+ # Make sure that user can affect disk aliases
+ dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}}
+ odata = {'HostName': "myhost", 'UserName': "myuser",
+ 'dscfg': {'text': base64.b64encode(yaml.dump(dscfg)),
+ 'encoding': 'base64'}}
+ usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'},
+ 'ephemeral0': False}}
+ userdata = '#cloud-config' + yaml.dump(usercfg) + "\n"
+
+ ovfcontent = construct_valid_ovf_env(data=odata, userdata=userdata)
+ data = {'ovfcontent': ovfcontent, 'sys_cfg': {}}
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ cfg = dsrc.get_config_obj()
+ self.assertTrue(cfg)
+ self.assertEquals(dsrc.device_name_to_device("ephemeral0"),
+ "/dev/sdc")
+
+ def test_userdata_arrives(self):
+ userdata = "This is my user-data"
+ xml = construct_valid_ovf_env(data={}, userdata=userdata)
+ data = {'ovfcontent': xml}
+ dsrc = self._get_ds(data)
+ dsrc.get_data()
+
+ self.assertEqual(userdata, dsrc.userdata_raw)
+
class TestReadAzureOvf(MockerTestCase):
def test_invalid_xml_raises_non_azure_ds(self):
invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
- self.assertRaises(DataSourceAzure.NonAzureDataSource,
+ self.assertRaises(DataSourceAzure.BrokenAzureDataSource,
DataSourceAzure.read_azure_ovf, invalid_xml)
def test_load_with_pubkeys(self):
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 56fe811e..956767d8 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -79,7 +79,6 @@ class MockSerial(object):
if self.last in self.mockdata:
if not self.mocked_out:
self.mocked_out = [x for x in self._format_out()]
- print self.mocked_out
if len(self.mocked_out) > self.count:
self.count += 1
@@ -275,26 +274,25 @@ class TestSmartOSDataSource(MockerTestCase):
self.assertIsInstance(cfg['disk_setup'], dict)
self.assertIsInstance(cfg['fs_setup'], list)
- def test_override_builtin_ds(self):
+ def test_override_disk_aliases(self):
# Test to make sure that the built-in DS is overridden
- data = {}
- data['disk_setup'] = {'test_dev': {}}
- data['fs_setup'] = [{'label': 'test_dev'}]
- data['serial_device'] = '/dev/ttyS2'
- dsrc = self._get_ds(ds_cfg=data)
- cfg = dsrc.get_config_obj()
+ builtin = DataSourceSmartOS.BUILTIN_DS_CONFIG
+
+ mydscfg = {'disk_aliases': {'FOO': '/dev/bar'}}
+ # expect that these values are in builtin, or this is pointless
+ for k in mydscfg:
+ self.assertIn(k, builtin)
+
+ dsrc = self._get_ds(ds_cfg=mydscfg)
ret = dsrc.get_data()
self.assertTrue(ret)
- assert 'disk_setup' in cfg
- assert 'fs_setup' in cfg
- self.assertIsInstance(cfg['disk_setup'], dict)
- self.assertIsInstance(cfg['fs_setup'], list)
- assert 'test_dev' in cfg['disk_setup']
- assert 'test_dev' in cfg['fs_setup'][0]['label']
+ self.assertEqual(mydscfg['disk_aliases']['FOO'],
+ dsrc.ds_cfg['disk_aliases']['FOO'])
- self.assertEquals(data['serial_device'], dsrc.seed)
+ self.assertEqual(dsrc.device_name_to_device('FOO'),
+ mydscfg['disk_aliases']['FOO'])
def apply_patches(patches):