author     Jeremy Edwards <jeremyedwards@google.com>   2013-08-02 15:51:39 -0700
committer  Jeremy Edwards <jeremyedwards@google.com>   2013-08-02 15:51:39 -0700
commit     1ab76ecb359824599a4dd311ec951ba9826a345e (patch)
tree       9ec1ef71666d11b8a9edba90e697cb872bc48f38
parent     3dec2d7c4e52399159291519f4c7d65f275d6544 (diff)
download   google-compute-image-packages-1ab76ecb359824599a4dd311ec951ba9826a345e.tar.gz
"Updating image-bundle"  (tag: 1.0.4)
-rwxr-xr-x  image-bundle/README                      29
-rwxr-xr-x  image-bundle/block_disk.py              227
-rwxr-xr-x  image-bundle/block_disk_unittest.py     260
-rwxr-xr-x  image-bundle/centos.py                   57
-rwxr-xr-x  image-bundle/debian.py                   37
-rwxr-xr-x  image-bundle/exclude_spec.py             82
-rwxr-xr-x  image-bundle/fs_copy.py                 164
-rwxr-xr-x  image-bundle/gcel.py                     58
-rwxr-xr-x  image-bundle/image_bundle.py            223
-rwxr-xr-x  image-bundle/image_bundle_test_base.py  112
-rwxr-xr-x  image-bundle/linux.py                   118
-rwxr-xr-x  image-bundle/os_platform.py              73
-rwxr-xr-x  image-bundle/platform_factory.py         54
-rwxr-xr-x  image-bundle/ubuntu.py                   57
-rwxr-xr-x  image-bundle/utils.py                   289
15 files changed, 1840 insertions, 0 deletions
diff --git a/image-bundle/README b/image-bundle/README
new file mode 100755
index 0000000..677ae8b
--- /dev/null
+++ b/image-bundle/README
@@ -0,0 +1,29 @@
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+Image bundling tool for the root file system.
+
+To build a root filesystem tar:
+$ sudo ./image_bundle.py -r /data/myimage/root -o /usr/local/google/home/${USER} \
+ -k 'somekey' --loglevel=DEBUG --log_file=/tmp/image_bundle.log
+
+This will output the image tar in the output directory
+specified with the -o option.
+
+To run the unit tests:
+$ mkdir /tmp/imagebundle
+$ cp * /tmp/imagebundle/
+$ sudo /tmp/imagebundle/block_disk_unittest.py
+
+Note that this tool is copied out file by file into the default Google image.
diff --git a/image-bundle/block_disk.py b/image-bundle/block_disk.py
new file mode 100755
index 0000000..08c156e
--- /dev/null
+++ b/image-bundle/block_disk.py
@@ -0,0 +1,227 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to create raw disk images.
+
+Stores a copy of directories/files in a file mounted as a partitioned block
+device.
+"""
+
+
+
+import hashlib
+import logging
+import os
+import re
+import tempfile
+
+import exclude_spec
+import fs_copy
+import utils
+
+
+class RawDiskError(Exception):
+ """Error occured during raw disk creation."""
+
+
+class InvalidRawDiskError(Exception):
+ """Error when verification fails before copying."""
+
+
+class FsRawDisk(fs_copy.FsCopy):
+ """Creates a raw disk copy of OS image and bundles it into gzipped tar."""
+
+ def __init__(self, fs_size):
+ """Constructor for FsRawDisk class.
+
+ Args:
+ fs_size: Size of the raw disk.
+ """
+ super(FsRawDisk, self).__init__()
+ self._fs_size = fs_size
+
+ def Bundleup(self):
+ """Creates a raw disk copy of OS image and bundles it into gzipped tar.
+
+ Returns:
+      The size of the generated raw disk and the SHA1 digest of the tar archive.
+
+ Raises:
+ RawDiskError: If number of partitions in a created image doesn't match
+ expected count.
+ """
+ self._Verify()
+ # Create sparse file with specified size
+ file_path = os.path.join(self._scratch_dir, 'disk.raw')
+ self._excludes.append(exclude_spec.ExcludeSpec(file_path))
+ with open(file_path, 'wb') as disk_file:
+ disk_file.truncate(self._fs_size)
+ utils.MakePartitionTable(file_path)
+ # Pass 1MB as start to avoid 'Warning: The resulting partition is not
+ # properly aligned for best performance.' from parted.
+ utils.MakePartition(file_path, 'primary', 'ext2', 1024 * 1024,
+ self._fs_size)
+ with utils.LoadDiskImage(file_path) as devices:
+ # For now we only support disks with a single partition.
+ if len(devices) != 1:
+ raise RawDiskError(devices)
+ uuid = utils.MakeFileSystem(devices[0], 'ext4')
+ if uuid is None:
+ raise Exception('Could not get uuid from makefilesystem')
+ mount_point = tempfile.mkdtemp(dir=self._scratch_dir)
+ with utils.MountFileSystem(devices[0], mount_point):
+ self._CopySourceFiles(mount_point)
+ self._CopyPlatformSpecialFiles(mount_point)
+ self._ProcessOverwriteList(mount_point)
+ self._CleanupNetwork(mount_point)
+ self._UpdateFstab(mount_point, uuid)
+
+ utils.TarAndGzipFile(file_path, self._output_tarfile)
+ os.remove(file_path)
+ # TODO(user): It would be better to compute tar.gz file hash during
+ # archiving.
+ h = hashlib.sha1()
+ with open(self._output_tarfile, 'rb') as tar_file:
+ for chunk in iter(lambda: tar_file.read(8192), ''):
+ h.update(chunk)
+ return (self._fs_size, h.hexdigest())
+
+ def _CopySourceFiles(self, mount_point):
+ """Copies all source files/directories to a mounted raw disk.
+
+ There are several cases which must be handled separately:
+ 1. src=dir1 and dest is empty. In this case we simply copy the content of
+ dir1 to mount_point.
+ 2. src=dir1 and dest=dir2. In this case dir1 is copied to mount_point
+ under a new name dir2, so its content would be copied under
+ mount_point/dir2.
+ 3. src=file1/dir1 and dest=file2/dir2 and is_recursive=False. file1/dir1
+ is copied to mount_point/file2 or mount_point/dir2.
+
+ Args:
+ mount_point: A path to a mounted raw disk.
+ """
+ for (src, dest, is_recursive) in self._srcs:
+ # Generate a list of files/directories excluded from copying to raw disk.
+ # rsync expects them to be relative to src directory so we need to
+ # regenerate this list for every src separately.
+ with tempfile.NamedTemporaryFile(dir=self._scratch_dir) as rsync_file:
+ for spec in self._excludes:
+ rsync_file.write(spec.GetRsyncSpec(src))
+
+ # make sure that rsync utility sees all the content of rsync_file which
+ # otherwise can be buffered.
+ rsync_file.flush()
+ if is_recursive:
+ # if a directory ends with / rsync copies the content of a
+ # directory, otherwise it also copies the directory itself.
+ src = src.rstrip('/')
+ if not dest:
+ src += '/'
+ utils.Rsync(src, mount_point, rsync_file.name,
+ self._ignore_hard_links, recursive=True)
+ if dest:
+ os.rename(os.path.join(mount_point, os.path.basename(src)),
+ os.path.join(mount_point, dest))
+ else:
+ utils.Rsync(src, os.path.join(mount_point, dest), rsync_file.name,
+ self._ignore_hard_links, recursive=False)
+
+ def _CopyPlatformSpecialFiles(self, mount_point):
+ """Copies platform special files to a mounted raw disk.
+
+ Args:
+ mount_point: A path to a mounted raw disk.
+ """
+ if self._platform:
+ special_files = self._platform.GetPlatformSpecialFiles(self._scratch_dir)
+ for (src, dest) in special_files:
+ utils.Rsync(src, os.path.join(mount_point, dest), None,
+ self._ignore_hard_links, recursive=False)
+
+ def _ProcessOverwriteList(self, mount_point):
+ """Overwrites a set of files/directories requested by platform.
+
+ Args:
+ mount_point: A path to a mounted raw disk.
+ """
+ for file_name in self._overwrite_list:
+ file_path = os.path.join(mount_point, file_name)
+ if os.path.exists(file_path):
+ if os.path.isdir(file_path):
+ # TODO(user): platform.Overwrite is expected to overwrite the
+ # directory in place from what I can tell. In case of a file it will
+          # create a new file which must be copied to the mounted raw disk. So
+          # there is some inconsistency which would need to be addressed if and
+          # when we encounter a platform that wants to overwrite a directory.
+ self._platform.Overwrite(file_path, file_name, self._scratch_dir)
+ logging.info('rawdisk: modifying directory %s', file_path)
+ else:
+ new_file = self._platform.Overwrite(file_path, file_name,
+ self._scratch_dir)
+ logging.info('rawdisk: modifying %s from %s', file_path, new_file)
+ utils.Rsync(new_file, file_path, None, self._ignore_hard_links,
+ recursive=False)
+
+
+ def _CleanupNetwork(self, mount_point):
+ """Remove any record of our current MAC address."""
+ net_rules_path = os.path.join(
+ mount_point,
+ 'lib/udev/rules.d/75-persistent-net-generator.rules')
+ if os.path.exists(net_rules_path):
+ os.remove(net_rules_path)
+
+ def _UpdateFstab(self, mount_point, uuid):
+ """Update /etc/fstab with the new root fs UUID."""
+ fstab_path = os.path.join(mount_point, 'etc/fstab')
+ if not os.path.exists(fstab_path):
+      print '/etc/fstab does not exist. Not updating fstab UUID.'
+ return
+
+ f = open(fstab_path, 'r')
+ lines = f.readlines()
+ f.close()
+
+ def UpdateUUID(line):
+ """Replace the UUID on the entry for /."""
+ g = re.match(r'UUID=\S+\s+/\s+(.*)', line)
+ if not g:
+ return line
+ return 'UUID=%s / %s\n' % (uuid, g.group(1))
+
+ lines = map(UpdateUUID, lines)
+ f = open(fstab_path, 'w')
+ f.write(''.join(lines))
+ f.close()
+
+
+class RootFsRaw(FsRawDisk):
+ """Block disk copy of the root file system.
+
+ Takes care of additional checks for a root file system.
+ """
+
+ def __init__(self, fs_size):
+ super(RootFsRaw, self).__init__(fs_size)
+
+ def _Verify(self):
+ super(RootFsRaw, self)._Verify()
+ # exactly one file system to bundle up
+ if len(self._srcs) != 1:
+ raise InvalidRawDiskError('Root filesystems must have exactly one src.')
+ # check that destination field is empty.
+ if self._srcs[0][1]:
+ raise InvalidRawDiskError('Root filesystems must be copied as /')
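
For orientation, a minimal sketch of how RootFsRaw is driven, mirroring the
sequence image_bundle.py uses later in this change (paths, size, and key are
placeholders):

import block_disk

bundle = block_disk.RootFsRaw(10 * 1024 * 1024 * 1024)  # 10 GB raw disk
bundle.SetTarfile('/tmp/scratch/root.tar.gz')            # output archive
bundle.AddSource('/')                                     # bundle the root file system
bundle.SetKey('somekey')                                  # signing key (currently unused)
bundle.SetScratchDirectory('/tmp/scratch')
bundle.Verify()                                           # raises InvalidRawDiskError on a bad spec
(fs_size, sha1_digest) = bundle.Bundleup()                # writes and hashes the tar.gz
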
diff --git a/image-bundle/block_disk_unittest.py b/image-bundle/block_disk_unittest.py
new file mode 100755
index 0000000..02790e4
--- /dev/null
+++ b/image-bundle/block_disk_unittest.py
@@ -0,0 +1,260 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for block_disk.py module."""
+
+
+__pychecker__ = 'no-local' # for unittest
+
+
+import logging
+import os
+import subprocess
+import tempfile
+import unittest
+
+import block_disk
+import exclude_spec
+import image_bundle_test_base
+import utils
+
+
+class FsRawDiskTest(image_bundle_test_base.ImageBundleTest):
+ """FsRawDisk Unit Test."""
+
+ def setUp(self):
+ super(FsRawDiskTest, self).setUp()
+ self._bundle = block_disk.FsRawDisk(10*1024*1024)
+ self._tar_path = self.tmp_path + '/image.tar.gz'
+ self._bundle.SetTarfile(self._tar_path)
+ self._bundle.AppendExcludes([exclude_spec.ExcludeSpec(self._tar_path)])
+ self._bundle.SetKey('key')
+
+ def tearDown(self):
+ super(FsRawDiskTest, self).tearDown()
+
+ def testRawDisk(self):
+ """Tests the regular operation. No expected error."""
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.Verify()
+ (_, digest) = self._bundle.Bundleup()
+ if not digest:
+ self.fail('raw disk failed')
+ self._VerifyTarHas(self._tar_path, ['disk.raw'])
+ self._VerifyImageHas(self._tar_path,
+ ['lost+found', 'test1', 'test2', 'dir1/',
+ '/dir1/dir11/', '/dir1/sl1', '/dir1/hl2', 'dir2/',
+ '/dir2/dir1', '/dir2/sl2', '/dir2/hl1'])
+ self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test1', 2)
+ self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test2', 2)
+
+ def testRawDiskIgnoresHardlinks(self):
+ """Tests if the raw disk ignores hard links if asked."""
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.IgnoreHardLinks()
+ self._bundle.Verify()
+ (_, digest) = self._bundle.Bundleup()
+ if not digest:
+ self.fail('raw disk failed')
+ self._VerifyTarHas(self._tar_path, ['disk.raw'])
+ self._VerifyImageHas(self._tar_path,
+ ['lost+found', 'test1', 'test2', 'dir1/',
+ '/dir1/dir11/', '/dir1/sl1', '/dir1/hl2', 'dir2/',
+ '/dir2/dir1', '/dir2/sl2', '/dir2/hl1'])
+ self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test1', 1)
+ self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test2', 1)
+
+ def testRawDiskIgnoresExcludes(self):
+ """Tests if the raw disk ignores specified excludes files."""
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.AppendExcludes(
+ [exclude_spec.ExcludeSpec(self.tmp_path + '/dir1')])
+ self._bundle.Verify()
+ (_, digest) = self._bundle.Bundleup()
+ if not digest:
+ self.fail('raw disk failed')
+ self._VerifyTarHas(self._tar_path, ['disk.raw'])
+ self._VerifyImageHas(self._tar_path,
+ ['lost+found', 'test1', 'test2', 'dir2/', '/dir2/dir1',
+ '/dir2/sl2', '/dir2/hl1'])
+
+ def testRawDiskExcludePreservesSubdirs(self):
+ """Tests if excludes preserves subdirs underneath if asked."""
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.AppendExcludes(
+ [exclude_spec.ExcludeSpec(self.tmp_path + '/dir1',
+ preserve_dir=True,
+ preserve_subdir=True)])
+ self._bundle.Verify()
+ (_, digest) = self._bundle.Bundleup()
+ if not digest:
+ self.fail('raw disk failed')
+ self._VerifyTarHas(self._tar_path, ['disk.raw'])
+ self._VerifyImageHas(self._tar_path,
+ ['lost+found', 'test1', 'test2', 'dir1/',
+ '/dir1/dir11', 'dir2/', '/dir2/dir1',
+ '/dir2/sl2', '/dir2/hl1'])
+
+ def testRawDiskExcludePreservesFiles(self):
+ """Tests if excludes preserves the files underneath if asked."""
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.AppendExcludes(
+ [exclude_spec.ExcludeSpec(self.tmp_path + '/dir1',
+ preserve_dir=True,
+ preserve_file=True)])
+ self._bundle.Verify()
+ (_, digest) = self._bundle.Bundleup()
+ if not digest:
+ self.fail('raw disk failed')
+ self._VerifyTarHas(self._tar_path, ['disk.raw'])
+ self._VerifyImageHas(self._tar_path,
+ ['lost+found', 'test1', 'test2', 'dir1/', '/dir1/hl2',
+ '/dir1/sl1', 'dir2/', '/dir2/dir1', '/dir2/sl2',
+ '/dir2/hl1'])
+
+ def testRawDiskUsesModifiedFiles(self):
+ """Tests if the raw disk uses modified files."""
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.AppendExcludes(
+ [exclude_spec.ExcludeSpec(self.tmp_path + '/dir1')])
+ self._bundle.SetPlatform(image_bundle_test_base.MockPlatform(self.tmp_root))
+ self._bundle.Verify()
+ (_, digest) = self._bundle.Bundleup()
+ if not digest:
+ self.fail('raw disk failed')
+ self._VerifyTarHas(self._tar_path, ['disk.raw'])
+ self._VerifyImageHas(self._tar_path,
+ ['lost+found', 'test1', 'test2', 'dir2/',
+ '/dir2/dir1', '/dir2/sl2', '/dir2/hl1'])
+ self._VerifyFileInRawDiskEndsWith(self._tar_path, 'test1',
+ 'something extra.')
+
+ def testRawDiskGeneratesCorrectDigest(self):
+ """Tests if the SHA1 digest generated is accurate."""
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.Verify()
+ (_, digest) = self._bundle.Bundleup()
+ if not digest:
+ self.fail('raw disk failed')
+ p = subprocess.Popen(['/usr/bin/openssl dgst -sha1 ' + self._tar_path],
+ stdout=subprocess.PIPE, shell=True)
+ file_digest = p.communicate()[0].split('=')[1].strip()
+ self.assertEqual(digest, file_digest)
+
+ def testRawDiskHonorsRecursiveOff(self):
+ """Tests if raw disk handles recursive off."""
+ self._bundle.AppendExcludes([exclude_spec.ExcludeSpec(self._tar_path)])
+ self._bundle.AddSource(self.tmp_path + '/dir1',
+ arcname='dir1', recursive=False)
+ self._bundle.AddSource(self.tmp_path + '/dir2', arcname='dir2')
+ self._bundle.Verify()
+ (_, digest) = self._bundle.Bundleup()
+ if not digest:
+ self.fail('raw disk failed')
+ self._VerifyTarHas(self._tar_path, ['disk.raw'])
+ self._VerifyImageHas(self._tar_path,
+ ['lost+found', 'dir1/', 'dir2/', '/dir2/dir1',
+ '/dir2/sl2', '/dir2/hl1'])
+
+ def _VerifyImageHas(self, tar, expected):
+ """Tests if raw disk contains an expected list of files/directories."""
+ tmp_dir = tempfile.mkdtemp(dir='/tmp')
+ tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
+ self.assertEqual(subprocess.call(tar_cmd), 0)
+ disk_path = os.path.join(tmp_dir, 'disk.raw')
+ with utils.LoadDiskImage(disk_path) as devices:
+ self.assertEqual(len(devices), 1)
+ mnt_dir = tempfile.mkdtemp(dir='/tmp')
+ with utils.MountFileSystem(devices[0], mnt_dir):
+ found = []
+ for root, dirs, files in os.walk(mnt_dir):
+ root = root.replace(mnt_dir, '')
+ for f in files:
+ found.append(os.path.join(root, f))
+ for d in dirs:
+ found.append(os.path.join(root, d))
+ self._AssertListEqual(expected, found)
+
+ def _VerifyFileInRawDiskEndsWith(self, tar, filename, text):
+ """Tests if a file on raw disk contains ends with a specified text."""
+ tmp_dir = tempfile.mkdtemp(dir='/tmp')
+ tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
+ self.assertEqual(subprocess.call(tar_cmd), 0)
+ disk_path = os.path.join(tmp_dir, 'disk.raw')
+ with utils.LoadDiskImage(disk_path) as devices:
+ self.assertEqual(len(devices), 1)
+ mnt_dir = tempfile.mkdtemp(dir='/tmp')
+ with utils.MountFileSystem(devices[0], mnt_dir):
+ f = open(os.path.join(mnt_dir, filename), 'r')
+ file_content = f.read()
+ f.close()
+ self.assertTrue(file_content.endswith(text))
+
+ def _VerifyNumberOfHardLinksInRawDisk(self, tar, filename, count):
+ """Tests if a file on raw disk has a specified number of hard links."""
+ tmp_dir = tempfile.mkdtemp(dir='/tmp')
+ tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
+ self.assertEqual(subprocess.call(tar_cmd), 0)
+ disk_path = os.path.join(tmp_dir, 'disk.raw')
+ with utils.LoadDiskImage(disk_path) as devices:
+ self.assertEqual(len(devices), 1)
+ mnt_dir = tempfile.mkdtemp(dir='/tmp')
+ with utils.MountFileSystem(devices[0], mnt_dir):
+ self.assertEqual(os.stat(os.path.join(mnt_dir, filename)).st_nlink,
+ count)
+
+
+class RootFsRawTest(image_bundle_test_base.ImageBundleTest):
+ """RootFsRaw Unit Test."""
+
+ def setUp(self):
+ super(RootFsRawTest, self).setUp()
+ self._bundle = block_disk.RootFsRaw(10*1024*1024)
+ self._tar_path = self.tmp_path + '/image.tar.gz'
+ self._bundle.SetTarfile(self._tar_path)
+ self._bundle.AppendExcludes([exclude_spec.ExcludeSpec(self._tar_path)])
+
+ def tearDown(self):
+ super(RootFsRawTest, self).tearDown()
+
+ def testRootRawDiskVerifiesOneSource(self):
+ """Tests that only one root directory is allowed."""
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.AddSource(self.tmp_path + '/dir1')
+ self._bundle.SetKey('key')
+ try:
+ self._bundle.Verify()
+ except block_disk.InvalidRawDiskError:
+ return
+ self.fail()
+
+ def testRootRawDiskVerifiesRootDestination(self):
+ """Tests that destination directory must be /."""
+ self._bundle.AddSource(self.tmp_path, arcname='/tmp')
+ self._bundle.SetKey('key')
+ try:
+ self._bundle.Verify()
+ except block_disk.InvalidRawDiskError:
+ return
+ self.fail()
+
+
+def main():
+ logging.basicConfig(level=logging.DEBUG)
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/image-bundle/centos.py b/image-bundle/centos.py
new file mode 100755
index 0000000..1996e4b
--- /dev/null
+++ b/image-bundle/centos.py
@@ -0,0 +1,57 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Centos specific platform info."""
+
+
+
+import os
+import re
+
+import linux
+
+
+class Centos(linux.LinuxPlatform):
+ """Centos specific information."""
+
+ @staticmethod
+ def IsThisPlatform(root='/'):
+ release_file = root + '/etc/redhat-release'
+ if os.path.exists(release_file):
+ (_, _, flavor, _) = Centos.ParseRedhatRelease(release_file)
+ if flavor and flavor.lower() == 'centos':
+ return True
+ return False
+
+ @staticmethod
+ def ParseRedhatRelease(release_file='/etc/redhat-release'):
+ """Parses the /etc/redhat-release file."""
+ f = open(release_file)
+ lines = f.readlines()
+ f.close()
+ if not lines:
+ return (None, None, None, None)
+ line0 = lines[0]
+ g = re.match(r'(\S+) release (\d+\.\d+) \(([^)]*)\)', line0)
+ if not g:
+ return (None, None, None, None)
+ (osname, version, label) = (g.group(1), g.group(2), g.group(3))
+ return (osname, label, osname, version)
+
+ def __init__(self):
+ super(Centos, self).__init__()
+ (self.distribution_codename, _, self.distribution,
+ self.distribution_version) = Centos.ParseRedhatRelease()
diff --git a/image-bundle/debian.py b/image-bundle/debian.py
new file mode 100755
index 0000000..88e28da
--- /dev/null
+++ b/image-bundle/debian.py
@@ -0,0 +1,37 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Debian Linux specific platform info."""
+
+
+
+import platform
+
+import linux
+
+
+class Debian(linux.LinuxPlatform):
+ """Debian Linux specific information."""
+
+ @staticmethod
+ def IsThisPlatform(root='/'):
+ (distribution, _, _) = platform.linux_distribution()
+ if distribution and distribution.lower() == 'debian':
+ return True
+ return False
+
+ def __init__(self):
+ super(Debian, self).__init__()
diff --git a/image-bundle/exclude_spec.py b/image-bundle/exclude_spec.py
new file mode 100755
index 0000000..26b8264
--- /dev/null
+++ b/image-bundle/exclude_spec.py
@@ -0,0 +1,82 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Exclude file specification."""
+
+
+
+import logging
+import os
+
+
+class ExcludeSpec(object):
+ """Specifies how exclusion of a path should be handled."""
+
+ def __init__(self, path, preserve_file=False, preserve_dir=False,
+ preserve_subdir=False):
+ self.path = path
+ self.preserve_dir = preserve_dir
+ self.preserve_file = False
+ self.preserve_subdir = False
+ # Preserve files and subdirs only if dir is preserved.
+ if preserve_file and preserve_dir:
+ self.preserve_file = True
+ if preserve_subdir and preserve_dir:
+ self.preserve_subdir = True
+
+ def ShouldExclude(self, filename):
+ prefix = os.path.commonprefix([filename, self.path])
+ if prefix == self.path:
+ if ((self.preserve_dir and filename == self.path) or
+ (self.preserve_subdir and os.path.isdir(filename)) or
+ (self.preserve_file and os.path.isfile(filename))):
+ logging.warning('preserving %s', filename)
+ return False
+ return True
+ return False
+
+ def GetSpec(self):
+ return '(%s, %d:%d:%d)' % (self.path, self.preserve_file, self.preserve_dir,
+ self.preserve_subdir)
+
+ def GetRsyncSpec(self, src):
+ """Returns exclude spec in a format required by rsync.
+
+ Args:
+ src: source directory path passed to rsync. rsync expects exclude-spec to
+ be relative to src directory.
+
+ Returns:
+ A string of exclude filters in rsync exclude-from file format.
+ """
+ spec = ''
+ prefix = os.path.commonprefix([src, self.path])
+ if prefix == src:
+ relative_path = os.path.join('/', self.path[len(prefix):])
+ if self.preserve_dir:
+ spec += '+ %s\n' % relative_path
+ if self.preserve_file or self.preserve_subdir:
+ for f in os.listdir(self.path):
+ file_path = os.path.join(self.path, f)
+ relative_file_path = os.path.join(relative_path, f)
+ if self.preserve_file and os.path.isfile(file_path):
+ spec += '+ %s\n' % relative_file_path
+ if self.preserve_subdir and os.path.isdir(file_path):
+ spec += '+ %s\n' % relative_file_path
+ else:
+ spec += '- %s\n' % relative_path
+ spec += '- %s\n' % os.path.join(relative_path, '**')
+ return spec
diff --git a/image-bundle/fs_copy.py b/image-bundle/fs_copy.py
new file mode 100755
index 0000000..0083f92
--- /dev/null
+++ b/image-bundle/fs_copy.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Creates a copy of specified directories\files."""
+
+
+
+import logging
+import os
+import re
+
+
+class FsCopyError(Exception):
+ """Error occured in fs copy operation."""
+
+
+class InvalidFsCopyError(Exception):
+ """Error when verification fails before fs copying."""
+
+
+class FsCopy(object):
+ """Specifies which files/directories must be copied."""
+
+ def __init__(self):
+ # Populate the required parameters with None so we can verify.
+ self._output_tarfile = None
+ self._srcs = []
+ self._excludes = []
+ self._key = None
+ self._recursive = True
+ self._fs_size = 0
+ self._ignore_hard_links = False
+ self._platform = None
+ self._overwrite_list = []
+ self._scratch_dir = '/tmp'
+
+ def SetTarfile(self, tar_file):
+ """Sets tar file which will contain file system copy.
+
+ Args:
+ tar_file: path to a tar file.
+ """
+ self._output_tarfile = tar_file
+
+ def AddSource(self, src, arcname='', recursive=True):
+ """Adds a source to be copied to the tar file.
+
+ Args:
+ src: path to directory/file to be copied.
+ arcname: name of src in the tar archive. If arcname is empty, then instead
+ of copying src itself only its content is copied.
+ recursive: specifies if src directory should be copied recursively.
+
+ Raises:
+ ValueError: If src path doesn't exist.
+ """
+ if not os.path.exists(src):
+ raise ValueError('invalid path')
+ # Note that there is a fundamental asymmetry here as
+ # abspath('/') => '/' while abspath('/usr/') => '/usr'.
+ # This creates some subtleties elsewhere in the code.
+ self._srcs.append((os.path.abspath(src), arcname, recursive))
+
+ def AppendExcludes(self, excludes):
+ """Adds a file/directory to be excluded from file copy.
+
+ Args:
+ excludes: A list of ExcludeSpec objects.
+ """
+ self._excludes.extend(excludes)
+
+ def SetKey(self, key):
+ """Sets a key to use to sign the archive digest.
+
+ Args:
+ key: key to use to sign the archive digest.
+ """
+ # The key is ignored for now.
+ # TODO(user): sign the digest with the key
+ self._key = key
+
+ def SetPlatform(self, platform):
+ """Sets the OS platform which is used to create an image.
+
+ Args:
+ platform: OS platform specific settings.
+ """
+ self._platform = platform
+ logging.warning('overwrite list = %s',
+ ' '.join(platform.GetOverwriteList()))
+ self._overwrite_list = [re.sub('^/', '', x)
+ for x in platform.GetOverwriteList()]
+
+ def SetScratchDirectory(self, directory):
+ """Sets a directory used for storing intermediate results.
+
+ Args:
+ directory: scratch directory path.
+ """
+ self._scratch_dir = directory
+
+ def IgnoreHardLinks(self):
+ """Requests that hard links should not be copied as hard links."""
+
+ # TODO(user): I don't see a reason for this option to exist. Currently
+ # there is a difference in how this option is interpreted between FsTarball
+ # and FsRawDisk. FsTarball only copies one hard link to an inode and ignores
+ # the rest of them. FsRawDisk copies the content of a file that hard link is
+ # pointing to instead of recreating a hard link. Either option seems useless
+ # for creating a copy of a file system.
+ self._ignore_hard_links = True
+
+ def Verify(self):
+ """Verify if we have all the components to build a tar."""
+ self._Verify()
+
+ def Bundleup(self):
+ """Creates the tar image based on set parameters.
+
+ Returns:
+      A tuple of the file system size and the SHA1 digest of the tar archive.
+ """
+ return (0, None)
+
+ def _Verify(self):
+ """Verifies the tar attributes. Raises InvalidTarballError.
+
+ Raises:
+ InvalidFsCopyError: If not all required parameters are set.
+ FsCopyError: If source file does not exist.
+ """
+ if not self._output_tarfile or not self._srcs or not self._key:
+ raise InvalidFsCopyError('Incomplete copy spec')
+ for (src, _, _) in self._srcs:
+ if not os.path.exists(src):
+        raise FsCopyError('%s does not exist' % src)
+
+ def _ShouldExclude(self, filename):
+ """"Checks if a file/directory are excluded from a copy.
+
+ Args:
+ filename: a file/directory path.
+
+ Returns:
+ True if a file/directory shouldn't be copied, False otherwise.
+ """
+ for spec in self._excludes:
+ if spec.ShouldExclude(filename):
+ logging.info('tarfile: Excluded %s', filename)
+ return True
+ return False
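
A small sketch of the AddSource/arcname semantics documented above (paths are
placeholders; in practice a concrete subclass such as FsRawDisk is used rather
than FsCopy itself):

import fs_copy

copier = fs_copy.FsCopy()
copier.SetTarfile('/tmp/out.tar.gz')
copier.AddSource('/data/myimage/root')                  # empty arcname: only the content is copied
copier.AddSource('/data/myimage/home', arcname='home')  # copied as 'home' inside the archive
copier.SetKey('somekey')
copier.Verify()  # checks that tarfile, sources, and key are all set
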
diff --git a/image-bundle/gcel.py b/image-bundle/gcel.py
new file mode 100755
index 0000000..1519d86
--- /dev/null
+++ b/image-bundle/gcel.py
@@ -0,0 +1,58 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""GCE Linux specific platform info."""
+
+
+
+import csv
+import os
+
+import linux
+
+
+class Gcel(linux.LinuxPlatform):
+ """GCE Linux specific information."""
+
+ @staticmethod
+ def IsThisPlatform(root='/'):
+ release_file = root + '/etc/lsb-release'
+ if os.path.exists(release_file):
+ (flavor, _, _, _) = Gcel.ParseLsbRelease(release_file)
+ if flavor and flavor.lower() == 'gcel':
+ return True
+ return False
+
+ @staticmethod
+ def ParseLsbRelease(release_file='/etc/lsb-release'):
+ """Parses the /etc/lsb-releases file.
+
+ Returns:
+ A 4-tuple containing id, release, codename, and description
+ """
+ release_info = {}
+ for line in csv.reader(open(release_file), delimiter='='):
+ if len(line) > 1:
+ release_info[line[0]] = line[1]
+ return (release_info.get('DISTRIB_ID', None),
+ release_info.get('DISTRIB_RELEASE', None),
+ release_info.get('DISTRIB_CODENAME', None),
+ release_info.get('DISTRIB_DESCRIPTION', None))
+
+ def __init__(self):
+ super(Gcel, self).__init__()
+ (self.distribution, self.distribution_version,
+ self.distribution_codename, _) = Gcel.ParseLsbRelease()
diff --git a/image-bundle/image_bundle.py b/image-bundle/image_bundle.py
new file mode 100755
index 0000000..89ccb18
--- /dev/null
+++ b/image-bundle/image_bundle.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Tool to bundle root filesystem to a tarball.
+
+Creates a tar bundle and a manifest, which can be uploaded to the image store.
+"""
+
+
+
+import logging
+from optparse import OptionParser
+import os
+import shutil
+import subprocess
+import tempfile
+
+import block_disk
+import exclude_spec
+import platform_factory
+import utils
+
+
+def SetupArgsParser():
+ """Sets up the command line flags."""
+ parser = OptionParser()
+ parser.add_option('-r', '--root', dest='root_directory',
+ default='/', metavar='ROOT',
+ help='Root of the file system to bundle.'
+ ' Recursively bundles all sub directories.')
+ parser.add_option('-e', '--excludes', dest='excludes',
+ help='Comma separated list of sub directories to exclude.'
+ ' The defaults are platform specific.')
+ parser.add_option('-o', '--output_directory', dest='output_directory',
+ default='/tmp/', metavar='DIR',
+ help='Output directory for image.')
+ parser.add_option('--output_file_name', dest='output_file_name',
+ default=None, metavar='FILENAME',
+ help=('Output filename for the image. Default is a digest'
+ ' of the image bytes.'))
+ parser.add_option('--include_mounts', dest='include_mounts',
+ help='Don\'t ignore mounted filesystems under ROOT.',
+ action='store_true', default=False)
+ parser.add_option('-v', '--version',
+ action='store_true', dest='display_version', default=False,
+ help='Print the tool version.')
+ parser.add_option('--loglevel', dest='log_level',
+ help='Debug logging level.', default='INFO',
+                    choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'])
+ parser.add_option('--log_file', dest='log_file',
+ help='Output file for log messages.')
+ parser.add_option('-k', '--key', dest='key', default='nebula',
+ help='Public key used for signing the image.')
+ parser.add_option('--nocleanup', dest='cleanup',
+ action='store_false', default=True,
+ help=' Do not clean up temporary and log files.')
+ #TODO(user): Get dehumanize.
+ parser.add_option('--fssize', dest='fs_size', default=10*1024*1024*1024,
+ type='int', help='File system size in bytes')
+ parser.add_option('-b', '--bucket', dest='bucket',
+ help='Destination storage bucket')
+ return parser
+
+
+def VerifyArgs(parser, options):
+ """Verifies that commandline flags are consistent."""
+ if not options.output_directory:
+ parser.error('output bundle directory must be specified.')
+ if not os.path.exists(options.output_directory):
+ parser.error('output bundle directory does not exist.')
+ # TODO(user): add more verification as needed
+
+
+def EnsureSuperUser():
+ """Ensures that current user has super user privileges."""
+ if os.getuid() != 0:
+ print 'Tool must be run as root.'
+ exit(-1)
+
+
+def GetLogLevel(options):
+ """Log Level string to logging.LogLevel mapping."""
+ level = {
+ 'DEBUG': logging.DEBUG,
+ 'INFO': logging.INFO,
+ 'WARNING': logging.WARNING,
+ 'ERROR': logging.ERROR,
+ 'CRITICAL': logging.CRITICAL
+ }
+ if options.log_level in level:
+ return level[options.log_level]
+  print 'Invalid logging level. Defaulting to INFO.'
+ return logging.INFO
+
+
+def SetupLogging(options, log_dir='/tmp'):
+ """Set up logging.
+
+ All messages above INFO level are also logged to console.
+
+ Args:
+ options: collection of command line options.
+ log_dir: directory used to generate log files.
+ """
+ if options.log_file:
+ logfile = options.log_file
+ else:
+ logfile = tempfile.mktemp(dir=log_dir, prefix='bundle_log_')
+ print 'Starting logging in %s' % logfile
+ logging.basicConfig(filename=logfile, level=GetLogLevel(options))
+ console = logging.StreamHandler()
+ console.setLevel(logging.INFO)
+ logging.getLogger().addHandler(console)
+
+
+def PrintVersionInfo():
+ #TODO(user): fix up the version string
+ print 'version 1.0'
+
+
+def main():
+ #EnsureSuperUser()
+ parser = SetupArgsParser()
+ (options, _) = parser.parse_args()
+ VerifyArgs(parser, options)
+ if options.display_version:
+ PrintVersionInfo()
+ return 0
+
+ scratch_dir = tempfile.mkdtemp(dir=options.output_directory)
+ SetupLogging(options, scratch_dir)
+ try:
+ guest_platform = platform_factory.PlatformFactory(
+ options.root_directory).GetPlatform()
+ except platform_factory.UnknownPlatformException:
+    print 'Could not determine the host platform.'
+ return -1
+
+ temp_file_name = tempfile.mktemp(dir=scratch_dir, suffix='.tar.gz')
+
+ bundle = block_disk.RootFsRaw(options.fs_size)
+ bundle.SetTarfile(temp_file_name)
+ bundle.AddSource(options.root_directory)
+ bundle.SetKey(options.key)
+ bundle.SetScratchDirectory(scratch_dir)
+
+  # Merge the platform specific exclude list, mount points,
+  # and user specified excludes.
+ excludes = guest_platform.GetExcludeList()
+ if options.excludes:
+ excludes.extend([exclude_spec.ExcludeSpec(x) for x in
+ options.excludes.split(',')])
+ logging.info('exclude list: %s', ' '.join([x.GetSpec() for x in excludes]))
+ bundle.AppendExcludes(excludes)
+ if not options.include_mounts:
+ mount_points = utils.GetMounts(options.root_directory)
+ logging.info('ignoring mounts %s', ' '.join(mount_points))
+ bundle.AppendExcludes([exclude_spec.ExcludeSpec(x, preserve_dir=True) for x
+ in utils.GetMounts(options.root_directory)])
+ bundle.SetPlatform(guest_platform)
+
+ # Verify that bundle attributes are correct and create tar bundle.
+ bundle.Verify()
+ (fs_size, digest) = bundle.Bundleup()
+ if not digest:
+ logging.critical('Could not get digest for the bundle.'
+ ' The bundle may not be created correctly')
+ return -1
+ if fs_size > options.fs_size:
+ logging.critical('Size of tar %d exceeds the file system size %d.', fs_size,
+ options.fs_size)
+ return -1
+
+ if options.output_file_name:
+ output_file = os.path.join(
+ options.output_directory, options.output_file_name)
+ else:
+ output_file = os.path.join(
+ options.output_directory, '%s.image.tar.gz' % digest)
+
+ os.rename(temp_file_name, output_file)
+
+ if options.bucket:
+ bucket = options.bucket
+ if bucket.startswith('gs://'):
+ output_bucket = '%s/%s' % (
+ bucket, os.path.basename(output_file))
+ else:
+ output_bucket = 'gs://%s/%s' % (
+ bucket, os.path.basename(output_file))
+ # TODO: Consider using boto library directly.
+ cmd = ['gsutil', 'cp', output_file, output_bucket]
+ retcode = subprocess.call(cmd)
+ if retcode != 0:
+ logging.critical('Failed to copy image to bucket. '
+ 'gsutil returned %d. To retry, run the command: %s',
+ retcode, ' '.join(cmd))
+
+ return -1
+ logging.info('Uploaded image to %s', output_bucket)
+
+ # If we've uploaded, then we can remove the local file.
+ os.remove(output_file)
+
+ if options.cleanup:
+ shutil.rmtree(scratch_dir)
+
+
+if __name__ == '__main__':
+ main()
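
A sketch of the artifact naming main() performs for a typical run (the digest
and bucket name below are placeholder values):

import os

digest = 'da39a3ee5e6b4b0d3255bfef95601890afd80709'  # placeholder SHA1 from Bundleup()
output_file = os.path.join('/tmp', '%s.image.tar.gz' % digest)
output_bucket = 'gs://%s/%s' % ('my-bucket', os.path.basename(output_file))
# With -b my-bucket, the tool then runs: gsutil cp <output_file> <output_bucket>
# and removes the local copy on success.
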
diff --git a/image-bundle/image_bundle_test_base.py b/image-bundle/image_bundle_test_base.py
new file mode 100755
index 0000000..f614532
--- /dev/null
+++ b/image-bundle/image_bundle_test_base.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Base class for image_bundle unittests."""
+
+
+__pychecker__ = 'no-local' # for unittest
+
+
+import os
+import re
+import shutil
+import subprocess
+import tarfile
+import tempfile
+import unittest
+
+from os_platform import Platform
+
+
+class InvalidOverwriteFileException(Exception):
+ """Invalid overwrite target was passed to MockPlatform.Overwrite method."""
+
+
+class MockPlatform(Platform):
+ """Mock platform for image bundle unit tests."""
+ OVERWRITE_LIST = ['test1']
+
+ def __init__(self, tmp_root):
+ super(MockPlatform, self).__init__()
+ self.tmp_root = tmp_root
+
+ def Overwrite(self, filename, arcname, tmpdir):
+ temp = tempfile.mktemp(dir=tmpdir)
+ if arcname != 'test1':
+ raise InvalidOverwriteFileException(arcname)
+ fd = open(temp, 'w')
+ fd.write(open(filename).read())
+ fd.write('something extra.')
+ fd.close()
+ return temp
+
+
+class ImageBundleTest(unittest.TestCase):
+ """ImageBundle Unit Test Base Class."""
+
+ def setUp(self):
+ self.tmp_root = tempfile.mkdtemp(dir='/tmp')
+ self.tmp_path = tempfile.mkdtemp(dir=self.tmp_root)
+ self._SetupFilesystemToTar()
+
+ def tearDown(self):
+ self._CleanupFiles()
+
+ def _SetupFilesystemToTar(self):
+ """Creates some directory structure to tar."""
+ if os.path.exists(self.tmp_path):
+ shutil.rmtree(self.tmp_path)
+ os.makedirs(self.tmp_path)
+ with open(self.tmp_path + '/test1', 'w') as fd:
+ print >> fd, 'some text'
+ shutil.copyfile(self.tmp_path + '/test1', self.tmp_path + '/test2')
+ os.makedirs(self.tmp_path + '/dir1')
+ os.makedirs(self.tmp_path + '/dir1/dir11')
+ os.makedirs(self.tmp_path + '/dir2')
+ os.makedirs(self.tmp_path + '/dir2/dir1')
+ os.symlink(self.tmp_path + '/test1', self.tmp_path + '/dir1/sl1')
+ os.link(self.tmp_path + '/test2', self.tmp_path + '/dir1/hl2')
+ os.symlink(self.tmp_path + '/test2', self.tmp_path + '/dir2/sl2')
+ os.link(self.tmp_path + '/test1', self.tmp_path + '/dir2/hl1')
+
+ def _CleanupFiles(self):
+ """Removes the files under test directory."""
+ if os.path.exists(self.tmp_root):
+ shutil.rmtree(self.tmp_root)
+
+ def _VerifyTarHas(self, tar, expected):
+ p = subprocess.Popen(['tar -tf %s' % tar],
+ stdout=subprocess.PIPE, shell=True)
+ found = p.communicate()[0].split('\n')
+ if './' in found:
+ found.remove('./')
+ if '' in found:
+ found.remove('')
+ self._AssertListEqual(expected, found)
+
+ def _VerifyFileInTarEndsWith(self, tar, filename, text):
+ tf = tarfile.open(tar, 'r:gz')
+ fd = tf.extractfile(filename)
+ file_content = fd.read()
+ self.assertTrue(file_content.endswith(text))
+
+ def _AssertListEqual(self, list1, list2):
+ """Asserts that, when sorted, list1 and list2 are identical."""
+ sorted_list1 = [re.sub(r'/$', '', x) for x in list1]
+ sorted_list2 = [re.sub(r'/$', '', x) for x in list2]
+ sorted_list1.sort()
+ sorted_list2.sort()
+ self.assertEqual(sorted_list1, sorted_list2)
diff --git a/image-bundle/linux.py b/image-bundle/linux.py
new file mode 100755
index 0000000..94afb53
--- /dev/null
+++ b/image-bundle/linux.py
@@ -0,0 +1,118 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Base class for Linux platform specific information."""
+
+
+
+import os
+import platform
+import stat
+import tempfile
+
+import exclude_spec
+import os_platform
+
+
+class LinuxPlatform(os_platform.Platform):
+ """Base class for all Linux flavors."""
+ EXCLUDE_LIST = [exclude_spec.ExcludeSpec('/tmp', preserve_dir=True),
+ exclude_spec.ExcludeSpec('/var/log', preserve_dir=True,
+ preserve_subdir=True),
+ exclude_spec.ExcludeSpec('/etc/ssh/.host_key_regenerated'),
+ exclude_spec.ExcludeSpec('/var/run', preserve_dir=True),
+ exclude_spec.ExcludeSpec('/var/lib/google/per-instance',
+ preserve_dir=True)]
+
+ def __init__(self):
+ """Populate the uname -a information."""
+ super(LinuxPlatform, self).__init__()
+ (self.name, self.hostname, self.release, self.version, self.architecture,
+ self.processor) = platform.uname()
+ (self.distribution, self.distribution_version,
+ self.distribution_codename) = platform.dist()
+
+ def GetPlatformDetails(self):
+ return ' '.join([self.name, self.hostname, self.release, self.version,
+ self.architecture, self.processor, self.distribution,
+ self.distribution_version, self.distribution_codename])
+
+ def GetName(self):
+ return self.GetOs()
+
+ def GetProcessor(self):
+ return platform.processor()
+
+ def GetArchitecture(self):
+ if self.architecture:
+ return self.architecture
+ return ''
+
+ def GetOs(self):
+ if self.distribution:
+ if self.distribution_codename:
+ return '%s (%s)' % (self.distribution, self.distribution_codename)
+ else:
+ return self.distribution
+ if self.name:
+ return self.name
+ return 'Linux'
+
+ def IsLinux(self):
+ return True
+
+ # Linux specific methods
+ def GetKernelVersion(self):
+ return self.release
+
+ # distribution specific methods
+ # if platforms module does not do a good job override these.
+ def GetDistribution(self):
+ return self.distribution
+
+ def GetDistributionCodeName(self):
+ return self.distribution_codename
+
+ def GetDistributionVersion(self):
+ return self.distribution_version
+
+ def GetPlatformSpecialFiles(self, tmpdir='/tmp'):
+ """Creates any platform specific special files."""
+ retval = []
+ console_dev = os.makedev(5, 1)
+ os.mknod(tmpdir + 'console', stat.S_IFCHR |
+ stat.S_IRUSR | stat.S_IWUSR, console_dev)
+ retval.append((tmpdir + 'console', 'dev/console'))
+ null_dev = os.makedev(1, 3)
+ os.mknod(tmpdir + 'null', stat.S_IFCHR |
+ stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP |
+ stat.S_IROTH | stat.S_IWOTH, null_dev)
+ retval.append((tmpdir + 'null', 'dev/null'))
+ tty_dev = os.makedev(5, 0)
+ os.mknod(tmpdir + 'tty', stat.S_IFCHR |
+ stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP |
+ stat.S_IROTH | stat.S_IWOTH, tty_dev)
+ retval.append((tmpdir + 'tty', 'dev/tty'))
+ zero_dev = os.makedev(1, 5)
+ os.mknod(tmpdir + 'zero', stat.S_IFCHR |
+ stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP |
+ stat.S_IROTH | stat.S_IWOTH, zero_dev)
+ retval.append((tmpdir + 'zero', 'dev/zero'))
+ return retval
+
+ def Overwrite(self, filename, arcname, tmpdir='/tmp'):
+ """Overwrites specified file if needed for the Linux platform."""
+ pass
diff --git a/image-bundle/os_platform.py b/image-bundle/os_platform.py
new file mode 100755
index 0000000..7dc815f
--- /dev/null
+++ b/image-bundle/os_platform.py
@@ -0,0 +1,73 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Base class for platform specific information."""
+
+
+
+
+class Platform(object):
+ """Base class for platform information."""
+ EXCLUDE_LIST = []
+ OVERWRITE_LIST = []
+
+ @staticmethod
+ def IsThisPlatform(root='/'):
+ return False
+
+ def __init__(self):
+ pass
+
+ def GetName(self):
+ """Generic name for the platform."""
+ return 'Unknown'
+
+ def GetProcessor(self):
+ """Real processor."""
+ return ''
+
+ def GetArchitecture(self):
+ """Returns machine architecture."""
+ return ''
+
+ def GetExcludeList(self):
+ """Returns the default exclude list of the platform."""
+ return self.__class__.EXCLUDE_LIST
+
+ def GetOs(self):
+ """Returns the name of OS."""
+ return 'Unknown'
+
+ def IsLinux(self):
+ return False
+
+ def IsWindows(self):
+ return False
+
+ def IsUnix(self):
+ return False
+
+ def GetOverwriteList(self):
+ """Returns list of platform specific files to overwrite."""
+ return self.__class__.OVERWRITE_LIST
+
+ def Overwrite(self, file_path, file_name, scratch_dir):
+ """Called for each file in the OverwriteList."""
+ return file_name
+
+ def GetPlatformSpecialFiles(self, tmpdir):
+ """returns a list of platform special files that should be created."""
+ return []
diff --git a/image-bundle/platform_factory.py b/image-bundle/platform_factory.py
new file mode 100755
index 0000000..637c82d
--- /dev/null
+++ b/image-bundle/platform_factory.py
@@ -0,0 +1,54 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Factory that guesses the correct platform and creates it."""
+
+
+
+import centos
+import debian
+import gcel
+import logging
+import ubuntu
+
+
+class UnknownPlatformException(Exception):
+ """The platform could not be correctly determined."""
+
+
+class PlatformFactory(object):
+ """Guess the platform and create it."""
+
+ def __init__(self, root='/'):
+ self.__root = root
+ self.__registry = {}
+ self.__platform_registry = {}
+ self.Register('Ubuntu', ubuntu.Ubuntu)
+ self.Register('GCEL', gcel.Gcel)
+ self.Register('Centos', centos.Centos)
+ self.Register('Debian', debian.Debian)
+
+ def Register(self, name, klass):
+ self.__registry[name] = klass
+
+ def GetPlatform(self):
+ for name in self.__registry:
+ if self.__registry[name].IsThisPlatform(self.__root):
+ logging.info('found platform %s', name)
+ return self.__registry[name]()
+ else:
+ logging.debug('skipping platform %s %s ', name, self.__registry[name])
+ raise UnknownPlatformException('Could not determine host platform.')
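
A minimal sketch of how the factory is consumed (this mirrors the call in
image_bundle.py above):

import platform_factory

try:
  guest_platform = platform_factory.PlatformFactory(root='/').GetPlatform()
except platform_factory.UnknownPlatformException:
  guest_platform = None  # unsupported distribution; image_bundle.py aborts here

# GetPlatform() asks each registered class (Ubuntu, Gcel, Centos, Debian) in
# turn via IsThisPlatform(root) and instantiates the first one that matches.
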
diff --git a/image-bundle/ubuntu.py b/image-bundle/ubuntu.py
new file mode 100755
index 0000000..9a343f8
--- /dev/null
+++ b/image-bundle/ubuntu.py
@@ -0,0 +1,57 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Ubuntu specific platform info."""
+
+
+
+import csv
+import os
+import linux
+
+
+class Ubuntu(linux.LinuxPlatform):
+ """Ubuntu specific information."""
+
+ @staticmethod
+ def IsThisPlatform(root='/'):
+ release_file = root + '/etc/lsb-release'
+ if os.path.exists(release_file):
+ (_, _, flavor, _) = Ubuntu.ParseLsbRelease(release_file)
+ if flavor and flavor.lower() == 'ubuntu':
+ return True
+ return False
+
+ @staticmethod
+ def ParseLsbRelease(release_file='/etc/lsb-release'):
+ """Parses the /etc/lsb-releases file."""
+ release_info = {}
+ for line in csv.reader(open(release_file), delimiter='='):
+ if len(line) > 1:
+ release_info[line[0]] = line[1]
+ if ('DISTRIB_CODENAME' not in release_info or
+ 'DISTRIB_DESCRIPTION' not in release_info or
+ 'DISTRIB_ID' not in release_info or
+ 'DISTRIB_RELEASE' not in release_info):
+ return (None, None, None, None)
+ return (release_info['DISTRIB_CODENAME'],
+ release_info['DISTRIB_DESCRIPTION'],
+ release_info['DISTRIB_ID'],
+ release_info['DISTRIB_RELEASE'])
+
+ def __init__(self):
+ super(Ubuntu, self).__init__()
+ (self.distribution_codename, _, self.distribution,
+ self.distribution_version) = Ubuntu.ParseLsbRelease()
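
For reference, given a typical /etc/lsb-release (the sample contents below are
an assumption, not part of this change):

#   DISTRIB_ID=Ubuntu
#   DISTRIB_RELEASE=12.04
#   DISTRIB_CODENAME=precise
#   DISTRIB_DESCRIPTION="Ubuntu 12.04 LTS"
#
# Ubuntu.ParseLsbRelease() returns the values in (codename, description, id,
# release) order, roughly ('precise', 'Ubuntu 12.04 LTS', 'Ubuntu', '12.04'),
# and __init__ maps the first, third, and fourth of these onto
# distribution_codename, distribution, and distribution_version.
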
diff --git a/image-bundle/utils.py b/image-bundle/utils.py
new file mode 100755
index 0000000..5c9def2
--- /dev/null
+++ b/image-bundle/utils.py
@@ -0,0 +1,289 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Utilities for image bundling tool."""
+
+
+
+import logging
+import os
+import subprocess
+import time
+
+
+class MakePartitionTableException(Exception):
+ """Error occurred in parted during partition table creation."""
+
+
+class MakePartitionException(Exception):
+ """Error occurred in parted during partition creation."""
+
+
+class LoadDiskImageException(Exception):
+ """Error occurred in kpartx loading a raw image."""
+
+
+class MakeFileSystemException(Exception):
+ """Error occurred in file system creation."""
+
+
+class MountFileSystemException(Exception):
+ """Error occurred in file system mount."""
+
+
+class RsyncException(Exception):
+ """Error occurred in rsync execution."""
+
+
+class TarAndGzipFileException(Exception):
+ """Error occurred in tar\gzip execution."""
+
+
+class LoadDiskImage(object):
+ """Loads raw disk image using kpartx."""
+
+ def __init__(self, file_path):
+ """Initializes LoadDiskImage object.
+
+ Args:
+ file_path: a path to a file containing raw disk image.
+
+ Raises:
+      LoadDiskImageException: If kpartx encountered an error while loading the image.
+
+ Returns:
+ A list of devices for every partition found in an image.
+ """
+ self._file_path = file_path
+
+ def __enter__(self):
+ """Map disk image as a device."""
+ kpartx_cmd = ['kpartx', '-av', self._file_path]
+ p = subprocess.Popen(kpartx_cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ output = p.communicate()[0]
+ if p.returncode != 0:
+ raise LoadDiskImageException(output)
+ devs = []
+ for line in output.splitlines():
+ split_line = line.split()
+ if (len(split_line) > 2 and split_line[0] == 'add'
+ and split_line[1] == 'map'):
+ devs.append('/dev/mapper/' + split_line[2])
+ return devs
+
+ def __exit__(self, unused_exc_type, unused_exc_value, unused_exc_tb):
+ """Unmap disk image as a device.
+
+ Args:
+ unused_exc_type: unused.
+ unused_exc_value: unused.
+ unused_exc_tb: unused.
+ """
+ kpartx_cmd = ['kpartx', '-d', self._file_path]
+ subprocess.call(kpartx_cmd)
+
+
+class MountFileSystem(object):
+ """Mounts a file system."""
+
+ def __init__(self, dev_path, dir_path):
+ """Initializes MountFileSystem object.
+
+ Args:
+ dev_path: A path to a device to mount.
+ dir_path: A path to a directory where a device is to be mounted.
+ """
+ self._dev_path = dev_path
+ self._dir_path = dir_path
+
+ def __enter__(self):
+ """Mounts a device.
+
+ Raises:
+ MakeFileSystemException: If a mount command encountered an error.
+ """
+ mount_cmd = ['mount', self._dev_path, self._dir_path]
+ retcode = subprocess.call(mount_cmd)
+ if retcode != 0:
+ raise MakeFileSystemException(self._dev_path)
+
+ def __exit__(self, unused_exc_type, unused_exc_value, unused_exc_tb):
+ """Unmounts a file system.
+
+ Args:
+ unused_exc_type: unused.
+ unused_exc_value: unused.
+ unused_exc_tb: unused.
+ """
+ umount_cmd = ['umount', self._dir_path]
+ subprocess.call(umount_cmd)
+
+
+def GetMounts(root='/'):
+ """Find all mount points under the specified root.
+
+ Args:
+ root: a path to look for a mount points.
+
+ Returns:
+ A list of mount points.
+ """
+ mount_cmd = ['/bin/mount', '-l']
+ output = subprocess.Popen(mount_cmd, stdout=subprocess.PIPE).communicate()[0]
+ mounts = []
+ for line in output.splitlines():
+ split_line = line.split()
+ mount_point = split_line[2]
+ if mount_point == root:
+ continue
+    # We are simply ignoring the fs type for now, but we can use it later.
+    # Just verify that these are actually mount points.
+ if os.path.ismount(mount_point) and mount_point.startswith(root):
+ mounts.append(mount_point)
+ return mounts
+
+
+def MakePartitionTable(file_path):
+ """Create a partition table in a file.
+
+ Args:
+ file_path: A path to a file where a partition table will be created.
+
+ Raises:
+ MakePartitionTableException: If parted encounters an error.
+ """
+ parted_cmd = ['parted', file_path, 'mklabel', 'msdos']
+ retcode = subprocess.call(parted_cmd)
+ if retcode != 0:
+ raise MakePartitionTableException(file_path)
+
+
+def MakePartition(file_path, partition_type, fs_type, start, end):
+ """Create a partition in a file.
+
+ Args:
+ file_path: A path to a file where a partition will be created.
+    partition_type: A type of a partition to be created. Tested option is primary.
+ fs_type: A type of a file system to be created. For example, ext2, ext3,
+ etc.
+ start: Start offset of a partition in bytes.
+ end: End offset of a partition in bytes.
+
+ Raises:
+ MakePartitionException: If parted encounters an error.
+ """
+ parted_cmd = ['parted', file_path, 'mkpart', partition_type, fs_type,
+ str(start / (1024 * 1024)), str(end / (1024 * 1024))]
+ retcode = subprocess.call(parted_cmd)
+ if retcode != 0:
+ raise MakePartitionException(file_path)
+
+
+def MakeFileSystem(dev_path, fs_type):
+ """Create a file system in a device.
+
+ Args:
+ dev_path: A path to a device.
+ fs_type: A type of a file system to be created. For example ext2, ext3, etc.
+
+ Returns:
+ The uuid of the filesystem.
+
+ Raises:
+ MakeFileSystemException: If mkfs encounters an error.
+ """
+ p = subprocess.Popen(['uuidgen'], stdout=subprocess.PIPE)
+ if p.wait() != 0:
+ raise MakeFileSystemException(dev_path)
+ uuid = p.communicate()[0].strip()
+ if uuid is None:
+ raise MakeFileSystemException(dev_path)
+
+ mkfs_cmd = ['mkfs', '-t', fs_type, dev_path]
+ retcode = subprocess.call(mkfs_cmd)
+ if retcode != 0:
+ raise MakeFileSystemException(dev_path)
+
+ set_uuid_cmd = ['tune2fs', '-U', uuid, dev_path]
+ retcode = subprocess.call(set_uuid_cmd)
+ if retcode != 0:
+ raise MakeFileSystemException(dev_path)
+
+ return uuid
+
+
+def Rsync(src, dest, exclude_file, ignore_hard_links, recursive):
+ """Copy files from specified directory using rsync.
+
+ Args:
+ src: Source location to copy.
+ dest: Destination to copy files to.
+ exclude_file: A path to a file which contains a list of exclude from copy
+ filters.
+    ignore_hard_links: If True, hard links are copied as separate files. If
+      False, hard links are recreated in dest.
+ recursive: Specifies if directories are copied recursively or not.
+
+ Raises:
+ RsyncException: If rsync encounters an error.
+ """
+ rsync_cmd = ['rsync', '--times', '--perms', '--owner', '--group', '--links',
+ '--devices', '--sparse']
+ if not ignore_hard_links:
+ rsync_cmd.append('--hard-links')
+ if recursive:
+ rsync_cmd.append('--recursive')
+ else:
+ rsync_cmd.append('--dirs')
+ if exclude_file:
+ rsync_cmd.append('--exclude-from=' + exclude_file)
+ rsync_cmd.extend([src, dest])
+
+ logging.debug('Calling: %s', repr(rsync_cmd))
+ if exclude_file:
+ logging.debug('Contents of exclude file %s:', exclude_file)
+ with open(exclude_file, 'rb') as excludes:
+ for line in excludes:
+ logging.debug(' %s', line.rstrip())
+
+ # TODO: It would be great to capture the stderr/stdout from this and
+ # put it in the log. We could then include verbose output.
+ retcode = subprocess.call(rsync_cmd)
+ if retcode != 0:
+ raise RsyncException(src)
+
+
+def TarAndGzipFile(src, dest):
+ """Pack file in tar archive and optionally gzip it.
+
+ Args:
+ src: A file to archive.
+    dest: The archive name. If it ends with .gz or .tgz, the archive is gzipped
+      as well.
+
+ Raises:
+ TarAndGzipFileException: If tar encounters an error.
+ """
+ if dest.endswith('.gz') or dest.endswith('.tgz'):
+ mode = 'czSf'
+ else:
+ mode = 'cSf'
+ tar_cmd = ['tar', mode, dest, '-C', os.path.dirname(src),
+ os.path.basename(src)]
+ retcode = subprocess.call(tar_cmd)
+ if retcode != 0:
+ raise TarAndGzipFileException(src)
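
Taken together, the helpers above are used by FsRawDisk.Bundleup in roughly
this sequence (a condensed sketch; paths and sizes are placeholders):

import utils

utils.MakePartitionTable('/tmp/disk.raw')                  # parted mklabel msdos
utils.MakePartition('/tmp/disk.raw', 'primary', 'ext2',
                    1024 * 1024, 10 * 1024 * 1024 * 1024)  # offsets in bytes
with utils.LoadDiskImage('/tmp/disk.raw') as devices:      # kpartx -av
  uuid = utils.MakeFileSystem(devices[0], 'ext4')          # mkfs + tune2fs -U
  with utils.MountFileSystem(devices[0], '/mnt/scratch'):
    utils.Rsync('/some/src/', '/mnt/scratch', None,
                ignore_hard_links=False, recursive=True)
utils.TarAndGzipFile('/tmp/disk.raw', '/tmp/disk.tar.gz')  # gzipped because of the .gz suffix
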