Diffstat (limited to 'gcimagebundle/gcimagebundlelib')
-rw-r--r--  gcimagebundle/gcimagebundlelib/__init__.py                      |   0
-rw-r--r--  gcimagebundle/gcimagebundlelib/block_disk.py                    | 389
-rw-r--r--  gcimagebundle/gcimagebundlelib/centos.py                        |  66
-rw-r--r--  gcimagebundle/gcimagebundlelib/debian.py                        |  36
-rw-r--r--  gcimagebundle/gcimagebundlelib/exclude_spec.py                  |  82
-rw-r--r--  gcimagebundle/gcimagebundlelib/fedora.py                        |  56
-rw-r--r--  gcimagebundle/gcimagebundlelib/fs_copy.py                       | 180
-rw-r--r--  gcimagebundle/gcimagebundlelib/gcel.py                          |  57
-rwxr-xr-x  gcimagebundle/gcimagebundlelib/imagebundle.py                   | 265
-rw-r--r--  gcimagebundle/gcimagebundlelib/linux.py                         | 135
-rwxr-xr-x  gcimagebundle/gcimagebundlelib/manifest.py                      |  79
-rw-r--r--  gcimagebundle/gcimagebundlelib/opensuse.py                      |  29
-rw-r--r--  gcimagebundle/gcimagebundlelib/os_platform.py                   |  70
-rw-r--r--  gcimagebundle/gcimagebundlelib/platform_factory.py              |  60
-rw-r--r--  gcimagebundle/gcimagebundlelib/rhel.py                          |  42
-rw-r--r--  gcimagebundle/gcimagebundlelib/sle.py                           |  34
-rw-r--r--  gcimagebundle/gcimagebundlelib/suse.py                          |  91
-rw-r--r--  gcimagebundle/gcimagebundlelib/tests/__init__.py                |  16
-rwxr-xr-x  gcimagebundle/gcimagebundlelib/tests/block_disk_test.py         | 512
-rwxr-xr-x  gcimagebundle/gcimagebundlelib/tests/image_bundle_test_base.py  | 140
-rwxr-xr-x  gcimagebundle/gcimagebundlelib/tests/utils_test.py              |  49
-rw-r--r--  gcimagebundle/gcimagebundlelib/ubuntu.py                        |  54
-rw-r--r--  gcimagebundle/gcimagebundlelib/utils.py                         | 455
23 files changed, 2897 insertions(+), 0 deletions(-)
diff --git a/gcimagebundle/gcimagebundlelib/__init__.py b/gcimagebundle/gcimagebundlelib/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/__init__.py
diff --git a/gcimagebundle/gcimagebundlelib/block_disk.py b/gcimagebundle/gcimagebundlelib/block_disk.py
new file mode 100644
index 0000000..a860b89
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/block_disk.py
@@ -0,0 +1,389 @@
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module to create raw disk images.
+
+Stores a copy of directories/files in a file mounted as a partitioned block
+device.
+"""
+
+
+
+import hashlib
+import logging
+import os
+import re
+import tempfile
+
+from gcimagebundlelib import exclude_spec
+from gcimagebundlelib import fs_copy
+from gcimagebundlelib import utils
+
+
+class RawDiskError(Exception):
+ """Error occured during raw disk creation."""
+
+
+class InvalidRawDiskError(Exception):
+ """Error when verification fails before copying."""
+
+
+class FsRawDisk(fs_copy.FsCopy):
+ """Creates a raw disk copy of OS image and bundles it into gzipped tar."""
+
+ def __init__(self, fs_size, fs_type):
+ """Constructor for FsRawDisk class.
+
+    Args:
+      fs_size: Size of the raw disk.
+      fs_type: File system type to be created on the raw disk.
+    """
+ super(FsRawDisk, self).__init__()
+ self._fs_size = fs_size
+ self._fs_type = fs_type
+
+ def _ResizeFile(self, file_path, file_size):
+ logging.debug('Resizing %s to %s', file_path, file_size)
+ with open(file_path, 'a') as disk_file:
+ disk_file.truncate(file_size)
+
+ def _InitializeDiskFileFromDevice(self, file_path):
+ """Initializes disk file from the device specified in self._disk.
+
+ It preserves whatever may be there on the device prior to the start of the
+ first partition.
+
+ At the moment this method supports devices with a single partition only.
+
+ Args:
+ file_path: The path where the disk file should be created.
+
+ Returns:
+ A tuple with partition_start, uuid. partition_start is the location
+ where the first partition on the disk starts and uuid is the filesystem
+ UUID to use for the first partition.
+
+ Raises:
+      RawDiskError: If there is more than one partition on the disk device.
+ """
+ # Find the disk size
+ disk_size = utils.GetDiskSize(self._disk)
+ logging.debug('Size of disk is %s', disk_size)
+ # Make the disk file big enough to hold the disk
+ self._ResizeFile(file_path, disk_size)
+ # Find the location where the first partition starts
+ partition_start = utils.GetPartitionStart(self._disk, 1)
+ logging.debug('First partition starts at %s', partition_start)
+ # Copy all the bytes as is from the start of the disk to the start of
+ # first partition
+ utils.CopyBytes(self._disk, file_path, partition_start)
+ # Verify there is only 1 partition on the disk
+ with utils.LoadDiskImage(file_path) as devices:
+ # For now we only support disks with a single partition.
+ if len(devices) == 0:
+ raise RawDiskError(
+ 'Device %s should be a disk not a partition.' % self._disk)
+ elif len(devices) != 1:
+ raise RawDiskError(
+ 'Device %s has more than 1 partition. Only devices '
+ 'with a single partition are supported.' % self._disk)
+ # Remove the first partition from the file we are creating. We will
+ # recreate a partition that will fit inside _fs_size later.
+ utils.RemovePartition(file_path, 1)
+ # Resize the disk.raw file down to self._fs_size
+ # We do this after removing the first partition to ensure that an
+ # existing partition doesn't fall outside the boundary of the disk device.
+ self._ResizeFile(file_path, self._fs_size)
+ # Get UUID of the first partition on the disk
+ # TODO(user): This is very hacky and relies on the disk path being
+ # similar to /dev/sda etc which is bad. Need to fix it.
+ uuid = utils.GetUUID(self._disk + '1')
+ return partition_start, uuid
+
+ def Bundleup(self):
+ """Creates a raw disk copy of OS image and bundles it into gzipped tar.
+
+ Returns:
+      The size of the generated raw disk and the SHA1 digest of the tar archive.
+
+ Raises:
+ RawDiskError: If number of partitions in a created image doesn't match
+ expected count.
+ """
+
+ # Create sparse file with specified size
+ disk_file_path = os.path.join(self._scratch_dir, 'disk.raw')
+ with open(disk_file_path, 'wb') as _:
+ pass
+ self._excludes.append(exclude_spec.ExcludeSpec(disk_file_path))
+
+ logging.info('Initializing disk file')
+ partition_start = None
+ uuid = None
+ if self._disk:
+ # If a disk device has been provided then preserve whatever is there on
+ # the disk before the first partition in case there is an MBR present.
+ partition_start, uuid = self._InitializeDiskFileFromDevice(disk_file_path)
+ else:
+ # User didn't specify a disk device. Initialize a device with a simple
+ # partition table.
+ self._ResizeFile(disk_file_path, self._fs_size)
+ # User didn't specify a disk to copy. Create a new partition table
+ utils.MakePartitionTable(disk_file_path)
+ # Pass 1MB as start to avoid 'Warning: The resulting partition is not
+ # properly aligned for best performance.' from parted.
+ partition_start = 1024 * 1024
+
+ # Create a new partition starting at partition_start of size
+ # self._fs_size - partition_start
+ utils.MakePartition(disk_file_path, 'primary', 'ext2', partition_start,
+ self._fs_size - partition_start)
+ with utils.LoadDiskImage(disk_file_path) as devices:
+ # For now we only support disks with a single partition.
+ if len(devices) != 1:
+ raise RawDiskError(devices)
+ # List contents of /dev/mapper to help with debugging. Contents will
+ # be listed in debug log only
+ utils.RunCommand(['ls', '/dev/mapper'])
+ logging.info('Making filesystem')
+ uuid = utils.MakeFileSystem(devices[0], self._fs_type, uuid)
+ with utils.LoadDiskImage(disk_file_path) as devices:
+ if uuid is None:
+ raise Exception('Could not get uuid from MakeFileSystem')
+ mount_point = tempfile.mkdtemp(dir=self._scratch_dir)
+ with utils.MountFileSystem(devices[0], mount_point, self._fs_type):
+ logging.info('Copying contents')
+ self._CopySourceFiles(mount_point)
+ self._CopyPlatformSpecialFiles(mount_point)
+ self._ProcessOverwriteList(mount_point)
+ self._CleanupNetwork(mount_point)
+ self._UpdateFstab(mount_point, uuid)
+
+ tar_entries = []
+
+ manifest_file_path = os.path.join(self._scratch_dir, 'manifest.json')
+ manifest_created = self._manifest.CreateIfNeeded(manifest_file_path)
+ if manifest_created:
+ tar_entries.append(manifest_file_path)
+
+ tar_entries.append(disk_file_path)
+ logging.info('Creating tar.gz archive')
+ utils.TarAndGzipFile(tar_entries,
+ self._output_tarfile)
+ for tar_entry in tar_entries:
+ os.remove(tar_entry)
+
+ # TODO(user): It would be better to compute tar.gz file hash during
+ # archiving.
+ h = hashlib.sha1()
+ with open(self._output_tarfile, 'rb') as tar_file:
+ for chunk in iter(lambda: tar_file.read(8192), ''):
+ h.update(chunk)
+ return (self._fs_size, h.hexdigest())
+
+ def _CopySourceFiles(self, mount_point):
+ """Copies all source files/directories to a mounted raw disk.
+
+ There are several cases which must be handled separately:
+ 1. src=dir1 and dest is empty. In this case we simply copy the content of
+ dir1 to mount_point.
+ 2. src=dir1 and dest=dir2. In this case dir1 is copied to mount_point
+ under a new name dir2, so its content would be copied under
+ mount_point/dir2.
+ 3. src=file1/dir1 and dest=file2/dir2 and is_recursive=False. file1/dir1
+ is copied to mount_point/file2 or mount_point/dir2.
+
+ Args:
+ mount_point: A path to a mounted raw disk.
+ """
+ for (src, dest, is_recursive) in self._srcs:
+ # Generate a list of files/directories excluded from copying to raw disk.
+ # rsync expects them to be relative to src directory so we need to
+ # regenerate this list for every src separately.
+ with tempfile.NamedTemporaryFile(dir=self._scratch_dir) as rsync_file:
+ for spec in self._excludes:
+ rsync_file.write(spec.GetRsyncSpec(src))
+
+ # make sure that rsync utility sees all the content of rsync_file which
+ # otherwise can be buffered.
+ rsync_file.flush()
+ if is_recursive:
+ # if a directory ends with / rsync copies the content of a
+ # directory, otherwise it also copies the directory itself.
+ src = src.rstrip('/')
+ if not dest:
+ src += '/'
+ utils.Rsync(src, mount_point, rsync_file.name,
+ self._ignore_hard_links, recursive=True, xattrs=True)
+ if dest:
+ os.rename(os.path.join(mount_point, os.path.basename(src)),
+ os.path.join(mount_point, dest))
+ else:
+ utils.Rsync(src, os.path.join(mount_point, dest), rsync_file.name,
+ self._ignore_hard_links, recursive=False, xattrs=True)
+
+ def _CopyPlatformSpecialFiles(self, mount_point):
+ """Copies platform special files to a mounted raw disk.
+
+ Args:
+ mount_point: A path to a mounted raw disk.
+ """
+ if self._platform:
+ special_files = self._platform.GetPlatformSpecialFiles(self._scratch_dir)
+ for (src, dest) in special_files:
+ # Ensure we don't use extended attributes here, so that copying /selinux
+ # on Linux doesn't try and fail to preserve the SELinux context. That
+ # doesn't work and causes rsync to return a nonzero status code.
+ utils.Rsync(src, os.path.join(mount_point, dest), None,
+ self._ignore_hard_links, recursive=False, xattrs=False)
+
+ def _ProcessOverwriteList(self, mount_point):
+ """Overwrites a set of files/directories requested by platform.
+
+ Args:
+ mount_point: A path to a mounted raw disk.
+ """
+ for file_name in self._overwrite_list:
+ file_path = os.path.join(mount_point, file_name)
+ if os.path.exists(file_path):
+ if os.path.isdir(file_path):
+          # TODO(user): platform.Overwrite is expected to overwrite the
+          # directory in place from what I can tell. In case of a file it will
+          # create a new file which must be copied to the mounted raw disk. So
+          # there is some inconsistency which would need to be addressed if and
+          # when we encounter a platform that wants to overwrite a directory.
+ self._platform.Overwrite(file_path, file_name, self._scratch_dir)
+ logging.info('rawdisk: modifying directory %s', file_path)
+ else:
+ new_file = self._platform.Overwrite(file_path, file_name,
+ self._scratch_dir)
+ logging.info('rawdisk: modifying %s from %s', file_path, new_file)
+ utils.Rsync(new_file, file_path, None, self._ignore_hard_links,
+ recursive=False, xattrs=True)
+
+
+ def _CleanupNetwork(self, mount_point):
+ """Remove any record of our current MAC address."""
+ net_rules_path = os.path.join(
+ mount_point,
+ 'lib/udev/rules.d/75-persistent-net-generator.rules')
+ if os.path.exists(net_rules_path):
+ os.remove(net_rules_path)
+
+ def _UpdateFstab(self, mount_point, uuid):
+ """Update /etc/fstab with the new root fs UUID."""
+ fstab_path = os.path.join(mount_point, 'etc/fstab')
+ if not os.path.exists(fstab_path):
+ logging.warning('etc/fstab does not exist. Not updating fstab uuid')
+ return
+
+ f = open(fstab_path, 'r')
+ lines = f.readlines()
+ f.close()
+
+ def UpdateUUID(line):
+ """Replace the UUID on the entry for /."""
+ g = re.match(r'UUID=\S+\s+/\s+(.*)', line)
+ if not g:
+ return line
+ return 'UUID=%s / %s\n' % (uuid, g.group(1))
+
+ logging.debug('Original /etc/fstab contents:\n%s', lines)
+ updated_lines = map(UpdateUUID, lines)
+ if lines == updated_lines:
+ logging.debug('No changes required to /etc/fstab')
+ return
+ logging.debug('Updated /etc/fstab contents:\n%s', updated_lines)
+ f = open(fstab_path, 'w')
+ f.write(''.join(updated_lines))
+ f.close()
+
+
+class RootFsRaw(FsRawDisk):
+ """Block disk copy of the root file system.
+
+ Takes care of additional checks for a root file system.
+ """
+
+ def __init__(
+ self, fs_size, fs_type, skip_disk_space_check, statvfs = os.statvfs):
+ # statvfs parameter is for unit test to mock out os.statvfs call.
+ super(RootFsRaw, self).__init__(fs_size, fs_type)
+ self._skip_disk_space_check = skip_disk_space_check
+ self._statvfs = statvfs
+
+ def _Verify(self):
+ super(RootFsRaw, self)._Verify()
+ # exactly one file system to bundle up
+ if len(self._srcs) != 1:
+ raise InvalidRawDiskError('Root filesystems must have exactly one src.')
+ # check that destination field is empty.
+ if self._srcs[0][1]:
+ raise InvalidRawDiskError('Root filesystems must be copied as /')
+ if (not self._skip_disk_space_check and
+ self._srcs[0][0] == '/'):
+ self._VerifyDiskSpace()
+
+ def _VerifyDiskSpace(self):
+ """Verify that there is enough free disk space to generate the image file"""
+ # We use a very quick and simplistic check,
+ # DiskSpaceNeeded = disk.raw + image.tar.gz + LogFile
+ # disk.raw = PartitionTable + AllFilesCopied
+ # AllFilesCopied = RootDiskSize - RootDiskFreeSize - ExcludedFiles
+ # We ignore LogFile, PartitionTable, and ExcludedFiles.
+ # Some empirical experience showed that the compression ratio of the
+ # tar.gz file is about 1/3. To be conservative, we assume image.tar.gz is
+ # about 40% of disk.raw file.
+ # As a result, DiskSpaceNeeded=1.4*(RootDiskSize - RootDiskFreeSize)
+ # TODO(user): Make this check more accurate because ignoring ExcludedFiles
+ # can result in significant overestimation of disk
+ # space needed if the user has large disk space used in /tmp, for example.
+ root_fs = self._statvfs(self._srcs[0][0])
+ disk_space_needed = long(1.4 * root_fs.f_bsize * (root_fs.f_blocks -
+ root_fs.f_bfree))
+ logging.info(("Root disk on %s: f_bsize=%d f_blocks=%d f_bfree=%d. "
+ "Estimated space needed is %d (may be overestimated)."),
+ self._srcs[0][0],
+ root_fs.f_bsize,
+ root_fs.f_blocks,
+ root_fs.f_bfree,
+ disk_space_needed)
+
+ # self._scratch_dir is where we will put the disk.raw and *.tar.gz file.
+ scratch_fs = self._statvfs(self._scratch_dir)
+ free_space = scratch_fs.f_bsize * scratch_fs.f_bfree
+ logging.info("Free disk space for %s is %d bytes.",
+ self._scratch_dir,
+ free_space)
+
+ if disk_space_needed > free_space:
+ errorMessage = ("The operation may require up to %d bytes of disk space. "
+ "However, the free disk space for %s is %d bytes. Please consider "
+ "freeing more disk space. Note that the disk space required may "
+ "be overestimated because it does not exclude temporary files that "
+ "will not be copied. You may use --skip_disk_space_check to disable "
+ "this check.") % (disk_space_needed, self._scratch_dir, free_space)
+ raise InvalidRawDiskError(errorMessage)
+ if disk_space_needed > self._fs_size:
+ errorMessage = ("The root disk files to be copied may require up to %d "
+ "bytes. However, the limit on the image disk file is %d bytes. "
+ "Please consider deleting unused files from root disk, "
+ "or increasing the image disk file limit with --fssize option. "
+ "Note that the disk space required may "
+ "be overestimated because it does not exclude temporary files that "
+ "will not be copied. You may use --skip_disk_space_check to disable "
+ "this check.") % (disk_space_needed, self._fs_size)
+ raise InvalidRawDiskError(errorMessage)
+
+
+
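
For reference, the free-space check in RootFsRaw._VerifyDiskSpace reduces to a
single estimate: space needed is roughly 1.4 * f_bsize * (f_blocks - f_bfree)
of the root filesystem. A minimal standalone sketch of that arithmetic (the
helper name and the sample numbers are made up):

    import os

    def estimate_space_needed(path='/'):
        # Same estimate as RootFsRaw._VerifyDiskSpace: disk.raw holds roughly
        # the used bytes of the source filesystem, and the tar.gz is assumed
        # to add about 40% on top of that.
        st = os.statvfs(path)
        used_bytes = st.f_bsize * (st.f_blocks - st.f_bfree)
        return int(1.4 * used_bytes)

    # With made-up numbers: 4096-byte blocks, 10,000,000 total, 6,000,000 free
    # => used = 4096 * 4,000,000 ~= 16.4 GB, so roughly 22.9 GB is needed.
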
diff --git a/gcimagebundle/gcimagebundlelib/centos.py b/gcimagebundle/gcimagebundlelib/centos.py
new file mode 100644
index 0000000..1a082de
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/centos.py
@@ -0,0 +1,66 @@
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Centos specific platform info."""
+
+
+
+import os
+import platform
+import re
+
+from gcimagebundlelib import linux
+
+
+class Centos(linux.LinuxPlatform):
+ """Centos specific information."""
+
+ @staticmethod
+ def IsThisPlatform(root='/'):
+ release_file = root + '/etc/redhat-release'
+ if os.path.exists(release_file):
+ (_, _, flavor, _) = Centos.ParseRedhatRelease(release_file)
+ if flavor and flavor.lower() == 'centos':
+ return True
+ return False
+
+ @staticmethod
+ def ParseRedhatRelease(release_file='/etc/redhat-release'):
+ """Parses the /etc/redhat-release file."""
+ f = open(release_file)
+ lines = f.readlines()
+ f.close()
+ if not lines:
+ return (None, None, None, None)
+ line0 = lines[0]
+ # Matches both CentOS 6 and CentOS 7 formats.
+ # CentOS 6: CentOS release 6.5 (Final)
+ # CentOS 7: CentOS Linux release 7.0.1406 (Core)
+ g = re.match(r'(\S+)( Linux)? release (\d+(\.\d+)+) \(([^)]*)\)', line0)
+ if not g:
+ return (None, None, None, None)
+ (osname, version, label) = (g.group(1), g.group(3), g.group(5))
+ return (osname, label, osname, version)
+
+ def __init__(self):
+ super(Centos, self).__init__()
+ (self.distribution_codename, _, self.distribution,
+ self.distribution_version) = Centos.ParseRedhatRelease()
+
+ def GetPreferredFilesystemType(self):
+ (_,version,_) = platform.linux_distribution()
+ if version.startswith('7'):
+ return 'xfs'
+ return 'ext4'
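
To make ParseRedhatRelease concrete, here is a small sketch that exercises the
same regular expression against the two release-file formats quoted in the
comments above (loop and variable names are illustrative only):

    import re

    pattern = r'(\S+)( Linux)? release (\d+(\.\d+)+) \(([^)]*)\)'
    for line in ('CentOS release 6.5 (Final)',
                 'CentOS Linux release 7.0.1406 (Core)'):
        g = re.match(pattern, line)
        # group(1) is the OS name, group(3) the version, group(5) the label.
        print('%s %s %s' % (g.group(1), g.group(3), g.group(5)))
    # Prints "CentOS 6.5 Final" and "CentOS 7.0.1406 Core".
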
diff --git a/gcimagebundle/gcimagebundlelib/debian.py b/gcimagebundle/gcimagebundlelib/debian.py
new file mode 100644
index 0000000..957e3a7
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/debian.py
@@ -0,0 +1,36 @@
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Debian Linux specific platform info."""
+
+
+
+import platform
+
+from gcimagebundlelib import linux
+
+
+class Debian(linux.LinuxPlatform):
+ """Debian Linux specific information."""
+
+ @staticmethod
+ def IsThisPlatform(root='/'):
+ (distribution, _, _) = platform.linux_distribution()
+ if distribution and distribution.lower() == 'debian':
+ return True
+ return False
+
+ def __init__(self):
+ super(Debian, self).__init__()
diff --git a/gcimagebundle/gcimagebundlelib/exclude_spec.py b/gcimagebundle/gcimagebundlelib/exclude_spec.py
new file mode 100644
index 0000000..b5bc237
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/exclude_spec.py
@@ -0,0 +1,82 @@
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Exclude file specification."""
+
+
+
+import logging
+import os
+
+
+class ExcludeSpec(object):
+ """Specifies how exclusion of a path should be handled."""
+
+ def __init__(self, path, preserve_file=False, preserve_dir=False,
+ preserve_subdir=False):
+ self.path = path
+ self.preserve_dir = preserve_dir
+ self.preserve_file = False
+ self.preserve_subdir = False
+ # Preserve files and subdirs only if dir is preserved.
+ if preserve_file and preserve_dir:
+ self.preserve_file = True
+ if preserve_subdir and preserve_dir:
+ self.preserve_subdir = True
+
+ def ShouldExclude(self, filename):
+ prefix = os.path.commonprefix([filename, self.path])
+ if prefix == self.path:
+ if ((self.preserve_dir and filename == self.path) or
+ (self.preserve_subdir and os.path.isdir(filename)) or
+ (self.preserve_file and os.path.isfile(filename))):
+ logging.warning('preserving %s', filename)
+ return False
+ return True
+ return False
+
+ def GetSpec(self):
+ return '(%s, %d:%d:%d)' % (self.path, self.preserve_file, self.preserve_dir,
+ self.preserve_subdir)
+
+ def GetRsyncSpec(self, src):
+ """Returns exclude spec in a format required by rsync.
+
+ Args:
+ src: source directory path passed to rsync. rsync expects exclude-spec to
+ be relative to src directory.
+
+ Returns:
+ A string of exclude filters in rsync exclude-from file format.
+ """
+ spec = ''
+ prefix = os.path.commonprefix([src, self.path])
+ if prefix == src:
+ relative_path = os.path.join('/', self.path[len(prefix):])
+ if self.preserve_dir:
+ spec += '+ %s\n' % relative_path
+ if self.preserve_file or self.preserve_subdir:
+ if os.path.isdir(self.path):
+ for f in os.listdir(self.path):
+ file_path = os.path.join(self.path, f)
+ relative_file_path = os.path.join(relative_path, f)
+ if self.preserve_file and os.path.isfile(file_path):
+ spec += '+ %s\n' % relative_file_path
+ if self.preserve_subdir and os.path.isdir(file_path):
+ spec += '+ %s\n' % relative_file_path
+ else:
+ spec += '- %s\n' % relative_path
+ spec += '- %s\n' % os.path.join(relative_path, '**')
+ return spec
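
A short sketch of the filter text GetRsyncSpec generates (paths are made up);
these '+'/'-' lines are written to the exclude file that block_disk passes to
rsync:

    from gcimagebundlelib import exclude_spec

    # Exclude /scratch/cache when bundling from src='/scratch'.
    spec = exclude_spec.ExcludeSpec('/scratch/cache')
    print(spec.GetRsyncSpec('/scratch'))
    # Produces:
    #   - /cache
    #   - /cache/**
    # With preserve_dir=True the first line becomes '+ /cache', so rsync keeps
    # the (now empty) directory while still excluding its contents.
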
diff --git a/gcimagebundle/gcimagebundlelib/fedora.py b/gcimagebundle/gcimagebundlelib/fedora.py
new file mode 100644
index 0000000..21d098b
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/fedora.py
@@ -0,0 +1,56 @@
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Fedora specific platform info."""
+
+
+
+import os
+import re
+
+from gcimagebundlelib import linux
+
+
+class Fedora(linux.LinuxPlatform):
+ """Fedora specific information."""
+
+ @staticmethod
+ def IsThisPlatform(root='/'):
+ release_file = root + '/etc/redhat-release'
+ if os.path.exists(release_file):
+ (_, _, flavor, _) = Fedora.ParseRedhatRelease(release_file)
+ if flavor and flavor.lower() == 'fedora':
+ return True
+ return False
+
+ @staticmethod
+ def ParseRedhatRelease(release_file='/etc/redhat-release'):
+ """Parses the /etc/redhat-release file."""
+ f = open(release_file)
+ lines = f.readlines()
+ f.close()
+ if not lines:
+ return (None, None, None, None)
+ line0 = lines[0]
+ g = re.match(r'(\S+) release (\d+) \(([^)]*)\)', line0)
+ if not g:
+ return (None, None, None, None)
+ (osname, version, label) = (g.group(1), g.group(2), g.group(3))
+ return (osname, label, osname, version)
+
+ def __init__(self):
+ super(Fedora, self).__init__()
+ (self.distribution_codename, _, self.distribution,
+ self.distribution_version) = Fedora.ParseRedhatRelease()
diff --git a/gcimagebundle/gcimagebundlelib/fs_copy.py b/gcimagebundle/gcimagebundlelib/fs_copy.py
new file mode 100644
index 0000000..e9adc91
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/fs_copy.py
@@ -0,0 +1,180 @@
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Creates a copy of specified directories\files."""
+
+
+
+import logging
+import os
+import re
+
+from gcimagebundlelib import manifest
+from gcimagebundlelib import utils
+
+
+class FsCopyError(Exception):
+ """Error occured in fs copy operation."""
+
+
+class InvalidFsCopyError(Exception):
+ """Error when verification fails before fs copying."""
+
+
+class FsCopy(object):
+ """Specifies which files/directories must be copied."""
+
+ def __init__(self):
+ # Populate the required parameters with None so we can verify.
+ self._output_tarfile = None
+ self._srcs = []
+ self._excludes = []
+ self._key = None
+ self._recursive = True
+ self._fs_size = 0
+ self._ignore_hard_links = False
+ self._platform = None
+ self._overwrite_list = []
+ self._scratch_dir = '/tmp'
+ self._disk = None
+ self._manifest = manifest.ImageManifest(is_gce_instance=utils.IsRunningOnGCE())
+
+ def SetTarfile(self, tar_file):
+ """Sets tar file which will contain file system copy.
+
+ Args:
+ tar_file: path to a tar file.
+ """
+ self._output_tarfile = tar_file
+
+ def AddDisk(self, disk):
+ """Adds the disk which should be bundled.
+
+ Args:
+ disk: The block disk that needs to be bundled.
+ """
+ self._disk = disk
+
+ def AddSource(self, src, arcname='', recursive=True):
+ """Adds a source to be copied to the tar file.
+
+ Args:
+ src: path to directory/file to be copied.
+ arcname: name of src in the tar archive. If arcname is empty, then instead
+ of copying src itself only its content is copied.
+ recursive: specifies if src directory should be copied recursively.
+
+ Raises:
+ ValueError: If src path doesn't exist.
+ """
+ if not os.path.exists(src):
+ raise ValueError('invalid path')
+ # Note that there is a fundamental asymmetry here as
+ # abspath('/') => '/' while abspath('/usr/') => '/usr'.
+ # This creates some subtleties elsewhere in the code.
+ self._srcs.append((os.path.abspath(src), arcname, recursive))
+
+ def AppendExcludes(self, excludes):
+ """Adds a file/directory to be excluded from file copy.
+
+ Args:
+ excludes: A list of ExcludeSpec objects.
+ """
+ self._excludes.extend(excludes)
+
+ def SetKey(self, key):
+ """Sets a key to use to sign the archive digest.
+
+ Args:
+ key: key to use to sign the archive digest.
+ """
+ # The key is ignored for now.
+ # TODO(user): sign the digest with the key
+ self._key = key
+
+ def SetPlatform(self, platform):
+ """Sets the OS platform which is used to create an image.
+
+ Args:
+ platform: OS platform specific settings.
+ """
+ self._platform = platform
+ logging.warning('overwrite list = %s',
+ ' '.join(platform.GetOverwriteList()))
+ self._overwrite_list = [re.sub('^/', '', x)
+ for x in platform.GetOverwriteList()]
+
+ def _SetManifest(self, image_manifest):
+ """For test only, allows to set a test manifest object."""
+ self._manifest = image_manifest
+
+ def SetScratchDirectory(self, directory):
+ """Sets a directory used for storing intermediate results.
+
+ Args:
+ directory: scratch directory path.
+ """
+ self._scratch_dir = directory
+
+ def IgnoreHardLinks(self):
+ """Requests that hard links should not be copied as hard links."""
+
+ # TODO(user): I don't see a reason for this option to exist. Currently
+ # there is a difference in how this option is interpreted between FsTarball
+ # and FsRawDisk. FsTarball only copies one hard link to an inode and ignores
+ # the rest of them. FsRawDisk copies the content of a file that hard link is
+ # pointing to instead of recreating a hard link. Either option seems useless
+ # for creating a copy of a file system.
+ self._ignore_hard_links = True
+
+ def Verify(self):
+ """Verify if we have all the components to build a tar."""
+ self._Verify()
+
+ def Bundleup(self):
+ """Creates the tar image based on set parameters.
+
+ Returns:
+      the SHA1 digest of the tar archive.
+ """
+ return (0, None)
+
+ def _Verify(self):
+ """Verifies the tar attributes. Raises InvalidTarballError.
+
+ Raises:
+ InvalidFsCopyError: If not all required parameters are set.
+ FsCopyError: If source file does not exist.
+ """
+ if not self._output_tarfile or not self._srcs or not self._key:
+ raise InvalidFsCopyError('Incomplete copy spec')
+ for (src, _, _) in self._srcs:
+ if not os.path.exists(src):
+        raise FsCopyError('%s does not exist' % src)
+
+ def _ShouldExclude(self, filename):
+ """"Checks if a file/directory are excluded from a copy.
+
+ Args:
+ filename: a file/directory path.
+
+ Returns:
+ True if a file/directory shouldn't be copied, False otherwise.
+ """
+ for spec in self._excludes:
+ if spec.ShouldExclude(filename):
+ logging.info('tarfile: Excluded %s', filename)
+ return True
+ return False
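
A minimal configuration sketch of the FsCopy API, using the concrete FsRawDisk
subclass from block_disk.py; the paths, size and key below are made up, and
Verify() only checks that the required fields are set:

    from gcimagebundlelib import block_disk

    copier = block_disk.FsRawDisk(fs_size=10 * 1024 ** 3, fs_type='ext4')
    copier.SetTarfile('/tmp/scratch/image.tar.gz')      # required
    copier.AddSource('/', arcname='', recursive=True)   # copy contents of /
    copier.SetKey('nebula')                             # required, unused
    copier.SetScratchDirectory('/tmp/scratch')
    copier.Verify()  # raises InvalidFsCopyError if the spec is incomplete
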
diff --git a/gcimagebundle/gcimagebundlelib/gcel.py b/gcimagebundle/gcimagebundlelib/gcel.py
new file mode 100644
index 0000000..2622cf7
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/gcel.py
@@ -0,0 +1,57 @@
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""GCE Linux specific platform info."""
+
+
+
+import csv
+import os
+
+from gcimagebundlelib import linux
+
+
+class Gcel(linux.LinuxPlatform):
+ """GCE Linux specific information."""
+
+ @staticmethod
+ def IsThisPlatform(root='/'):
+ release_file = root + '/etc/lsb-release'
+ if os.path.exists(release_file):
+ (flavor, _, _, _) = Gcel.ParseLsbRelease(release_file)
+ if flavor and flavor.lower() == 'gcel':
+ return True
+ return False
+
+ @staticmethod
+ def ParseLsbRelease(release_file='/etc/lsb-release'):
+ """Parses the /etc/lsb-releases file.
+
+ Returns:
+ A 4-tuple containing id, release, codename, and description
+ """
+ release_info = {}
+ for line in csv.reader(open(release_file), delimiter='='):
+ if len(line) > 1:
+ release_info[line[0]] = line[1]
+ return (release_info.get('DISTRIB_ID', None),
+ release_info.get('DISTRIB_RELEASE', None),
+ release_info.get('DISTRIB_CODENAME', None),
+ release_info.get('DISTRIB_DESCRIPTION', None))
+
+ def __init__(self):
+ super(Gcel, self).__init__()
+ (self.distribution, self.distribution_version,
+ self.distribution_codename, _) = Gcel.ParseLsbRelease()
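
A small sketch of ParseLsbRelease on a made-up lsb-release file (the sample
content and the temporary file are illustrative only):

    import tempfile

    from gcimagebundlelib import gcel

    sample = ('DISTRIB_ID=GCEL\n'
              'DISTRIB_RELEASE=12.04\n'
              'DISTRIB_CODENAME=precise\n')
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
        f.write(sample)
    print(gcel.Gcel.ParseLsbRelease(f.name))
    # -> ('GCEL', '12.04', 'precise', None)  (no DISTRIB_DESCRIPTION line)
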
diff --git a/gcimagebundle/gcimagebundlelib/imagebundle.py b/gcimagebundle/gcimagebundlelib/imagebundle.py
new file mode 100755
index 0000000..f275c3c
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/imagebundle.py
@@ -0,0 +1,265 @@
+# -*- coding: utf-8 -*-
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Tool to bundle root filesystem to a tarball.
+
+Creates a tar bundle and a Manifest, which can be uploaded to image store.
+"""
+
+
+
+import logging
+from optparse import OptionParser
+import os
+import shutil
+import subprocess
+import tempfile
+import time
+
+from gcimagebundlelib import block_disk
+from gcimagebundlelib import exclude_spec
+from gcimagebundlelib import platform_factory
+from gcimagebundlelib import utils
+
+def SetupArgsParser():
+ """Sets up the command line flags."""
+ parser = OptionParser()
+ parser.add_option('-d', '--disk', dest='disk',
+ default='/dev/sda',
+ help='Disk to bundle.')
+ parser.add_option('-r', '--root', dest='root_directory',
+ default='/', metavar='ROOT',
+ help='Root of the file system to bundle.'
+ ' Recursively bundles all sub directories.')
+ parser.add_option('-e', '--excludes', dest='excludes',
+ help='Comma separated list of sub directories to exclude.'
+ ' The defaults are platform specific.')
+ parser.add_option('-o', '--output_directory', dest='output_directory',
+ default='/tmp/', metavar='DIR',
+ help='Output directory for image.')
+ parser.add_option('--output_file_name', dest='output_file_name',
+ default=None, metavar='FILENAME',
+ help=('Output filename for the image. Default is a digest'
+ ' of the image bytes.'))
+ parser.add_option('--include_mounts', dest='include_mounts',
+ help='Don\'t ignore mounted filesystems under ROOT.',
+ action='store_true', default=False)
+ parser.add_option('-v', '--version',
+ action='store_true', dest='display_version', default=False,
+ help='Print the tool version.')
+  parser.add_option('--loglevel', dest='log_level',
+                    help='Debug logging level.', default='INFO',
+                    choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'])
+ parser.add_option('--log_file', dest='log_file',
+ help='Output file for log messages.')
+ parser.add_option('-k', '--key', dest='key', default='nebula',
+ help='Public key used for signing the image.')
+  parser.add_option('--nocleanup', dest='cleanup',
+                    action='store_false', default=True,
+                    help='Do not clean up temporary and log files.')
+ #TODO(user): Get dehumanize.
+ parser.add_option('--fssize', dest='fs_size', default=10*1024*1024*1024,
+ type='int', help='File system size in bytes')
+ parser.add_option('-b', '--bucket', dest='bucket',
+ help='Destination storage bucket')
+ parser.add_option('-f', '--filesystem', dest='file_system',
+ default=None,
+ help='File system type for the image.')
+ parser.add_option('--skip_disk_space_check', dest='skip_disk_space_check',
+ default=False, action='store_true',
+ help='Skip the disk space requirement check.')
+
+ return parser
+
+
+def VerifyArgs(parser, options):
+ """Verifies that commandline flags are consistent."""
+ if not options.output_directory:
+ parser.error('output bundle directory must be specified.')
+ if not os.path.exists(options.output_directory):
+ parser.error('output bundle directory does not exist.')
+
+ # TODO(user): add more verification as needed
+
+def EnsureSuperUser():
+ """Ensures that current user has super user privileges."""
+ if os.getuid() != 0:
+ logging.warning('Tool must be run as root.')
+ exit(-1)
+
+
+def GetLogLevel(options):
+ """Log Level string to logging.LogLevel mapping."""
+ level = {
+ 'DEBUG': logging.DEBUG,
+ 'INFO': logging.INFO,
+ 'WARNING': logging.WARNING,
+ 'ERROR': logging.ERROR,
+ 'CRITICAL': logging.CRITICAL
+ }
+ if options.log_level in level:
+ return level[options.log_level]
+  print 'Invalid logging level. Defaulting to INFO.'
+ return logging.INFO
+
+
+def SetupLogging(options, log_dir='/tmp'):
+ """Set up logging.
+
+ All messages above INFO level are also logged to console.
+
+ Args:
+ options: collection of command line options.
+ log_dir: directory used to generate log files.
+ """
+ if options.log_file:
+ logfile = options.log_file
+ else:
+ logfile = tempfile.mktemp(dir=log_dir, prefix='bundle_log_')
+ print 'Starting logging in %s' % logfile
+ logging.basicConfig(filename=logfile,
+ level=GetLogLevel(options),
+ format='%(asctime)s %(levelname)s:%(name)s:%(message)s')
+ # Use GMT timestamp in logging.
+ logging.Formatter.converter=time.gmtime
+ console = logging.StreamHandler()
+ console.setLevel(GetLogLevel(options))
+ logging.getLogger().addHandler(console)
+
+
+def PrintVersionInfo():
+ #TODO: Should read from the VERSION file instead.
+ print 'version 1.3.1'
+
+
+def GetTargetFilesystem(options, guest_platform):
+ if options.file_system:
+ return options.file_system
+ else:
+ return guest_platform.GetPreferredFilesystemType()
+
+
+def main():
+ parser = SetupArgsParser()
+ (options, _) = parser.parse_args()
+ if options.display_version:
+ PrintVersionInfo()
+ return 0
+ EnsureSuperUser()
+ VerifyArgs(parser, options)
+
+ scratch_dir = tempfile.mkdtemp(dir=options.output_directory)
+ SetupLogging(options, scratch_dir)
+ logging.warn('============================================================\n'
+ 'Warning: gcimagebundle is deprecated. See\n'
+ 'https://cloud.google.com/compute/docs/creating-custom-image'
+ '#export_an_image_to_google_cloud_storage\n'
+ 'for updated instructions.\n'
+ '============================================================')
+ try:
+ guest_platform = platform_factory.PlatformFactory(
+ options.root_directory).GetPlatform()
+ except platform_factory.UnknownPlatformException:
+ logging.critical('Platform is not supported.'
+ ' Platform rules can be added to platform_factory.py.')
+ return -1
+
+ temp_file_name = tempfile.mktemp(dir=scratch_dir, suffix='.tar.gz')
+
+ file_system = GetTargetFilesystem(options, guest_platform)
+ logging.info('File System: %s', file_system)
+ logging.info('Disk Size: %s bytes', options.fs_size)
+ bundle = block_disk.RootFsRaw(
+ options.fs_size, file_system, options.skip_disk_space_check)
+ bundle.SetTarfile(temp_file_name)
+ if options.disk:
+ readlink_command = ['readlink', '-f', options.disk]
+ final_path = utils.RunCommand(readlink_command).strip()
+ logging.info('Resolved %s to %s', options.disk, final_path)
+ bundle.AddDisk(final_path)
+ # TODO(user): Find the location where the first partition of the disk
+ # is mounted and add it as the source instead of relying on the source
+ # param flag
+ bundle.AddSource(options.root_directory)
+ bundle.SetKey(options.key)
+ bundle.SetScratchDirectory(scratch_dir)
+
+  # Merge platform specific exclude list, mount points
+ # and user specified excludes
+ excludes = guest_platform.GetExcludeList()
+ if options.excludes:
+ excludes.extend([exclude_spec.ExcludeSpec(x) for x in
+ options.excludes.split(',')])
+ logging.info('exclude list: %s', ' '.join([x.GetSpec() for x in excludes]))
+ bundle.AppendExcludes(excludes)
+ if not options.include_mounts:
+ mount_points = utils.GetMounts(options.root_directory)
+ logging.info('ignoring mounts %s', ' '.join(mount_points))
+ bundle.AppendExcludes([exclude_spec.ExcludeSpec(x, preserve_dir=True) for x
+ in utils.GetMounts(options.root_directory)])
+ bundle.SetPlatform(guest_platform)
+
+ # Verify that bundle attributes are correct and create tar bundle.
+ bundle.Verify()
+ (fs_size, digest) = bundle.Bundleup()
+ if not digest:
+ logging.critical('Could not get digest for the bundle.'
+ ' The bundle may not be created correctly')
+ return -1
+ if fs_size > options.fs_size:
+ logging.critical('Size of tar %d exceeds the file system size %d.', fs_size,
+ options.fs_size)
+ return -1
+
+ if options.output_file_name:
+ output_file = os.path.join(
+ options.output_directory, options.output_file_name)
+ else:
+ output_file = os.path.join(
+ options.output_directory, '%s.image.tar.gz' % digest)
+
+ os.rename(temp_file_name, output_file)
+  logging.info('Created tar.gz file at %s', output_file)
+
+ if options.bucket:
+ bucket = options.bucket
+ if bucket.startswith('gs://'):
+ output_bucket = '%s/%s' % (
+ bucket, os.path.basename(output_file))
+ else:
+ output_bucket = 'gs://%s/%s' % (
+ bucket, os.path.basename(output_file))
+
+ # /usr/local/bin not in redhat root PATH by default
+ if '/usr/local/bin' not in os.environ['PATH']:
+ os.environ['PATH'] += ':/usr/local/bin'
+
+ # TODO: Consider using boto library directly.
+ cmd = ['gsutil', 'cp', output_file, output_bucket]
+ retcode = subprocess.call(cmd)
+ if retcode != 0:
+ logging.critical('Failed to copy image to bucket. '
+ 'gsutil returned %d. To retry, run the command: %s',
+ retcode, ' '.join(cmd))
+
+ return -1
+ logging.info('Uploaded image to %s', output_bucket)
+
+ # If we've uploaded, then we can remove the local file.
+ os.remove(output_file)
+
+ if options.cleanup:
+ shutil.rmtree(scratch_dir)
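
The option parser above defines the tool's entire command-line surface; a
quick sketch of driving it programmatically under the tool's own Python 2
environment (the argument values are arbitrary examples):

    from gcimagebundlelib import imagebundle

    parser = imagebundle.SetupArgsParser()
    options, _ = parser.parse_args(
        ['-d', '/dev/sdb', '-o', '/tmp', '--fssize', '21474836480'])
    assert options.disk == '/dev/sdb'
    assert options.output_directory == '/tmp'
    assert options.fs_size == 21474836480
    assert options.skip_disk_space_check is False   # default
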
diff --git a/gcimagebundle/gcimagebundlelib/linux.py b/gcimagebundle/gcimagebundlelib/linux.py
new file mode 100644
index 0000000..ff8c1d4
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/linux.py
@@ -0,0 +1,135 @@
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Base class for Linux platform specific information."""
+
+
+
+import os
+import platform
+import stat
+
+from gcimagebundlelib import exclude_spec
+from gcimagebundlelib import os_platform
+
+
+class LinuxPlatform(os_platform.Platform):
+ """Base class for all Linux flavors."""
+ EXCLUDE_LIST = [
+ exclude_spec.ExcludeSpec('/etc/ssh/.host_key_regenerated'),
+ exclude_spec.ExcludeSpec('/dev', preserve_dir=True),
+ exclude_spec.ExcludeSpec('/proc', preserve_dir=True),
+ exclude_spec.ExcludeSpec('/run',
+ preserve_dir=True, preserve_subdir=True),
+ exclude_spec.ExcludeSpec('/selinux'),
+ exclude_spec.ExcludeSpec('/tmp', preserve_dir=True),
+ exclude_spec.ExcludeSpec('/sys', preserve_dir=True),
+ exclude_spec.ExcludeSpec('/var/lib/google/per-instance',
+ preserve_dir=True),
+ exclude_spec.ExcludeSpec('/var/lock',
+ preserve_dir=True, preserve_subdir=True),
+ exclude_spec.ExcludeSpec('/var/log',
+ preserve_dir=True, preserve_subdir=True),
+ exclude_spec.ExcludeSpec('/var/run',
+ preserve_dir=True, preserve_subdir=True)]
+
+ def __init__(self):
+ """Populate the uname -a information."""
+ super(LinuxPlatform, self).__init__()
+ (self.name, self.hostname, self.release, self.version, self.architecture,
+ self.processor) = platform.uname()
+ (self.distribution, self.distribution_version,
+ self.distribution_codename) = platform.dist()
+
+ def GetPlatformDetails(self):
+ return ' '.join([self.name, self.hostname, self.release, self.version,
+ self.architecture, self.processor, self.distribution,
+ self.distribution_version, self.distribution_codename])
+
+ def GetName(self):
+ return self.GetOs()
+
+ def GetProcessor(self):
+ return platform.processor()
+
+ def GetArchitecture(self):
+ if self.architecture:
+ return self.architecture
+ return ''
+
+ def GetOs(self):
+ if self.distribution:
+ if self.distribution_codename:
+ return '%s (%s)' % (self.distribution, self.distribution_codename)
+ else:
+ return self.distribution
+ if self.name:
+ return self.name
+ return 'Linux'
+
+ def IsLinux(self):
+ return True
+
+ # Linux specific methods
+ def GetKernelVersion(self):
+ return self.release
+
+ # distribution specific methods
+ # if platforms module does not do a good job override these.
+ def GetDistribution(self):
+ return self.distribution
+
+ def GetDistributionCodeName(self):
+ return self.distribution_codename
+
+ def GetDistributionVersion(self):
+ return self.distribution_version
+
+ def GetPlatformSpecialFiles(self, tmpdir='/tmp'):
+ """Creates any platform specific special files."""
+ retval = []
+ console_dev = os.makedev(5, 1)
+ os.mknod(tmpdir + 'console', stat.S_IFCHR |
+ stat.S_IRUSR | stat.S_IWUSR, console_dev)
+ retval.append((tmpdir + 'console', 'dev/console'))
+ null_dev = os.makedev(1, 3)
+ os.mknod(tmpdir + 'null', stat.S_IFCHR |
+ stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP |
+ stat.S_IROTH | stat.S_IWOTH, null_dev)
+ retval.append((tmpdir + 'null', 'dev/null'))
+ tty_dev = os.makedev(5, 0)
+ os.mknod(tmpdir + 'tty', stat.S_IFCHR |
+ stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP |
+ stat.S_IROTH | stat.S_IWOTH, tty_dev)
+ retval.append((tmpdir + 'tty', 'dev/tty'))
+ zero_dev = os.makedev(1, 5)
+ os.mknod(tmpdir + 'zero', stat.S_IFCHR |
+ stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP |
+ stat.S_IROTH | stat.S_IWOTH, zero_dev)
+ retval.append((tmpdir + 'zero', 'dev/zero'))
+ # /selinux is deprecated in favor of /sys/fs/selinux, but preserve it on
+ # those OSes where it's present.
+ if os.path.isdir('/selinux'):
+ os.mkdir(tmpdir + 'selinux', 0755)
+ retval.append((tmpdir + 'selinux', 'selinux'))
+ return retval
+
+ def Overwrite(self, filename, arcname, tmpdir='/tmp'):
+ """Overwrites specified file if needed for the Linux platform."""
+ pass
+
+ def GetPreferredFilesystemType(self):
+ """Return the optimal filesystem supported for the platform."""
+ return 'ext4'
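
The EXCLUDE_LIST above is exposed through the base Platform API; a tiny sketch
of inspecting it (instantiating LinuxPlatform only calls platform.uname() and
platform.dist(), so this assumes the Python 2 era platform module):

    from gcimagebundlelib import linux

    plat = linux.LinuxPlatform()
    for spec in plat.GetExcludeList()[:3]:
        print(spec.GetSpec())
    # -> (/etc/ssh/.host_key_regenerated, 0:0:0)
    #    (/dev, 0:1:0)
    #    (/proc, 0:1:0)
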
diff --git a/gcimagebundle/gcimagebundlelib/manifest.py b/gcimagebundle/gcimagebundlelib/manifest.py
new file mode 100755
index 0000000..2e83d9e
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/manifest.py
@@ -0,0 +1,79 @@
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Image manifest."""
+
+
+import json
+from gcimagebundlelib import utils
+
+
+class ImageManifest(object):
+ """Retrieves metadata from the instance and stores it in manifest.json.
+
+ The image manifest is a JSON file that is bundled along side the disk.
+
+ Included Metadata
+ - Licenses
+ """
+
+ def __init__(self, http=utils.Http(), is_gce_instance=True):
+ self._http = http
+ self._licenses = []
+ self._is_gce_instance = is_gce_instance
+
+ def CreateIfNeeded(self, file_path):
+ """Creates the manifest file to the specified path if it's needed.
+
+ Args:
+ file_path: Location of where the manifest should be written to.
+
+ Returns:
+      True: Manifest was written to file_path.
+      False: Manifest was not created.
+ """
+ if self._is_gce_instance:
+ self._LoadLicenses()
+ if self._IsManifestNeeded():
+ with open(file_path, 'w') as manifest_file:
+ self._WriteToFile(manifest_file)
+ return True
+ return False
+
+ def _LoadLicenses(self):
+ """Loads the licenses from the metadata server if they exist."""
+ response = self._http.GetMetadata('instance/', recursive=True)
+ instance_metadata = json.loads(response)
+ if 'licenses' in instance_metadata:
+ for license_obj in instance_metadata['licenses']:
+ self._licenses.append(license_obj['id'])
+
+ def _ToJson(self):
+ """Formats the image metadata as a JSON object."""
+ return json.dumps(
+ {
+ 'licenses': self._licenses
+ })
+
+ def _IsManifestNeeded(self):
+ """Determines if a manifest should be bundled with the disk."""
+ if self._licenses:
+ return len(self._licenses)
+ return False
+
+ def _WriteToFile(self, file_obj):
+ """Writes the manifest data to the file handle."""
+ manifest_json = self._ToJson()
+ file_obj.write(manifest_json)
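
The manifest that _WriteToFile produces is just the licenses list serialized
as JSON; a sketch with a made-up license id (http=None is safe here because
the metadata server is only contacted on a GCE instance):

    from gcimagebundlelib import manifest

    m = manifest.ImageManifest(http=None, is_gce_instance=False)
    m._licenses = ['1000001']   # made-up license id
    print(m._ToJson())
    # -> {"licenses": ["1000001"]}
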
diff --git a/gcimagebundle/gcimagebundlelib/opensuse.py b/gcimagebundle/gcimagebundlelib/opensuse.py
new file mode 100644
index 0000000..9f709ff
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/opensuse.py
@@ -0,0 +1,29 @@
+# Copyright 2013 SUSE LLC All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""openSUSE platform info."""
+
+from gcimagebundlelib import suse
+
+class OpenSUSE(suse.SUSE):
+ """openSUSE platform info."""
+
+ @staticmethod
+ def IsThisPlatform(root='/'):
+ return 'openSUSE' == suse.SUSE().distribution
+
+ def __init__(self):
+ super(OpenSUSE, self).__init__()
+
diff --git a/gcimagebundle/gcimagebundlelib/os_platform.py b/gcimagebundle/gcimagebundlelib/os_platform.py
new file mode 100644
index 0000000..65e6e7c
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/os_platform.py
@@ -0,0 +1,70 @@
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Base class for platform specific information."""
+
+
+class Platform(object):
+ """Base class for platform information."""
+ EXCLUDE_LIST = []
+ OVERWRITE_LIST = []
+
+ @staticmethod
+ def IsThisPlatform(root='/'):
+ return False
+
+ def __init__(self):
+ pass
+
+ def GetName(self):
+ """Generic name for the platform."""
+ return 'Unknown'
+
+ def GetProcessor(self):
+ """Real processor."""
+ return ''
+
+ def GetArchitecture(self):
+ """Returns machine architecture."""
+ return ''
+
+ def GetExcludeList(self):
+ """Returns the default exclude list of the platform."""
+ return self.__class__.EXCLUDE_LIST
+
+ def GetOs(self):
+ """Returns the name of OS."""
+ return 'Unknown'
+
+ def IsLinux(self):
+ return False
+
+ def IsWindows(self):
+ return False
+
+ def IsUnix(self):
+ return False
+
+ def GetOverwriteList(self):
+ """Returns list of platform specific files to overwrite."""
+ return self.__class__.OVERWRITE_LIST
+
+ def Overwrite(self, file_path, file_name, scratch_dir):
+ """Called for each file in the OverwriteList."""
+ return file_name
+
+ def GetPlatformSpecialFiles(self, tmpdir):
+ """returns a list of platform special files that should be created."""
+ return []
diff --git a/gcimagebundle/gcimagebundlelib/platform_factory.py b/gcimagebundle/gcimagebundlelib/platform_factory.py
new file mode 100644
index 0000000..da63f0e
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/platform_factory.py
@@ -0,0 +1,60 @@
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Factory that guesses the correct platform and creates it."""
+
+import logging
+
+from gcimagebundlelib import centos
+from gcimagebundlelib import fedora
+from gcimagebundlelib import debian
+from gcimagebundlelib import gcel
+from gcimagebundlelib import opensuse
+from gcimagebundlelib import rhel
+from gcimagebundlelib import sle
+from gcimagebundlelib import ubuntu
+
+
+class UnknownPlatformException(Exception):
+ """The platform could not be correctly determined."""
+
+
+class PlatformFactory(object):
+ """Guess the platform and create it."""
+
+ def __init__(self, root='/'):
+ self.__root = root
+ self.__registry = {}
+ self.__platform_registry = {}
+ self.Register('Centos', centos.Centos)
+ self.Register('Fedora', fedora.Fedora)
+ self.Register('Debian', debian.Debian)
+ self.Register('GCEL', gcel.Gcel)
+ self.Register('openSUSE', opensuse.OpenSUSE)
+ self.Register('Red Hat Enterprise Linux', rhel.RHEL)
+ self.Register('SUSE Linux Enterprise', sle.SLE)
+ self.Register('Ubuntu', ubuntu.Ubuntu)
+
+ def Register(self, name, klass):
+ self.__registry[name] = klass
+
+ def GetPlatform(self):
+ for name in self.__registry:
+ if self.__registry[name].IsThisPlatform(self.__root):
+ logging.info('found platform %s', name)
+ return self.__registry[name]()
+ else:
+ logging.debug('skipping platform %s %s ', name, self.__registry[name])
+ raise UnknownPlatformException('Could not determine host platform.')
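
New distributions are supported by registering another Platform subclass, as
the error message in imagebundle.py suggests; a hypothetical example (the
class, release file and name are invented):

    import os

    from gcimagebundlelib import os_platform
    from gcimagebundlelib import platform_factory

    class MyDistro(os_platform.Platform):
        """Hypothetical distro used to illustrate the registry."""

        @staticmethod
        def IsThisPlatform(root='/'):
            return os.path.exists(root + '/etc/mydistro-release')

    factory = platform_factory.PlatformFactory('/')
    factory.Register('MyDistro', MyDistro)
    # GetPlatform() returns an instance of the first registered class whose
    # IsThisPlatform(root) is True, otherwise raises UnknownPlatformException.
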
diff --git a/gcimagebundle/gcimagebundlelib/rhel.py b/gcimagebundle/gcimagebundlelib/rhel.py
new file mode 100644
index 0000000..9ebf1ef
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/rhel.py
@@ -0,0 +1,42 @@
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Red Hat Enterprise Linux Linux specific platform info."""
+
+
+
+import platform
+
+from gcimagebundlelib import linux
+
+
+class RHEL(linux.LinuxPlatform):
+ """Red Hat Enterprise Linux specific information."""
+
+ @staticmethod
+ def IsThisPlatform(root='/'):
+ (distribution, _, _) = platform.linux_distribution()
+ if distribution == 'Red Hat Enterprise Linux Server':
+ return True
+ return False
+
+ def __init__(self):
+ super(RHEL, self).__init__()
+
+ def GetPreferredFilesystemType(self):
+ (_,version,_) = platform.linux_distribution()
+ if version.startswith('7'):
+ return 'xfs'
+ return 'ext4'
diff --git a/gcimagebundle/gcimagebundlelib/sle.py b/gcimagebundle/gcimagebundlelib/sle.py
new file mode 100644
index 0000000..8b74827
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/sle.py
@@ -0,0 +1,34 @@
+# Copyright 2013 SUSE LLC All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""SUSE Linux Enterprise (SLE) platform info."""
+
+import re
+from gcimagebundlelib import suse
+
+class SLE(suse.SUSE):
+ """SLE platform info."""
+
+ @staticmethod
+  def IsThisPlatform(root='/'):
+ if re.match(r'SUSE Linux Enterprise', suse.SUSE().distribution):
+ return True
+ return False
+
+ def __init__(self):
+ super(SLE, self).__init__()
+
+ def GetPreferredFilesystemType(self):
+ return 'ext3'
diff --git a/gcimagebundle/gcimagebundlelib/suse.py b/gcimagebundle/gcimagebundlelib/suse.py
new file mode 100644
index 0000000..4911b8b
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/suse.py
@@ -0,0 +1,91 @@
+# Copyright 2013 SUSE LLC All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""openSUSE and SUSE generic platform info."""
+
+import os
+import re
+
+from gcimagebundlelib import linux
+
+
+class SUSE(linux.LinuxPlatform):
+ """openSUSE and SUSE generic platform info."""
+
+ def __init__(self):
+ super(SUSE, self).__init__()
+ self.distribution_codename = None
+ self.ParseOSRelease()
+ if not self.distribution:
+ self.ParseSUSERelease()
+ if not self.distribution:
+ self.distribution = ''
+
+ def ParseOSRelease(self):
+ """Parse the /etc/os-release file."""
+ release_file = '/etc/os-release'
+ if not os.path.isfile(release_file):
+ self.distribution = None
+ return
+ lines = open(release_file, 'r').readlines()
+ for ln in lines:
+ if not ln:
+ continue
+ if re.match(r'^NAME=', ln):
+ self.distribution = self.__getData(ln)
+ if re.match(r'^VERSION_ID=', ln):
+ self.distribution_version = self.__getData(ln)
+ if re.match(r'^VERSION=', ln):
+ data = self.__getData(ln)
+ self.distribution_codename = data.split('(')[-1][:-1]
+ return
+
+ def ParseSUSERelease(self):
+ """Parse /etc/SuSE-release file."""
+ release_file = '/etc/SuSE-release'
+ if not os.path.isfile(release_file):
+ self.distribution = None
+ return
+ lines = open(release_file, 'r').readlines()
+ prts = lines[0].split()
+ cnt = 0
+ self.distribution = ''
+ if len(prts):
+      while cnt < len(prts):
+        item = prts[cnt]
+        if re.match(r'\d', item):
+          break
+ elif cnt > 0:
+ self.distribution += ' '
+ self.distribution += item
+ cnt += 1
+
+ for ln in lines:
+ if re.match(r'^VERSION =', ln):
+ self.distribution_version = self.__getData(ln)
+ if re.match(r'^CODENAME =', ln):
+ self.distribution_codename = self.__getData(ln)
+ return
+
+ def __getData(self, ln):
+ """Extract data from a line in a file. Either returns data inside the
+ first double quotes ("a b"; a b in this example) or if no double
+ quotes exist, returns the data after the first = sign. Leading
+ and trailing whitspace are stripped."""
+ if ln.find('"') != -1:
+ return ln.split('"')[1]
+ else:
+ return ln.split('=')[-1].strip()
diff --git a/gcimagebundle/gcimagebundlelib/tests/__init__.py b/gcimagebundle/gcimagebundlelib/tests/__init__.py
new file mode 100644
index 0000000..42723d7
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/tests/__init__.py
@@ -0,0 +1,16 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unit tests module for Image Bundle."""
diff --git a/gcimagebundle/gcimagebundlelib/tests/block_disk_test.py b/gcimagebundle/gcimagebundlelib/tests/block_disk_test.py
new file mode 100755
index 0000000..1cbb384
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/tests/block_disk_test.py
@@ -0,0 +1,512 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for block_disk.py module."""
+
+
+__pychecker__ = 'no-local' # for unittest
+
+from contextlib import closing
+import json
+import logging
+import os
+import random
+import subprocess
+import tarfile
+import tempfile
+import unittest
+import urllib2
+
+from gcimagebundlelib import block_disk
+from gcimagebundlelib import exclude_spec
+from gcimagebundlelib.tests import image_bundle_test_base
+from gcimagebundlelib import utils
+
+
+class FsRawDiskTest(image_bundle_test_base.ImageBundleTest):
+ """FsRawDisk Unit Test."""
+
+ _MEGABYTE = 1024*1024
+ _GIGABYTE = 1024*_MEGABYTE
+
+ def setUp(self):
+ super(FsRawDiskTest, self).setUp()
+    self._fs_size = 10 * FsRawDiskTest._MEGABYTE
+ self._bundle = block_disk.FsRawDisk(self._fs_size, 'ext4')
+ self._tar_path = self.tmp_path + '/image.tar.gz'
+ self._bundle.SetTarfile(self._tar_path)
+ self._bundle.AppendExcludes([exclude_spec.ExcludeSpec(self._tar_path)])
+ self._bundle.SetKey('key')
+ self._bundle._SetManifest(self._manifest)
+
+ def _SetupMbrDisk(self, partition_start, partition_end, fs_uuid):
+ """Creates a disk with a fake MBR.
+
+ Args:
+ partition_start: The byte offset where the partition starts.
+ partition_end: The byte offset where the partition ends.
+ fs_uuid: The UUID of the filesystem to create on the partition.
+
+ Returns:
+ The path where the disk is located.
+ """
+ # Create the disk file with the size specified.
+ disk_path = os.path.join(self.tmp_root, 'mbrdisk.raw')
+ disk_size = partition_end + FsRawDiskTest._MEGABYTE
+ with open(disk_path, 'wb') as disk_file:
+ disk_file.truncate(disk_size)
+
+ # Create a partition table
+ utils.MakePartitionTable(disk_path)
+
+ # Create the partition
+ utils.MakePartition(disk_path, 'primary', 'ext2',
+ partition_start, partition_end)
+
+ # Create the file system
+ with utils.LoadDiskImage(disk_path) as devices:
+ utils.MakeFileSystem(devices[0], 'ext4', fs_uuid)
+
+ # Write some data after the MBR but before the first partition
+ with open(disk_path, 'r+b') as disk_file:
+ # Seek to last two bytes of first sector
+ disk_file.seek(510)
+ # Write MBR signature
+ disk_file.write(chr(0x55))
+ disk_file.write(chr(0xAA))
+ # Write random data on the disk till the point first partition starts
+ for _ in range(partition_start - 512):
+ # Write a byte
+ disk_file.write(chr(random.randint(0, 127)))
+
+ return disk_path
+
+ def tearDown(self):
+ super(FsRawDiskTest, self).tearDown()
+
+ def testDiskBundle(self):
+ """Tests bundle command when a disk is specified.
+
+    Creates a 20GB source disk to start with and verifies that bundling
+    it into a 10MB image works.
+ """
+ # Create a 20GB disk with first partition starting at 1MB
+ self._TestDiskBundleHelper(FsRawDiskTest._MEGABYTE,
+ FsRawDiskTest._GIGABYTE*20,
+ utils.RunCommand(['uuidgen']).strip())
+
+ def testDiskBundlePartitionAt2MB(self):
+ """Tests bundle command when a disk is specified.
+
+ Creates the first partition at 2MB and verifies all data prior to that is
+ copied.
+ """
+ # Create a 20GB disk with first partition starting at 2MB
+ self._TestDiskBundleHelper(FsRawDiskTest._MEGABYTE*2,
+ FsRawDiskTest._GIGABYTE*20,
+ utils.RunCommand(['uuidgen']).strip())
+
+ def _TestDiskBundleHelper(self, partition_start, partition_end, fs_uuid):
+ disk_path = self._SetupMbrDisk(partition_start, partition_end, fs_uuid)
+
+ with utils.LoadDiskImage(disk_path) as devices:
+      # Get the path to the disk.
+      # devices will contain something like /dev/mapper/loop0p1.
+      # We need to get loop0 out of it.
+ disk_loop_back_path = '/dev/' + devices[0].split('/')[3][:-2]
+
+      # Create symlinks to the disk and partition device paths.
+      # This is required because the code currently assumes the first
+      # partition's device path is the disk path with '1' appended. Remove
+      # this once that part of the code is updated.
+ symlink_disk = os.path.join(self.tmp_root, 'disk')
+ symlink_partition = self.tmp_root + '/disk1'
+ utils.RunCommand(['ln', '-s', disk_loop_back_path, symlink_disk])
+ utils.RunCommand(['ln', '-s', devices[0], symlink_partition])
+
+ # Bundle up
+ self._bundle.AddDisk(symlink_disk)
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.Verify()
+ (_, _) = self._bundle.Bundleup()
+ self._VerifyImageHas(self._tar_path,
+ ['lost+found', 'test1', 'test2', 'dir1/',
+ '/dir1/dir11/', '/dir1/sl1', '/dir1/hl2', 'dir2/',
+ '/dir2/dir1', '/dir2/sl2', '/dir2/hl1'])
+ self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test1', 2)
+ self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test2', 2)
+ self._VerifyDiskSize(self._tar_path, self._fs_size)
+ self._VerifyNonPartitionContents(self._tar_path,
+ disk_path,
+ partition_start)
+ self._VerifyFilesystemUUID(self._tar_path, fs_uuid)
+
+ def testRawDisk(self):
+ """Tests the regular operation. No expected error."""
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.Verify()
+ (_, digest) = self._bundle.Bundleup()
+ if not digest:
+ self.fail('raw disk failed')
+ self._VerifyTarHas(self._tar_path, ['disk.raw'])
+ self._VerifyImageHas(self._tar_path,
+ ['lost+found', 'test1', 'test2', 'dir1/',
+ '/dir1/dir11/', '/dir1/sl1', '/dir1/hl2', 'dir2/',
+ '/dir2/dir1', '/dir2/sl2', '/dir2/hl1'])
+ self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test1', 2)
+ self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test2', 2)
+
+ def testRawDiskIgnoresHardlinks(self):
+ """Tests if the raw disk ignores hard links if asked."""
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.IgnoreHardLinks()
+ self._bundle.Verify()
+ (_, digest) = self._bundle.Bundleup()
+ if not digest:
+ self.fail('raw disk failed')
+ self._VerifyTarHas(self._tar_path, ['disk.raw'])
+ self._VerifyImageHas(self._tar_path,
+ ['lost+found', 'test1', 'test2', 'dir1/',
+ '/dir1/dir11/', '/dir1/sl1', '/dir1/hl2', 'dir2/',
+ '/dir2/dir1', '/dir2/sl2', '/dir2/hl1'])
+ self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test1', 1)
+ self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test2', 1)
+
+ def testRawDiskIgnoresExcludes(self):
+ """Tests if the raw disk ignores specified excludes files."""
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.AppendExcludes(
+ [exclude_spec.ExcludeSpec(self.tmp_path + '/dir1')])
+ self._bundle.Verify()
+ (_, digest) = self._bundle.Bundleup()
+ if not digest:
+ self.fail('raw disk failed')
+ self._VerifyTarHas(self._tar_path, ['disk.raw'])
+ self._VerifyImageHas(self._tar_path,
+ ['lost+found', 'test1', 'test2', 'dir2/', '/dir2/dir1',
+ '/dir2/sl2', '/dir2/hl1'])
+
+ def testRawDiskExcludePreservesSubdirs(self):
+ """Tests if excludes preserves subdirs underneath if asked."""
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.AppendExcludes(
+ [exclude_spec.ExcludeSpec(self.tmp_path + '/dir1',
+ preserve_dir=True,
+ preserve_subdir=True)])
+ self._bundle.Verify()
+ (_, digest) = self._bundle.Bundleup()
+ if not digest:
+ self.fail('raw disk failed')
+ self._VerifyTarHas(self._tar_path, ['disk.raw'])
+ self._VerifyImageHas(self._tar_path,
+ ['lost+found', 'test1', 'test2', 'dir1/',
+ '/dir1/dir11', 'dir2/', '/dir2/dir1',
+ '/dir2/sl2', '/dir2/hl1'])
+
+ def testRawDiskExcludePreservesFiles(self):
+ """Tests if excludes preserves the files underneath if asked."""
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.AppendExcludes(
+ [exclude_spec.ExcludeSpec(self.tmp_path + '/dir1',
+ preserve_dir=True,
+ preserve_file=True)])
+ self._bundle.Verify()
+ (_, digest) = self._bundle.Bundleup()
+ if not digest:
+ self.fail('raw disk failed')
+ self._VerifyTarHas(self._tar_path, ['disk.raw'])
+ self._VerifyImageHas(self._tar_path,
+ ['lost+found', 'test1', 'test2', 'dir1/', '/dir1/hl2',
+ '/dir1/sl1', 'dir2/', '/dir2/dir1', '/dir2/sl2',
+ '/dir2/hl1'])
+
+ def testRawDiskUsesModifiedFiles(self):
+ """Tests if the raw disk uses modified files."""
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.AppendExcludes(
+ [exclude_spec.ExcludeSpec(self.tmp_path + '/dir1')])
+ self._bundle.SetPlatform(image_bundle_test_base.MockPlatform(self.tmp_root))
+ self._bundle.Verify()
+ (_, digest) = self._bundle.Bundleup()
+ if not digest:
+ self.fail('raw disk failed')
+ self._VerifyTarHas(self._tar_path, ['disk.raw'])
+ self._VerifyImageHas(self._tar_path,
+ ['lost+found', 'test1', 'test2', 'dir2/',
+ '/dir2/dir1', '/dir2/sl2', '/dir2/hl1'])
+ self._VerifyFileInRawDiskEndsWith(self._tar_path, 'test1',
+ 'something extra.')
+
+ def testRawDiskGeneratesCorrectDigest(self):
+ """Tests if the SHA1 digest generated is accurate."""
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.Verify()
+ (_, digest) = self._bundle.Bundleup()
+ if not digest:
+ self.fail('raw disk failed')
+ p = subprocess.Popen(['/usr/bin/openssl dgst -sha1 ' + self._tar_path],
+ stdout=subprocess.PIPE, shell=True)
+ file_digest = p.communicate()[0].split('=')[1].strip()
+ self.assertEqual(digest, file_digest)
+
+ def testRawDiskHonorsRecursiveOff(self):
+ """Tests if raw disk handles recursive off."""
+ self._bundle.AppendExcludes([exclude_spec.ExcludeSpec(self._tar_path)])
+ self._bundle.AddSource(self.tmp_path + '/dir1',
+ arcname='dir1', recursive=False)
+ self._bundle.AddSource(self.tmp_path + '/dir2', arcname='dir2')
+ self._bundle.Verify()
+ (_, digest) = self._bundle.Bundleup()
+ if not digest:
+ self.fail('raw disk failed')
+ self._VerifyTarHas(self._tar_path, ['disk.raw'])
+ self._VerifyImageHas(self._tar_path,
+ ['lost+found', 'dir1/', 'dir2/', '/dir2/dir1',
+ '/dir2/sl2', '/dir2/hl1'])
+
+ def testSkipLicenseCheckIfNotOnGCE(self):
+ """Tests that no licenses are loaded if gcimagebundle is not run on GCE."""
+ class MockHttp(utils.Http):
+ def Get(self, request, timeout=None):
+        # If gcimagebundle is not run on GCE, the metadata server will be
+        # unreachable.
+ raise urllib2.URLError("urlopen error timed out")
+
+ self._http = MockHttp()
+ self._manifest._http = self._http
+ self._manifest._is_gce_instance = False
+
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.Verify()
+ _ = self._bundle.Bundleup()
+ self.assertFalse(self._bundle._manifest._IsManifestNeeded())
+ self._VerifyTarHas(self._tar_path, ['disk.raw'])
+
+ def testNoManifestCreatedWithZeroLicenses(self):
+ """Tests that no manifest is created when there are 0 licenses."""
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.Verify()
+ _ = self._bundle.Bundleup()
+ self.assertFalse(self._bundle._manifest._IsManifestNeeded())
+ self._VerifyTarHas(self._tar_path, ['disk.raw'])
+
+ def testManifestWithOneLicense(self):
+ """Tests manifest is populated with 1 license."""
+ self._http._instance_response = ('{"hostname":"test",'
+ '"licenses":[{"id":"TEST-LICENSE"}]}')
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.Verify()
+ _ = self._bundle.Bundleup()
+ manifest_json = self._bundle._manifest._ToJson()
+ manifest_obj = json.loads(manifest_json)
+ self.assertTrue(self._bundle._manifest._IsManifestNeeded())
+ self.assertEqual(1, len(manifest_obj['licenses']))
+ self.assertEqual('TEST-LICENSE', manifest_obj['licenses'][0])
+ self._VerifyTarHas(self._tar_path, ['manifest.json', 'disk.raw'])
+ self._VerifyFileContentsInTarball(self._tar_path,
+ 'manifest.json',
+ '{"licenses": ["TEST-LICENSE"]}')
+
+ def testManifestWithTwoLicenses(self):
+ """Tests manifest is populated with 2 licenses."""
+ self._http._instance_response = ('{"hostname":"test",'
+ '"licenses":[{"id":"TEST-1"},'
+ '{"id":"TEST-2"}]}')
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.Verify()
+ _ = self._bundle.Bundleup()
+ manifest_json = self._bundle._manifest._ToJson()
+ manifest_obj = json.loads(manifest_json)
+ self.assertTrue(self._bundle._manifest._IsManifestNeeded())
+ self.assertEqual(2, len(manifest_obj['licenses']))
+ self.assertEqual('TEST-1', manifest_obj['licenses'][0])
+ self.assertEqual('TEST-2', manifest_obj['licenses'][1])
+ self._VerifyTarHas(self._tar_path, ['manifest.json', 'disk.raw'])
+ self._VerifyFileContentsInTarball(self._tar_path,
+ 'manifest.json',
+ '{"licenses": ["TEST-1", "TEST-2"]}')
+
+ def _VerifyFilesystemUUID(self, tar, expected_uuid):
+ """Verifies UUID of the first partition on disk matches the value."""
+ tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
+ tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
+ self.assertEqual(subprocess.call(tar_cmd), 0)
+
+ created_disk_path = os.path.join(tmp_dir, 'disk.raw')
+ with utils.LoadDiskImage(created_disk_path) as devices:
+ self.assertEqual(1, len(devices))
+ self.assertEqual(expected_uuid, utils.GetUUID(devices[0]))
+
+ def _VerifyNonPartitionContents(self, tar, disk_path, partition_start):
+ """Verifies that bytes outside the partition are preserved."""
+ tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
+ tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
+ self.assertEqual(subprocess.call(tar_cmd), 0)
+ created_disk_path = os.path.join(tmp_dir, 'disk.raw')
+
+    # Verify the first partition in both disks starts at the same offset
+ self.assertEqual(partition_start,
+ utils.GetPartitionStart(disk_path, 1))
+ self.assertEqual(partition_start,
+ utils.GetPartitionStart(created_disk_path, 1))
+ with open(disk_path, 'r') as source_file:
+ with open(created_disk_path, 'r') as created_file:
+        # Seek to the 510th byte in both streams and verify the rest of the
+        # bytes up to the partition start are the same
+ source_file.seek(510)
+ created_file.seek(510)
+ for i in range(partition_start - 510):
+ self.assertEqual(source_file.read(1),
+ created_file.read(1),
+ 'byte at position %s not equal' % (i + 510))
+
+ def _VerifyDiskSize(self, tar, expected_size):
+ """Verifies that the disk file has the same size as expected."""
+ tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
+ tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
+ self.assertEqual(subprocess.call(tar_cmd), 0)
+ disk_path = os.path.join(tmp_dir, 'disk.raw')
+ statinfo = os.stat(disk_path)
+ self.assertEqual(expected_size, statinfo.st_size)
+
+ def _VerifyImageHas(self, tar, expected):
+ """Tests if raw disk contains an expected list of files/directories."""
+ tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
+ tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
+ self.assertEqual(subprocess.call(tar_cmd), 0)
+ disk_path = os.path.join(tmp_dir, 'disk.raw')
+ with utils.LoadDiskImage(disk_path) as devices:
+ self.assertEqual(len(devices), 1)
+ mnt_dir = tempfile.mkdtemp(dir=self.tmp_root)
+ with utils.MountFileSystem(devices[0], mnt_dir, 'ext4'):
+ found = []
+ for root, dirs, files in os.walk(mnt_dir):
+ root = root.replace(mnt_dir, '')
+ for f in files:
+ found.append(os.path.join(root, f))
+ for d in dirs:
+ found.append(os.path.join(root, d))
+ self._AssertListEqual(expected, found)
+
+ def _VerifyFileContentsInTarball(self, tar, file_name, expected_content):
+ """Reads the file from the tar file and turns it."""
+ with closing(tarfile.open(tar)) as tar_file:
+ content = tar_file.extractfile(file_name).read()
+ self.assertEqual(content, expected_content)
+
+ def _VerifyFileInRawDiskEndsWith(self, tar, filename, text):
+ """Tests if a file on raw disk contains ends with a specified text."""
+ tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
+ tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
+ self.assertEqual(subprocess.call(tar_cmd), 0)
+ disk_path = os.path.join(tmp_dir, 'disk.raw')
+ with utils.LoadDiskImage(disk_path) as devices:
+ self.assertEqual(len(devices), 1)
+ mnt_dir = tempfile.mkdtemp(dir=self.tmp_root)
+ with utils.MountFileSystem(devices[0], mnt_dir, 'ext4'):
+ f = open(os.path.join(mnt_dir, filename), 'r')
+ file_content = f.read()
+ f.close()
+ self.assertTrue(file_content.endswith(text))
+
+ def _VerifyNumberOfHardLinksInRawDisk(self, tar, filename, count):
+ """Tests if a file on raw disk has a specified number of hard links."""
+ tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
+ tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
+ self.assertEqual(subprocess.call(tar_cmd), 0)
+ disk_path = os.path.join(tmp_dir, 'disk.raw')
+ with utils.LoadDiskImage(disk_path) as devices:
+ self.assertEqual(len(devices), 1)
+ mnt_dir = tempfile.mkdtemp(dir=self.tmp_root)
+ with utils.MountFileSystem(devices[0], mnt_dir, 'ext4'):
+ self.assertEqual(os.stat(os.path.join(mnt_dir, filename)).st_nlink,
+ count)
+
+
+class RootFsRawTest(image_bundle_test_base.ImageBundleTest):
+ """RootFsRaw Unit Test."""
+
+ def setUp(self):
+ super(RootFsRawTest, self).setUp()
+ self._bundle = block_disk.RootFsRaw(
+ 10*1024*1024, 'ext4', False, self._MockStatvfs)
+ self._tar_path = self.tmp_path + '/image.tar.gz'
+ self._bundle.SetTarfile(self._tar_path)
+ self._bundle.AppendExcludes([exclude_spec.ExcludeSpec(self._tar_path)])
+ self._bundle._SetManifest(self._manifest)
+
+ def tearDown(self):
+ super(RootFsRawTest, self).tearDown()
+
+ def testRootRawDiskVerifiesOneSource(self):
+ """Tests that only one root directory is allowed."""
+ self._bundle.AddSource(self.tmp_path)
+ self._bundle.AddSource(self.tmp_path + '/dir1')
+ self._bundle.SetKey('key')
+ try:
+ self._bundle.Verify()
+ except block_disk.InvalidRawDiskError:
+ return
+ self.fail()
+
+ def testRootRawDiskVerifiesRootDestination(self):
+ """Tests that destination directory must be /."""
+ self._bundle.AddSource(self.tmp_path, arcname='/tmp')
+ self._bundle.SetKey('key')
+ try:
+ self._bundle.Verify()
+ except block_disk.InvalidRawDiskError:
+ return
+ self.fail()
+
+ def testRootRawDiskNotEnoughFreeSpace(self):
+ """Tests that there is not enough disk space to complete the operation."""
+ self._statvfs_map = {
+ "/" : image_bundle_test_base.StatvfsResult(1024, 500, 100),
+ "/tmp" : image_bundle_test_base.StatvfsResult(1024, 500, 100)
+ }
+ self._bundle.AddSource("/")
+ self._bundle.SetKey('key')
+ try:
+ self._bundle.Verify()
+ except block_disk.InvalidRawDiskError as e:
+ print str(e)
+ return
+ self.fail()
+
+ def testRootFilesExceedDiskSize(self):
+ """Tests that source files may exceed the raw disk file size limit."""
+ self._statvfs_map = {
+ "/" : image_bundle_test_base.StatvfsResult(1024, 50000, 20000),
+ "/tmp" : image_bundle_test_base.StatvfsResult(1024, 100000, 90000)
+ }
+ self._bundle.AddSource("/")
+ self._bundle.SetKey('key')
+ try:
+ self._bundle.Verify()
+ except block_disk.InvalidRawDiskError as e:
+ print str(e)
+ return
+ self.fail()
+
+ def _MockStatvfs(self, file_path):
+ return self._statvfs_map[file_path]
+
+def main():
+ logging.basicConfig(level=logging.DEBUG)
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/gcimagebundle/gcimagebundlelib/tests/image_bundle_test_base.py b/gcimagebundle/gcimagebundlelib/tests/image_bundle_test_base.py
new file mode 100755
index 0000000..37b7fae
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/tests/image_bundle_test_base.py
@@ -0,0 +1,140 @@
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Base class for image_bundle unittests."""
+
+
+__pychecker__ = 'no-local' # for unittest
+
+
+import os
+import re
+import shutil
+import subprocess
+import tarfile
+import tempfile
+import unittest
+import urllib2
+
+from gcimagebundlelib import manifest
+from gcimagebundlelib.os_platform import Platform
+from gcimagebundlelib import utils
+
+
+class InvalidOverwriteFileException(Exception):
+ """Invalid overwrite target was passed to MockPlatform.Overwrite method."""
+
+
+class MockPlatform(Platform):
+ """Mock platform for image bundle unit tests."""
+ OVERWRITE_LIST = ['test1']
+
+ def __init__(self, tmp_root):
+ super(MockPlatform, self).__init__()
+ self.tmp_root = tmp_root
+
+ def Overwrite(self, filename, arcname, tmpdir):
+ temp = tempfile.mktemp(dir=tmpdir)
+ if arcname != 'test1':
+ raise InvalidOverwriteFileException(arcname)
+ fd = open(temp, 'w')
+ fd.write(open(filename).read())
+ fd.write('something extra.')
+ fd.close()
+ return temp
+
+
+class MockHttp(utils.Http):
+ """Fake implementation of the utils.Http client. Used for metadata queries."""
+ def __init__(self):
+ self._instance_response = '{"hostname":"test"}'
+
+ def Get(self, request, timeout=None):
+ """Accepts an Http request and returns a precanned response."""
+ url = request.get_full_url()
+ if url == utils.METADATA_URL_PREFIX:
+ return 'v1/'
+ elif url.startswith(utils.METADATA_V1_URL_PREFIX):
+ url = url.replace(utils.METADATA_V1_URL_PREFIX, '')
+ if url == 'instance/?recursive=true':
+ return self._instance_response
+ raise urllib2.HTTPError
+
+class StatvfsResult:
+ """ A struct for partial os.statvfs result, used to mock the result. """
+
+ def __init__(self, f_bsize, f_blocks, f_bfree):
+ self.f_bsize = f_bsize
+ self.f_blocks = f_blocks
+ self.f_bfree = f_bfree
+
+class ImageBundleTest(unittest.TestCase):
+ """ImageBundle Unit Test Base Class."""
+
+ def setUp(self):
+ self.tmp_root = tempfile.mkdtemp(dir='/tmp')
+ self.tmp_path = tempfile.mkdtemp(dir=self.tmp_root)
+ self._http = MockHttp()
+ self._manifest = manifest.ImageManifest(http=self._http, is_gce_instance=True)
+ self._SetupFilesystemToTar()
+
+ def tearDown(self):
+ self._CleanupFiles()
+
+ def _SetupFilesystemToTar(self):
+ """Creates some directory structure to tar."""
+ if os.path.exists(self.tmp_path):
+ shutil.rmtree(self.tmp_path)
+ os.makedirs(self.tmp_path)
+ with open(self.tmp_path + '/test1', 'w') as fd:
+ print >> fd, 'some text'
+ shutil.copyfile(self.tmp_path + '/test1', self.tmp_path + '/test2')
+ os.makedirs(self.tmp_path + '/dir1')
+ os.makedirs(self.tmp_path + '/dir1/dir11')
+ os.makedirs(self.tmp_path + '/dir2')
+ os.makedirs(self.tmp_path + '/dir2/dir1')
+ os.symlink(self.tmp_path + '/test1', self.tmp_path + '/dir1/sl1')
+ os.link(self.tmp_path + '/test2', self.tmp_path + '/dir1/hl2')
+ os.symlink(self.tmp_path + '/test2', self.tmp_path + '/dir2/sl2')
+ os.link(self.tmp_path + '/test1', self.tmp_path + '/dir2/hl1')
+
+ def _CleanupFiles(self):
+ """Removes the files under test directory."""
+ if os.path.exists(self.tmp_root):
+ shutil.rmtree(self.tmp_root)
+
+ def _VerifyTarHas(self, tar, expected):
+ p = subprocess.Popen(['tar -tf %s' % tar],
+ stdout=subprocess.PIPE, shell=True)
+ found = p.communicate()[0].split('\n')
+ if './' in found:
+ found.remove('./')
+ if '' in found:
+ found.remove('')
+ self._AssertListEqual(expected, found)
+
+ def _VerifyFileInTarEndsWith(self, tar, filename, text):
+ tf = tarfile.open(tar, 'r:gz')
+ fd = tf.extractfile(filename)
+ file_content = fd.read()
+ self.assertTrue(file_content.endswith(text))
+
+ def _AssertListEqual(self, list1, list2):
+ """Asserts that, when sorted, list1 and list2 are identical."""
+ sorted_list1 = [re.sub(r'/$', '', x) for x in list1]
+ sorted_list2 = [re.sub(r'/$', '', x) for x in list2]
+ sorted_list1.sort()
+ sorted_list2.sort()
+ self.assertEqual(sorted_list1, sorted_list2)
diff --git a/gcimagebundle/gcimagebundlelib/tests/utils_test.py b/gcimagebundle/gcimagebundlelib/tests/utils_test.py
new file mode 100755
index 0000000..dd7d2cd
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/tests/utils_test.py
@@ -0,0 +1,49 @@
+#!/usr/bin/python
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for utils.py module."""
+
+__pychecker__ = 'no-local' # for unittest
+
+import logging
+import subprocess
+import unittest
+import uuid
+
+from gcimagebundlelib import utils
+
+
+class ImageBundleTest(unittest.TestCase):
+
+ def testRunCommand(self):
+ """Run a simple command and verify it works."""
+ utils.RunCommand(['ls', '/'])
+
+ def testRunCommandThatFails(self):
+ """Run a command that will fail and verify it raises the correct error."""
+ def RunCommandUnderTest():
+ non_existent_path = '/' + uuid.uuid4().hex
+ utils.RunCommand(['mkfs', '-t', 'ext4', non_existent_path])
+ self.assertRaises(subprocess.CalledProcessError, RunCommandUnderTest)
+
+
+def main():
+ logging.basicConfig(level=logging.DEBUG)
+ unittest.main()
+
+
+if __name__ == '__main__':
+ main()
+
diff --git a/gcimagebundle/gcimagebundlelib/ubuntu.py b/gcimagebundle/gcimagebundlelib/ubuntu.py
new file mode 100644
index 0000000..8d68687
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/ubuntu.py
@@ -0,0 +1,54 @@
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Ubuntu specific platform info."""
+
+import csv
+import os
+from gcimagebundlelib import linux
+
+
+class Ubuntu(linux.LinuxPlatform):
+ """Ubuntu specific information."""
+
+ @staticmethod
+ def IsThisPlatform(root='/'):
+ release_file = root + '/etc/lsb-release'
+ if os.path.exists(release_file):
+ (_, _, flavor, _) = Ubuntu.ParseLsbRelease(release_file)
+ if flavor and flavor.lower() == 'ubuntu':
+ return True
+ return False
+
+ @staticmethod
+ def ParseLsbRelease(release_file='/etc/lsb-release'):
+ """Parses the /etc/lsb-releases file."""
+ release_info = {}
+ for line in csv.reader(open(release_file), delimiter='='):
+ if len(line) > 1:
+ release_info[line[0]] = line[1]
+ if ('DISTRIB_CODENAME' not in release_info or
+ 'DISTRIB_DESCRIPTION' not in release_info or
+ 'DISTRIB_ID' not in release_info or
+ 'DISTRIB_RELEASE' not in release_info):
+ return (None, None, None, None)
+ return (release_info['DISTRIB_CODENAME'],
+ release_info['DISTRIB_DESCRIPTION'],
+ release_info['DISTRIB_ID'],
+ release_info['DISTRIB_RELEASE'])
+
+ def __init__(self):
+ super(Ubuntu, self).__init__()
+ (self.distribution_codename, _, self.distribution,
+ self.distribution_version) = Ubuntu.ParseLsbRelease()
diff --git a/gcimagebundle/gcimagebundlelib/utils.py b/gcimagebundle/gcimagebundlelib/utils.py
new file mode 100644
index 0000000..a8fde40
--- /dev/null
+++ b/gcimagebundle/gcimagebundlelib/utils.py
@@ -0,0 +1,455 @@
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""Utilities for image bundling tool."""
+
+import logging
+import os
+import subprocess
+import time
+import urllib2
+
+METADATA_URL_PREFIX = 'http://169.254.169.254/computeMetadata/'
+METADATA_V1_URL_PREFIX = METADATA_URL_PREFIX + 'v1/'
+
+
+class MakeFileSystemException(Exception):
+ """Error occurred in file system creation."""
+
+
+class TarAndGzipFileException(Exception):
+ """Error occurred in creating the tarball."""
+
+
+class LoadDiskImage(object):
+ """Loads raw disk image using kpartx."""
+
+ def __init__(self, file_path):
+ """Initializes LoadDiskImage object.
+
+ Args:
+ file_path: a path to a file containing raw disk image.
+
+ Returns:
+ A list of devices for every partition found in an image.
+ """
+ self._file_path = file_path
+
+ def __enter__(self):
+ """Map disk image as a device."""
+ SyncFileSystem()
+ kpartx_cmd = ['kpartx', '-a', '-v', '-s', self._file_path]
+ output = RunCommand(kpartx_cmd)
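+    # kpartx -a -v prints one line per mapped partition, roughly (illustrative):
+    #   add map loop0p1 (253:1): 0 20477952 linear ...
+    # so each partition becomes available under /dev/mapper/.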
+ devs = []
+ for line in output.splitlines():
+ split_line = line.split()
+ if (len(split_line) > 2 and split_line[0] == 'add'
+ and split_line[1] == 'map'):
+ devs.append('/dev/mapper/' + split_line[2])
+ time.sleep(2)
+ return devs
+
+ def __exit__(self, unused_exc_type, unused_exc_value, unused_exc_tb):
+ """Unmap disk image as a device.
+
+ Args:
+ unused_exc_type: unused.
+ unused_exc_value: unused.
+ unused_exc_tb: unused.
+ """
+ SyncFileSystem()
+ time.sleep(2)
+ kpartx_cmd = ['kpartx', '-d', '-v', '-s', self._file_path]
+ RunCommand(kpartx_cmd)
+
+
+class MountFileSystem(object):
+ """Mounts a file system."""
+
+ def __init__(self, dev_path, dir_path, fs_type):
+ """Initializes MountFileSystem object.
+
+ Args:
+ dev_path: A path to a device to mount.
+      dir_path: A path to a directory where the device is to be mounted.
+      fs_type: The file system type on the device, e.g. 'ext4' or 'xfs'.
+    """
+ self._dev_path = dev_path
+ self._dir_path = dir_path
+ self._fs_type = fs_type
+
+ def __enter__(self):
+ """Mounts a device.
+ """
+ # Since the bundled image can have the same uuid as the root disk,
+ # we should prevent uuid conflicts for xfs mounts.
+    if self._fs_type == 'xfs':
+ mount_cmd = ['mount', '-o', 'nouuid', self._dev_path, self._dir_path]
+ else:
+ mount_cmd = ['mount', self._dev_path, self._dir_path]
+ RunCommand(mount_cmd)
+
+ def __exit__(self, unused_exc_type, unused_exc_value, unused_exc_tb):
+ """Unmounts a file system.
+
+ Args:
+ unused_exc_type: unused.
+ unused_exc_value: unused.
+ unused_exc_tb: unused.
+ """
+ umount_cmd = ['umount', self._dir_path]
+ RunCommand(umount_cmd)
+ SyncFileSystem()
+
+
+def SyncFileSystem():
+ RunCommand(['sync'])
+
+def GetMounts(root='/'):
+ """Find all mount points under the specified root.
+
+ Args:
+    root: a path under which to look for mount points.
+
+ Returns:
+ A list of mount points.
+ """
+ output = RunCommand(['/bin/mount', '-l'])
+ mounts = []
+ for line in output.splitlines():
+ split_line = line.split()
+ mount_point = split_line[2]
+ if mount_point == root:
+ continue
+    # We ignore the fs_type for now, but we could use it later. Just verify
+    # that these are actually mount points.
+ if os.path.ismount(mount_point) and mount_point.startswith(root):
+ mounts.append(mount_point)
+ return mounts
+
+
+def MakePartitionTable(file_path):
+ """Create a partition table in a file.
+
+ Args:
+ file_path: A path to a file where a partition table will be created.
+ """
+ RunCommand(['parted', file_path, 'mklabel', 'msdos'])
+
+
+def MakePartition(file_path, partition_type, fs_type, start, end):
+ """Create a partition in a file.
+
+ Args:
+ file_path: A path to a file where a partition will be created.
+    partition_type: A type of a partition to be created. The tested option is
+      'primary'.
+ fs_type: A type of a file system to be created. For example, ext2, ext3,
+ etc.
+ start: Start offset of a partition in bytes.
+ end: End offset of a partition in bytes.
+ """
+ parted_cmd = ['parted', file_path, 'unit B', 'mkpart', partition_type,
+ fs_type, str(start), str(end)]
+ RunCommand(parted_cmd)
+
+
+def MakeFileSystem(dev_path, fs_type, uuid=None):
+ """Create a file system in a device.
+
+ Args:
+ dev_path: A path to a device.
+ fs_type: A type of a file system to be created. For example ext2, ext3, etc.
+ uuid: The value to use as the UUID for the filesystem. If none, a random
+ UUID will be generated and used.
+
+ Returns:
+ The uuid of the filesystem. This will be the same as the passed value if
+ a value was specified. If no uuid was passed in, this will be the randomly
+ generated uuid.
+
+ Raises:
+ MakeFileSystemException: If mkfs encounters an error.
+ """
+ if uuid is None:
+ uuid = RunCommand(['uuidgen']).strip()
+ if uuid is None:
+ raise MakeFileSystemException(dev_path)
+
+ mkfs_cmd = ['mkfs', '-t', fs_type, dev_path]
+ RunCommand(mkfs_cmd)
+
+  if fs_type == 'xfs':
+ set_uuid_cmd = ['xfs_admin', '-U', uuid, dev_path]
+ else:
+ set_uuid_cmd = ['tune2fs', '-U', uuid, dev_path]
+ RunCommand(set_uuid_cmd)
+
+ return uuid
+
+
+def Rsync(src, dest, exclude_file, ignore_hard_links, recursive, xattrs):
+ """Copy files from specified directory using rsync.
+
+ Args:
+ src: Source location to copy.
+ dest: Destination to copy files to.
+ exclude_file: A path to a file which contains a list of exclude from copy
+ filters.
+    ignore_hard_links: If True, hard links are copied as separate files. If
+    False, hard links are recreated in dest.
+ recursive: Specifies if directories are copied recursively or not.
+ xattrs: Specifies if extended attributes are preserved or not.
+ """
+ rsync_cmd = ['rsync', '--times', '--perms', '--owner', '--group', '--links',
+ '--devices', '--acls', '--sparse']
+ if not ignore_hard_links:
+ rsync_cmd.append('--hard-links')
+ if recursive:
+ rsync_cmd.append('--recursive')
+ else:
+ rsync_cmd.append('--dirs')
+ if xattrs:
+ rsync_cmd.append('--xattrs')
+ if exclude_file:
+ rsync_cmd.append('--exclude-from=' + exclude_file)
+ rsync_cmd.extend([src, dest])
+
+ logging.debug('Calling: %s', repr(rsync_cmd))
+ if exclude_file:
+ logging.debug('Contents of exclude file %s:', exclude_file)
+ with open(exclude_file, 'rb') as excludes:
+ for line in excludes:
+ logging.debug(' %s', line.rstrip())
+
+ RunCommand(rsync_cmd)
+
+
+def GetUUID(partition_path):
+ """Fetches the UUID of the filesystem on the specified partition.
+
+ Args:
+ partition_path: The path to the partition.
+
+ Returns:
+ The uuid of the filesystem.
+ """
+  output = RunCommand(['blkid', partition_path])
+  uuid = None
+  for token in output.split():
+    if token.startswith('UUID='):
+      uuid = token.strip()[len('UUID="'):-1]
+
+ logging.debug('found uuid = %s', uuid)
+ return uuid
+
+
+def CopyBytes(src, dest, count):
+ """Copies count bytes from the src to dest file.
+
+ Args:
+ src: The source to read bytes from.
+ dest: The destination to copy bytes to.
+ count: Number of bytes to copy.
+ """
+ block_size = 4096
+ block_count = count / block_size
+ dd_command = ['dd',
+ 'if=%s' % src,
+ 'of=%s' % dest,
+ 'conv=notrunc',
+ 'bs=%s' % block_size,
+ 'count=%s' % block_count]
+ RunCommand(dd_command)
+ remaining_bytes = count - block_count * block_size
+ if remaining_bytes:
+ logging.debug('remaining bytes to copy = %s', remaining_bytes)
+ dd_command = ['dd',
+ 'if=%s' % src,
+ 'of=%s' % dest,
+ 'seek=%s' % block_count,
+ 'skip=%s' % block_count,
+ 'conv=notrunc',
+ 'bs=1',
+ 'count=%s' % remaining_bytes]
+ RunCommand(dd_command)
+
+
+def GetPartitionStart(disk_path, partition_number):
+ """Returns the starting position in bytes of the partition.
+
+ Args:
+ disk_path: The path to disk device.
+ partition_number: The partition number to lookup. 1 based.
+
+ Returns:
+ The starting position of the first partition in bytes.
+
+ Raises:
+ subprocess.CalledProcessError: If running parted fails.
+ IndexError: If there is no partition at the given number.
+ """
+ parted_cmd = ['parted',
+ disk_path,
+ 'unit B',
+ 'print']
+  # In case the device is not valid and parted throws the retry/cancel prompt,
+  # write 'c' (cancel) to stdin.
+ output = RunCommand(parted_cmd, input_str='c')
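+  # With 'unit B', parted prints partition rows such as (illustrative):
+  #   1      1048576B    20972568575B   20971520000B   primary   ext4
+  # so the start offset is the second column with the trailing 'B' stripped.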
+ for line in output.splitlines():
+ split_line = line.split()
+ if len(split_line) > 4 and split_line[0] == str(partition_number):
+ return int(split_line[1][:-1])
+ raise IndexError()
+
+
+def RemovePartition(disk_path, partition_number):
+ """Removes the partition number from the disk.
+
+ Args:
+ disk_path: The disk to remove the partition from.
+ partition_number: The partition number to remove.
+ """
+ parted_cmd = ['parted',
+ disk_path,
+ 'rm',
+ str(partition_number)]
+  # In case the device is not valid and parted throws the retry/cancel prompt,
+  # write 'c' (cancel) to stdin.
+ RunCommand(parted_cmd, input_str='c')
+
+
+def GetDiskSize(disk_file):
+ """Returns the size of the disk device in bytes.
+
+ Args:
+ disk_file: The full path to the disk device.
+
+ Returns:
+ The size of the disk device in bytes.
+
+ Raises:
+ subprocess.CalledProcessError: If fdisk command fails for the disk file.
+ """
+ output = RunCommand(['fdisk', '-s', disk_file])
+ return int(output) * 1024
+
+
+def RunCommand(command, input_str=None):
+ """Runs the command and returns the output printed on stdout.
+
+ Args:
+ command: The command to run.
+ input_str: The input to pass to subprocess via stdin.
+
+ Returns:
+ The stdout from running the command.
+
+ Raises:
+ subprocess.CalledProcessError: if the command fails.
+ """
+ logging.debug('running %s with input=%s', command, input_str)
+ p = subprocess.Popen(command, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ cmd_output = p.communicate(input_str)
+ logging.debug('stdout %s', cmd_output[0])
+ logging.debug('stderr %s', cmd_output[1])
+ logging.debug('returncode %s', p.returncode)
+ if p.returncode:
+ logging.warning('Error while running %s return_code = %s\n'
+ 'stdout=%s\nstderr=%s',
+ command, p.returncode, cmd_output[0],
+ cmd_output[1])
+ raise subprocess.CalledProcessError(p.returncode,
+ cmd=command)
+ return cmd_output[0]
+
+
+def TarAndGzipFile(src_paths, dest):
+ """Pack file in tar archive and optionally gzip it.
+
+ Args:
+ src_paths: A list of files that will be archived.
+ (Must be in the same directory.)
+    dest: An archive name. If it ends with .gz or .tgz, the archive is gzipped
+        as well.
+
+ Raises:
+ TarAndGzipFileException: If tar encounters an error.
+ """
+ if dest.endswith('.gz') or dest.endswith('.tgz'):
+ mode = 'czSf'
+ else:
+ mode = 'cSf'
+ src_names = [os.path.basename(src_path) for src_path in src_paths]
+ # Take the directory of the first file in the list, all files are expected
+ # to be in the same directory.
+ src_dir = os.path.dirname(src_paths[0])
+ tar_cmd = ['tar', mode, dest, '-C', src_dir] + src_names
+ retcode = subprocess.call(tar_cmd)
+ if retcode:
+ raise TarAndGzipFileException(','.join(src_paths))
+
+
+class Http(object):
+ def Get(self, request, timeout=None):
+ return urllib2.urlopen(request, timeout=timeout).read()
+
+ def GetMetadata(self, url_path, recursive=False, timeout=None):
+ """Retrieves instance metadata.
+
+ Args:
+ url_path: The path of the metadata url after the api version.
+ http://169.254.169.254/computeMetadata/v1/url_path
+ recursive: If set, returns the tree of metadata starting at url_path as
+ a json string.
+ timeout: How long to wait for blocking operations (in seconds).
+ A value of None uses urllib2's default timeout.
+ Returns:
+ The metadata returned based on the url path.
+
+ """
+ # Use the latest version of the metadata.
+ suffix = ''
+ if recursive:
+ suffix = '?recursive=true'
+ url = '{0}{1}{2}'.format(METADATA_V1_URL_PREFIX, url_path, suffix)
+ request = urllib2.Request(url)
+ request.add_unredirected_header('Metadata-Flavor', 'Google')
+ return self.Get(request, timeout=timeout)
+
+
+def IsRunningOnGCE():
+ """Detect if we are running on GCE.
+
+ Returns:
+ True if we are running on GCE, False otherwise.
+ """
+  # Try accessing DMI/SMBIOS information through dmidecode first
+ try:
+ dmidecode_cmd = ['dmidecode', '-s', 'bios-vendor']
+ output = RunCommand(dmidecode_cmd)
+ return 'Google' in output
+ except subprocess.CalledProcessError:
+ # We fail if dmidecode doesn't exist or we have insufficient privileges
+ pass
+
+  # If dmidecode is not working, fall back to contacting the metadata server
+ try:
+ Http().GetMetadata('instance/id', timeout=1)
+ return True
+ except urllib2.HTTPError as e:
+ logging.warning('HTTP error: %s (http status code=%s)' % (e.reason, e.code))
+ except urllib2.URLError as e:
+ logging.warning('Cannot reach metadata server: %s' % e.reason)
+
+ return False