author    Max Illfelder <illfelder@users.noreply.github.com>  2016-06-08 15:24:49 -0700
committer Max Illfelder <illfelder@users.noreply.github.com>  2016-06-08 15:24:49 -0700
commit    c98f0c89330f547d772d2c25883fe4a8f152b95c (patch)
tree      76fc1aab95337c4b55072d40e41c0a5bd30c63e0
parent    290607ea0446d77a2f3d08c0229362695a4546c7 (diff)
parent    a4b15e901d90cb9e42688e4788caa2c8591512bc (diff)
download  google-compute-image-packages-c98f0c89330f547d772d2c25883fe4a8f152b95c.tar.gz
Merge pull request #260 from illfelder/master (tag: 20160608)
Linux guest environment v2.
-rw-r--r--  .travis.yml  27
-rw-r--r--  MANIFEST.in  1
-rw-r--r--  README.md  280
-rw-r--r--  VERSION  1
-rwxr-xr-x  build_packages.sh  57
-rw-r--r--  disk_expand/README.md (renamed from disk-expand/README.md)  0
-rwxr-xr-x  disk_expand/build_packages.sh (renamed from disk-expand/build_package.sh)  0
-rwxr-xr-x  disk_expand/expand-root (renamed from disk-expand/expand-root)  0
-rw-r--r--  disk_expand/expand-root.service (renamed from disk-expand/expand-root.service)  0
-rw-r--r--  disk_expand/gce-disk-expand-el6.spec (renamed from disk-expand/gce-disk-expand-el6.spec)  0
-rw-r--r--  disk_expand/gce-disk-expand-el7.spec (renamed from disk-expand/gce-disk-expand-el7.spec)  0
-rw-r--r--  disk_expand/third_party/cloud-utils/LICENSE (renamed from disk-expand/third_party/cloud-utils/LICENSE)  0
-rw-r--r--  disk_expand/third_party/cloud-utils/README.google (renamed from disk-expand/third_party/cloud-utils/README.google)  0
-rwxr-xr-x  disk_expand/third_party/cloud-utils/growpart (renamed from disk-expand/third_party/cloud-utils/growpart)  0
-rw-r--r--  disk_expand/third_party/dracut-modules-growroot/LICENSE (renamed from disk-expand/third_party/dracut-modules-growroot/LICENSE)  0
-rw-r--r--  disk_expand/third_party/dracut-modules-growroot/README.google (renamed from disk-expand/third_party/dracut-modules-growroot/README.google)  0
-rwxr-xr-x  disk_expand/third_party/dracut-modules-growroot/growroot-dummy.sh (renamed from disk-expand/third_party/dracut-modules-growroot/growroot-dummy.sh)  0
-rwxr-xr-x  disk_expand/third_party/dracut-modules-growroot/growroot.sh (renamed from disk-expand/third_party/dracut-modules-growroot/growroot.sh)  0
-rwxr-xr-x  disk_expand/third_party/dracut-modules-growroot/install (renamed from disk-expand/third_party/dracut-modules-growroot/install)  0
-rw-r--r--  google-daemon/README.md  59
-rwxr-xr-x  google-daemon/etc/init.d/google-accounts-manager  193
-rwxr-xr-x  google-daemon/etc/init.d/google-address-manager  153
-rwxr-xr-x  google-daemon/etc/init.d/google-clock-sync-manager  153
-rwxr-xr-x  google-daemon/etc/init/google-accounts-manager-service.conf  10
-rwxr-xr-x  google-daemon/etc/init/google-accounts-manager-task.conf  18
-rwxr-xr-x  google-daemon/etc/init/google-address-manager.conf  5
-rwxr-xr-x  google-daemon/etc/init/google-clock-sync-manager.conf  5
-rw-r--r--  google-daemon/usr/lib/systemd/system/google-accounts-manager.service  12
-rw-r--r--  google-daemon/usr/lib/systemd/system/google-address-manager.service  11
-rw-r--r--  google-daemon/usr/lib/systemd/system/google-clock-sync-manager.service  11
-rwxr-xr-x  google-daemon/usr/share/google/google_daemon/accounts.py  431
-rw-r--r--  google-daemon/usr/share/google/google_daemon/accounts_manager.py  127
-rwxr-xr-x  google-daemon/usr/share/google/google_daemon/accounts_manager_daemon.py  89
-rw-r--r--  google-daemon/usr/share/google/google_daemon/address_manager.py  179
-rwxr-xr-x  google-daemon/usr/share/google/google_daemon/desired_accounts.py  191
-rwxr-xr-x  google-daemon/usr/share/google/google_daemon/manage_accounts.py  94
-rwxr-xr-x  google-daemon/usr/share/google/google_daemon/manage_addresses.py  52
-rwxr-xr-x  google-daemon/usr/share/google/google_daemon/manage_clock_sync.py  85
-rwxr-xr-x  google-daemon/usr/share/google/google_daemon/metadata_watcher.py  97
-rwxr-xr-x  google-daemon/usr/share/google/google_daemon/utils.py  193
-rw-r--r--  google-startup-scripts/README.md  52
-rwxr-xr-x  google-startup-scripts/etc/init.d/google  75
-rwxr-xr-x  google-startup-scripts/etc/init.d/google-startup-scripts  89
-rwxr-xr-x  google-startup-scripts/etc/init/google.conf  9
-rwxr-xr-x  google-startup-scripts/etc/init/google_run_shutdown_scripts.conf  10
-rwxr-xr-x  google-startup-scripts/etc/init/google_run_startup_scripts.conf  10
-rw-r--r--  google-startup-scripts/etc/rsyslog.d/90-google.conf  9
-rw-r--r--  google-startup-scripts/usr/lib/systemd/system-preset/50-google.preset  3
-rw-r--r--  google-startup-scripts/usr/lib/systemd/system/google-shutdown-scripts.service  15
-rw-r--r--  google-startup-scripts/usr/lib/systemd/system/google-startup-scripts.service  13
-rwxr-xr-x  google-startup-scripts/usr/share/google/boto/boot_setup.py  92
-rw-r--r--  google-startup-scripts/usr/share/google/boto/boto_plugins/compute_auth.py  85
-rwxr-xr-x  google-startup-scripts/usr/share/google/fetch_script  148
-rwxr-xr-x  google-startup-scripts/usr/share/google/first-boot  94
-rwxr-xr-x  google-startup-scripts/usr/share/google/get_metadata_value  73
-rwxr-xr-x  google-startup-scripts/usr/share/google/onboot  162
-rwxr-xr-x  google-startup-scripts/usr/share/google/regenerate-host-keys  81
-rwxr-xr-x  google-startup-scripts/usr/share/google/run-scripts  54
-rwxr-xr-x  google-startup-scripts/usr/share/google/run-shutdown-scripts  31
-rwxr-xr-x  google-startup-scripts/usr/share/google/run-startup-scripts  27
-rwxr-xr-x  google-startup-scripts/usr/share/google/safe_format_and_mount  152
-rwxr-xr-x  google-startup-scripts/usr/share/google/set-interrupts  82
-rw-r--r--  google_compute_engine/__init__.py (renamed from legacy/gcimagebundle/gcimagebundlelib/__init__.py)  0
-rw-r--r--  google_compute_engine/accounts/__init__.py  0
-rwxr-xr-x  google_compute_engine/accounts/accounts_daemon.py  219
-rw-r--r--  google_compute_engine/accounts/accounts_utils.py  318
-rw-r--r--  google_compute_engine/accounts/tests/accounts_daemon_test.py  317
-rw-r--r--  google_compute_engine/accounts/tests/accounts_utils_test.py  585
-rw-r--r--  google_compute_engine/boto/__init__.py  0
-rw-r--r--  google_compute_engine/boto/boto_config.py  87
-rw-r--r--  google_compute_engine/boto/compute_auth.py  62
-rw-r--r--  google_compute_engine/boto/tests/boto_config_test.py  94
-rw-r--r--  google_compute_engine/boto/tests/compute_auth_test.py  100
-rw-r--r--  google_compute_engine/clock_skew/__init__.py  0
-rwxr-xr-x  google_compute_engine/clock_skew/clock_skew_daemon.py  81
-rw-r--r--  google_compute_engine/clock_skew/tests/clock_skew_daemon_test.py  107
-rw-r--r--  google_compute_engine/compat.py  35
-rw-r--r--  google_compute_engine/config_manager.py  109
-rw-r--r--  google_compute_engine/file_utils.py  124
-rw-r--r--  google_compute_engine/instance_setup/__init__.py  0
-rw-r--r--  google_compute_engine/instance_setup/instance_config.py  96
-rwxr-xr-x  google_compute_engine/instance_setup/instance_setup.py  186
-rw-r--r--  google_compute_engine/instance_setup/tests/instance_config_test.py  106
-rw-r--r--  google_compute_engine/instance_setup/tests/instance_setup_test.py  321
-rw-r--r--  google_compute_engine/ip_forwarding/__init__.py  0
-rwxr-xr-x  google_compute_engine/ip_forwarding/ip_forwarding_daemon.py  130
-rw-r--r--  google_compute_engine/ip_forwarding/ip_forwarding_utils.py  129
-rw-r--r--  google_compute_engine/ip_forwarding/tests/ip_forwarding_daemon_test.py  168
-rw-r--r--  google_compute_engine/ip_forwarding/tests/ip_forwarding_utils_test.py  176
-rw-r--r--  google_compute_engine/logger.py  54
-rw-r--r--  google_compute_engine/metadata_scripts/__init__.py  0
-rw-r--r--  google_compute_engine/metadata_scripts/script_executor.py  78
-rwxr-xr-x  google_compute_engine/metadata_scripts/script_manager.py  97
-rw-r--r--  google_compute_engine/metadata_scripts/script_retriever.py  202
-rw-r--r--  google_compute_engine/metadata_scripts/tests/script_executor_test.py  110
-rw-r--r--  google_compute_engine/metadata_scripts/tests/script_manager_test.py  67
-rw-r--r--  google_compute_engine/metadata_scripts/tests/script_retriever_test.py  279
-rw-r--r--  google_compute_engine/metadata_watcher.py  176
-rw-r--r--  google_compute_engine/test_compat.py  39
-rw-r--r--  google_compute_engine/tests/config_manager_test.py  175
-rw-r--r--  google_compute_engine/tests/file_utils_test.py  198
-rw-r--r--  google_compute_engine/tests/logger_test.py  51
-rw-r--r--  google_compute_engine/tests/metadata_watcher_test.py  261
-rwxr-xr-x  google_configs/bin/set_hostname (renamed from google-startup-scripts/usr/share/google/set-hostname)  5
-rwxr-xr-x  google_configs/build_packages.sh  64
-rwxr-xr-x[-rw-r--r--]  google_configs/dhcp/google_hostname.sh (renamed from legacy/gcimagebundle/gcimagebundlelib/tests/__init__.py)  11
-rw-r--r--  google_configs/rsyslog/90-google.conf  6
-rw-r--r--  google_configs/sysctl/11-gce-network-security.conf (renamed from google-startup-scripts/etc/sysctl.d/11-gce-network-security.conf)  14
-rw-r--r--  google_configs/udev/64-gce-disk-removal.rules (renamed from google-startup-scripts/lib/udev/rules.d/64-gce-disk-removal.rules)  2
-rw-r--r--  google_configs/udev/65-gce-disk-naming.rules (renamed from google-startup-scripts/lib/udev/rules.d/65-gce-disk-naming.rules)  2
-rw-r--r--  legacy/README.md  5
-rw-r--r--  legacy/gcimagebundle/LICENSE  201
-rw-r--r--  legacy/gcimagebundle/MANIFEST.in  4
-rw-r--r--  legacy/gcimagebundle/README  30
-rw-r--r--  legacy/gcimagebundle/README.md  48
-rw-r--r--  legacy/gcimagebundle/VERSION  1
-rw-r--r--  legacy/gcimagebundle/distribute_setup.py  556
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/block_disk.py  389
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/centos.py  66
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/debian.py  36
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/exclude_spec.py  82
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/fedora.py  56
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/fs_copy.py  180
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/gcel.py  57
-rwxr-xr-x  legacy/gcimagebundle/gcimagebundlelib/imagebundle.py  265
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/linux.py  135
-rwxr-xr-x  legacy/gcimagebundle/gcimagebundlelib/manifest.py  79
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/os_platform.py  70
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/platform_factory.py  60
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/rhel.py  42
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/sle.py  34
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/suse.py  91
-rwxr-xr-x  legacy/gcimagebundle/gcimagebundlelib/tests/block_disk_test.py  512
-rwxr-xr-x  legacy/gcimagebundle/gcimagebundlelib/tests/image_bundle_test_base.py  140
-rwxr-xr-x  legacy/gcimagebundle/gcimagebundlelib/tests/utils_test.py  49
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/ubuntu.py  54
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/utils.py  455
-rwxr-xr-x  legacy/gcimagebundle/setup.py  58
-rw-r--r--  legacy/gcimagebundle/stdeb.cfg  3
-rw-r--r--  package/instance_configs.cfg  22
-rw-r--r--  package/systemd/google-accounts-daemon.service  12
-rw-r--r--  package/systemd/google-clock-skew-daemon.service  11
-rw-r--r--  package/systemd/google-instance-setup.service (renamed from google-startup-scripts/usr/lib/systemd/system/google.service)  6
-rw-r--r--  package/systemd/google-ip-forwarding-daemon.service  11
-rw-r--r--  package/systemd/google-shutdown-scripts.service  15
-rw-r--r--  package/systemd/google-startup-scripts.service  13
-rwxr-xr-x  package/systemd/postinst.sh  32
-rwxr-xr-x  package/systemd/prerm.sh  27
-rwxr-xr-x  package/sysvinit/google-accounts-daemon  107
-rwxr-xr-x  package/sysvinit/google-clock-skew-daemon  106
-rwxr-xr-x  package/sysvinit/google-instance-setup  51
-rwxr-xr-x  package/sysvinit/google-ip-forwarding-daemon  106
-rwxr-xr-x  package/sysvinit/google-shutdown-scripts  50
-rwxr-xr-x  package/sysvinit/google-startup-scripts  50
-rwxr-xr-x[-rw-r--r--]  package/sysvinit/postinst.sh (renamed from legacy/gcimagebundle/gcimagebundlelib/opensuse.py)  24
-rwxr-xr-x  package/sysvinit/prerm.sh  23
-rw-r--r--  package/upstart/google-accounts-daemon.conf  5
-rw-r--r--  package/upstart/google-clock-skew-daemon.conf  5
-rw-r--r--  package/upstart/google-instance-setup.conf  6
-rw-r--r--  package/upstart/google-ip-forwarding-daemon.conf  5
-rw-r--r--  package/upstart/google-shutdown-scripts.conf  5
-rw-r--r--  package/upstart/google-startup-scripts.conf  4
-rwxr-xr-x  package/upstart/postinst.sh (renamed from legacy/gcimagebundle/gcimagebundle)  24
-rwxr-xr-x  package/upstart/prerm.sh (renamed from google-startup-scripts/etc/rc.local)  11
-rwxr-xr-x  scripts/optimize_local_ssd  95
-rwxr-xr-x  scripts/set_multiqueue (renamed from google-startup-scripts/usr/share/google/virtionet-irq-affinity)  32
-rwxr-xr-x  setup.py  103
-rw-r--r--  unit-tests/travis-run-tests.sh  3
168 files changed, 6667 insertions, 7396 deletions
diff --git a/.travis.yml b/.travis.yml
index 3eaff70..cc5b132 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,7 +1,26 @@
language: python
+sudo: true
python:
- - "2.7"
-before_script:
- - chmod +x unit-tests/travis-run-tests.sh
+ - 2.6
+ - 2.7
+ - 3.2
+ - 3.3
+ - 3.4
+ - 3.5
+ - pypy
+ - pypy3
+# Global configs cause unit tests to break.
+# Issue: travis-ci/travis-ci#5246
+env:
+ - BOTO_CONFIG=/tmp/fake
+os:
+ - linux
+# Python 2.6 uses a backported version of unittest.
+# Python 3.2 does not have mock installed by default.
+install:
+ - pip install boto
+ - if [[ $TRAVIS_PYTHON_VERSION == 2.6 ]]; then pip install unittest2; fi
+ - if [[ $TRAVIS_PYTHON_VERSION == 3.2 ]]; then pip3.2 install mock; fi
+# nosetests will run all tests within the current directory.
script:
-  - unit-tests/travis-run-tests.sh
\ No newline at end of file
+ - nosetests
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..dc046f4
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1 @@
+graft package
diff --git a/README.md b/README.md
index 677f17a..5376587 100644
--- a/README.md
+++ b/README.md
@@ -1,54 +1,270 @@
-## LEGACY
+## Linux Guest Environment for Google Compute Engine
-## [Image Packages](https://cloud.google.com/compute/docs/images) for [Google Compute Engine](https://cloud.google.com/compute/)
-This repository is the collection of packages that are installed on the standard Google Compute Engine images.
+This repository stores the collection of packages installed on Google-supported
+Compute Engine [images](https://cloud.google.com/compute/docs/images).
-1. [Google Startup Scripts](https://cloud.google.com/compute/docs/startupscript) - Scripts and configuration files that setup a Linux-based image to work smoothly with GCE.
-1. Google Daemon - A service that manages user accounts, maintains ssh login keys, syncs the system clock after migration, and syncs public endpoint IP addresses.
-1. Disk Expand - Scripts to expand the root partition on GCE VM's for CentOS 6 and RHEL 6 images.
+## Background
-## Installation
+The Linux guest environment denotes the Google provided configuration and
+tooling inside of a [Google Compute Engine](https://cloud.google.com/compute/)
+(GCE) virtual machine. The
+[metadata server](https://cloud.google.com/compute/docs/metadata) is a
+communication channel for transferring information from a client into the guest.
+The Linux guest environment includes a set of scripts and daemons (long-running
+processes) that read the content of the metadata server to make a virtual
+machine run properly on our platform.
-### From Release Tarballs
-The easiest way to install these packages into a Linux-based image is to extract each tarball to `/` (root). Image Bundle does not have a directory structure, it is recommended to it extract to `/usr/share/imagebundle`. The tarballs are available in [releases](https://github.com/GoogleCloudPlatform/compute-image-packages/releases).
+## Guest Overview
-Refer to [Building a Google Compute Engine Image](https://cloud.google.com/compute/docs/images) for the complete guide.
+The guest environment is made up of the following components:
-### From Source Repository
-Occasionally you may want to install the latest commits to the [repository](https://github.com/GoogleCloudPlatform/compute-image-packages/) even if they have not been released. This is not recommended unless there is a change that you specifically need and cannot wait for. To do this:
+* **Accounts** daemon to set up and manage user accounts, and to enable SSH key
+ based authentication.
+* **Clock skew** daemon to keep the system clock in sync after VM start and
+ stop events.
+* **Disk expand** scripts to expand the VM root partition for CentOS 6,
+ CentOS 7, RHEL 6, and RHEL 7 images.
+* **Instance setup** scripts to execute VM configuration scripts during boot.
+* **IP forwarding** daemon that integrates network load balancing forwarding
+  rule changes into the guest.
+* **Metadata scripts** to run user-provided scripts at VM startup and
+  shutdown.
-1. Log in to your target machine.
-1. Clone the repository with
+The Linux guest environment is written in Python, and is version agnostic
+between Python 2.6 and 3.5. There is complete unittest coverage for every
+Python library and script. The design of the various guest libraries, daemons,
+and scripts is detailed in the sections below.
- git clone https://github.com/GoogleCloudPlatform/compute-image-packages.git
+## Common Libraries
-1. Copy the google-daemon and google-startup-scripts files to your root directory with
+The Python libraries are shared by the daemons and the instance setup tools.
- sudo cp -R compute-image-packages/{google-daemon/{etc,usr},google-startup-scripts/{etc,usr,lib}} /
+### Metadata Watcher
-1. Configure the packages to run on startup with (Debian)
+The guest environment relies upon retrieving content from the metadata server to
+configure the VM environment. A metadata watching library handles all
+communication with the metadata server.
- sudo update-rc.d google-startup-scripts defaults && sudo update-rc.d google-accounts-manager defaults && sudo update-rc.d google-address-manager defaults && sudo update-rc.d google-clock-sync-manager defaults
+The library exposes two functions:
- or (Redhat)
+* **GetMetadata** immediately retrieves the contents of the metadata server
+  for a given metadata key. The function catches and logs any
+  connection-related exceptions. The metadata server content is returned as a
+  deserialized JSON object.
+* **WatchMetadata** continuously makes a hanging GET, watching for changes to
+  the specified contents of the metadata server. When the request closes, the
+  watcher checks whether the etag was updated. In case of an update, the
+  stored etag is updated and a provided handler function is called with the
+  deserialized JSON metadata content. The WatchMetadata function should never
+  terminate; it catches and logs any connection-related exceptions, and
+  catches and logs any exception generated from calling the handler.
- sudo chkconfig --add google-startup-scripts && sudo chkconfig --add google-accounts-manager && sudo chkconfig --add google-address-manager && sudo chkconfig --add google-clock-sync-manager
+Metadata server requests have custom retry logic for metadata server
+unavailability; by default, any request has one minute to complete before the
+request is cancelled. In case of a brief network outage where the metadata
+server is unavailable, there is a short delay between retries.
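+
+A minimal sketch of the watcher pattern (cf. `metadata_watcher.py`), using
+Python 3's `urllib` for brevity; the `handler` callback, initial etag, and
+retry delay here are illustrative, not the library's exact implementation:
+
+    import json
+    import time
+    import urllib.request
+
+    METADATA_URL = 'http://metadata.google.internal/computeMetadata/v1/'
+
+    def get_metadata(key, params=''):
+        # Every metadata request must carry the Metadata-Flavor header.
+        request = urllib.request.Request(
+            '%s%s?recursive=true%s' % (METADATA_URL, key, params),
+            headers={'Metadata-Flavor': 'Google'})
+        response = urllib.request.urlopen(request, timeout=70)
+        content = json.loads(response.read().decode('utf-8'))
+        return content, response.headers.get('etag')
+
+    def watch_metadata(key, handler, etag='NONE'):
+        while True:
+            try:
+                # Hanging GET: blocks until the watched content changes.
+                content, new_etag = get_metadata(
+                    key, '&wait_for_change=true&last_etag=%s' % etag)
+                if new_etag and new_etag != etag:
+                    etag = new_etag
+                    handler(content)
+            except Exception:
+                # Connection errors are logged and retried after a delay.
+                time.sleep(1)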
-1. Either restart so the packages run or start them with (Debian and Redhat)
+### Logging
- sudo service google-accounts-manager restart && sudo service google-address-manager restart && sudo service google-clock-sync-manager restart
+The Google-added daemons and scripts write to the serial port for added
+transparency. A common logging library is a thin wrapper around the Python
+logging module. The library configures appropriate SysLog handlers, sets the
+logging formatter, and provides a debug option for added logging and console
+output.
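+
+A minimal sketch of such a wrapper (cf. `logger.py`); the handler set and
+format string here are illustrative:
+
+    import logging
+    import logging.handlers
+
+    def get_logger(name, debug=False):
+        logger = logging.getLogger(name)
+        logger.setLevel(logging.DEBUG if debug else logging.INFO)
+        # Messages go to the system logger, which the guest configuration
+        # forwards to the serial console.
+        syslog = logging.handlers.SysLogHandler(address='/dev/log')
+        syslog.setFormatter(
+            logging.Formatter(name + ': %(levelname)s %(message)s'))
+        logger.addHandler(syslog)
+        if debug:
+            # The debug option adds console output.
+            logger.addHandler(logging.StreamHandler())
+        return logger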
-## Source Code
-This repository is structured so that each package is located in its own top-level directory. [`google-startup-scripts`](google-startup-scripts/) and [`google-daemon`](google-daemon/) are stored as the directory structure of where the files would be from root.
+### Configuration Management
+
+A configuration file allows users to disable daemons and modify instance setup
+behaviors from a single location. Guest environment daemons and scripts need a
+mechanism to integrate user settings into the guest. A configuration management
+library retrieves and modifies these settings.
+
+The library exposes the following functions:
+
+* **GetOptionString** retrieves the value for a configuration option. The type
+ of the value is a string if set.
+* **GetOptionBool** retrieves the value for a configuration option. The type
+ of the value is a boolean if set.
+* **SetOption** sets the value of an option in the config file. An overwrite
+ flag specifies whether to replace an existing value.
+* **WriteConfig** writes the configuration values to a file. The function is
+ responsible for locking the file, preventing concurrent writes, and writing
+ a file header if one is provided.
+
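+A condensed sketch of the library's shape (cf. `config_manager.py`), built on
+the standard `ConfigParser` module; file locking and the header are elided,
+and the config path is illustrative:
+
+    try:
+        import configparser  # Python 3.
+    except ImportError:
+        import ConfigParser as configparser  # Python 2.
+
+    class ConfigManager(object):
+        def __init__(self, config_file='/etc/default/instance_configs.cfg'):
+            self.config_file = config_file
+            self.config = configparser.ConfigParser()
+            self.config.read(config_file)
+
+        def GetOptionString(self, section, option):
+            if self.config.has_option(section, option):
+                return self.config.get(section, option)
+            return None
+
+        def GetOptionBool(self, section, option):
+            return (self.config.has_option(section, option) and
+                    self.config.getboolean(section, option))
+
+        def SetOption(self, section, option, value, overwrite=True):
+            if not overwrite and self.config.has_option(section, option):
+                return
+            if not self.config.has_section(section):
+                self.config.add_section(section)
+            self.config.set(section, option, str(value))
+
+        def WriteConfig(self):
+            # The real library locks the file to prevent concurrent writes.
+            with open(self.config_file, 'w') as config_file:
+                self.config.write(config_file)
+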
+### File Management
+
+Guest environment daemons and scripts use a common library for file management.
+The library provides the following functions:
+
+* **SetPermissions** unifies the logic to set permissions and simplifies file
+  creation across the various Linux distributions. The function sets the mode,
+  UID, and GID of a provided path. On supported OS configurations that use
+  SELinux, the SELinux context is automatically set.
+* **LockFile** is a context manager that simplifies the process of file
+  locking in Python. The function sets up a flock and releases the lock on
+  exit.
+
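+A sketch of the two helpers (cf. `file_utils.py`); SELinux context handling
+is elided:
+
+    import contextlib
+    import fcntl
+    import os
+
+    def SetPermissions(path, mode=None, uid=None, gid=None):
+        # Set ownership first so the mode applies to the final owner.
+        if uid is not None and gid is not None:
+            os.chown(path, uid, gid)
+        if mode is not None:
+            os.chmod(path, mode)
+
+    @contextlib.contextmanager
+    def LockFile(path):
+        # An exclusive flock serializes access across processes.
+        fd = os.open(path, os.O_CREAT | os.O_RDWR)
+        try:
+            fcntl.flock(fd, fcntl.LOCK_EX)
+            yield fd
+        finally:
+            fcntl.flock(fd, fcntl.LOCK_UN)
+            os.close(fd)
+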
+## Daemons
+
+The guest environment daemons import and use the common libraries described
+above. Each daemon reads the configuration file before execution. This allows a
+user to easily disable undesired functionality. Additional daemon behaviors are
+detailed below.
+
+### Accounts
+
+The accounts daemon is responsible for provisioning and deprovisioning user
+accounts. The daemon grants permissions to user accounts, and updates the list
+of authorized keys that have access to accounts based on metadata SSH key
+updates. User account creation is based on
+[adding and removing SSH keys](https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys)
+stored in metadata.
+
+The accounts management daemon has the following behaviors.
+
+* Administrator permissions are managed with a `google-sudoers` Linux group.
+* All users provisioned by the account daemon are added to the
+ `google-sudoers` group.
+* The daemon stores a file in the guest to preserve state for the user
+ accounts managed by Google.
+* The authorized keys file for a Google-managed user is deleted when all SSH
+  keys for the user are removed from metadata.
+* User accounts not managed by Google are not modified by the accounts daemon.
+
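+For illustration, SSH keys are stored in metadata as newline-separated
+`<username>:<public-ssh-key>` records (the same format shown in the legacy
+README removed below); a minimal parser might look like:
+
+    def parse_ssh_keys(metadata_value):
+        # Group keys by the username that prefixes each record.
+        user_keys = {}
+        for line in metadata_value.splitlines():
+            if ':' not in line:
+                continue
+            username, key = line.split(':', 1)
+            user_keys.setdefault(username.strip(), []).append(key.strip())
+        return user_keys
+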
+### Clock Skew
+
+The clock skew daemon is responsible for syncing the software clock with the
+hypervisor clock after a stop/start event or after a migration. Correcting
+clock skew may result in `system time has changed` messages in VM logs.
+
+### IP Forwarding
+
+The IP forwarding daemon uses IP forwarding metadata to set up or remove IP
+routes in the guest.
+
+* Only IPv4 addresses are currently supported.
+* Routes are set on the default Ethernet interface, which is determined
+  dynamically.
+* Google routes are configured, by default, with the routing protocol ID
+  `66`. This ID is a namespace for daemon-configured IP addresses.
+
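+A hedged sketch of how such a route could be added or removed with the `ip`
+tool (cf. `ip_forwarding_utils.py`); the real daemon reads existing routes
+first, and `eth0` and the helper name are illustrative:
+
+    import subprocess
+
+    def modify_route(action, address, interface='eth0', proto_id='66'):
+        # action is 'add' or 'delete'; proto 66 namespaces daemon routes.
+        command = ['ip', 'route', action, 'to', 'local', '%s/32' % address,
+                   'scope', 'host', 'dev', interface, 'proto', proto_id]
+        subprocess.check_call(command)
+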
+## Instance Setup
+
+Instance setup runs during VM boot. The script configures the Linux guest
+environment by performing the following tasks.
+
+* Optimize for local SSD.
+* Enable multi-queue on all the virtionet devices.
+* Wait for network availability.
+* Set SSH host keys the first time the instance is booted.
+* Set the boto config for using Google Cloud Storage.
+* Create the defaults configuration file.
+
+The defaults configuration file incorporates any user-provided settings in
+`/etc/default/instance_configs.cfg.template` and does not override other
+conflicting settings. This allows package updates without overriding user
+configuration.
+
+## Metadata Scripts
+
+Metadata scripts implement support for running user-provided
+[startup scripts](https://cloud.google.com/compute/docs/startupscript) and
+[shutdown scripts](https://cloud.google.com/compute/docs/shutdownscript). The
+guest support for metadata scripts is implemented in Python with the following
+design details.
+
+* Metadata scripts are executed in a shell.
+* If multiple metadata keys are specified (e.g. `startup-script` and
+  `startup-script-url`), both are executed.
+* If multiple metadata keys are specified (e.g. `startup-script` and
+  `startup-script-url`), the URL script is executed first (see the sketch
+  below).
+* The exit status of a metadata script is logged after it completes execution.
+
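+A minimal sketch of that ordering (cf. `script_retriever.py` and
+`script_executor.py`), using Python 3's `urllib` for brevity; the real
+retriever supports additional sources and cleans up its temporary files:
+
+    import subprocess
+    import tempfile
+    import urllib.request
+
+    def run_startup_scripts(metadata):
+        # The URL key is executed before the inline key; both run if set.
+        for key in ('startup-script-url', 'startup-script'):
+            if key not in metadata:
+                continue
+            content = metadata[key]
+            if key.endswith('-url'):
+                content = urllib.request.urlopen(content).read().decode()
+            with tempfile.NamedTemporaryFile(
+                    'w', suffix='.sh', delete=False) as script:
+                script.write(content)
+            # Scripts are executed in a shell; the exit status is logged.
+            status = subprocess.call(['/bin/bash', script.name])
+            print('%s exit status: %d' % (key, status))
+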
+## Configuration
+
+Users of Google-provided images may configure the guest environment behaviors
+using a configuration file. To make configuration changes, add settings to
+`/etc/default/instance_configs.cfg.template`. Settings are not overridden in the
+guest.
+
+The following are valid user configuration options.
+
+Section | Option | Value
+--------------- | -------------------- | -----
+Accounts | deprovision_remove | `true` makes deprovisioning a user destructive.
+Accounts | groups | Comma separated list of groups for newly provisioned users.
+Daemons | accounts_daemon | `false` disables the accounts daemon.
+Daemons | clock_skew_daemon | `false` disables the clock skew daemon.
+Daemons | ip_forwarding_daemon | `false` disables the IP forwarding daemon.
+InstanceSetup | optimize_local_ssd | `false` prevents optimizing for local SSD.
+InstanceSetup   | network_enabled      | `false` skips all metadata-related functionality during instance setup.
+InstanceSetup | set_boto_config | `false` skips setting up a boto config.
+InstanceSetup | set_host_keys | `false` skips generating host keys on first boot.
+InstanceSetup | set_multiqueue | `false` skips multiqueue driver support.
+IpForwarding    | ethernet_proto_id    | Protocol ID string for daemon-added routes.
+MetadataScripts | startup | `false` disables startup script execution.
+MetadataScripts | shutdown | `false` disables shutdown script execution.
+
+Setting `network_enabled` to `false` will skip setting up host keys and the
+boto config in the guest. The setting may also prevent startup and shutdown
+script execution.
+
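+For example, to disable the accounts daemon and add provisioned users to a
+custom set of groups, the template might contain (values illustrative):
+
+    [Daemons]
+    accounts_daemon = false
+
+    [Accounts]
+    groups = adm,dip,plugdev,video
+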
+## Packaging
+
+The guest Python code is packaged as a
+[compliant PyPI Python package](http://python-packaging-user-guide.readthedocs.io/en/latest/)
+that can be used as a library or run independently. In addition to the Python
+package, deb and rpm packages are created with appropriate init configuration
+for supported GCE distros. The packages target the distribution-provided
+Python versions.
+
+Distro | Package Type | Python Version | Init System
+------------ | ------------ | -------------- | -----------
+Debian 7 | deb | 2.7 | sysvinit
+Debian 8 | deb | 2.7 | systemd
+CentOS 6 | rpm | 2.6 | upstart
+CentOS 7 | rpm | 2.7 | systemd
+RHEL 6 | rpm | 2.6 | upstart
+RHEL 7 | rpm | 2.7 | systemd
+Ubuntu 12.04 | deb | 2.7 | upstart
+Ubuntu 14.04 | deb | 2.7 | upstart
+Ubuntu 16.04 | deb | 3.5 or 2.7 | systemd
+SLES 11 | rpm | 2.6 | sysvinit
+SLES 12 | rpm | 2.7 | systemd
+
+We build the following packages for the Linux guest environment.
+
+* `google_compute_engine` is a Python package for Linux daemons, scripts, and
+ libraries.
+  * The package is installed to the distro's default Python package location
+ (e.g. `/usr/lib/python2.7/site-packages`).
+ * Includes appropriate init files for sysvinit, upstart, and systemd.
+ * Entry point scripts, created by the Python package, are located in
+ `/usr/bin`.
+* `google_configs` is a package containing non-Python scripts and guest
+ configuration.
+ * Sets up udev rules and sysctl rules.
+  * Configures the SysLog output that is sent to the serial port.
+ * Includes bash scripts needed by `instance_setup`.
+
+The package build tools are published. The tools are run on a GCE VM to build
+release packages, and the output deb and rpm packages are included in our
+GitHub releases.
## Contributing
-Have a patch that will benefit this project? Awesome! Follow these steps to have it accepted.
-1. Please sign our [Contributor License Agreement](CONTRIB.md).
-1. Fork this Git repository and make your changes.
-1. Create a Pull Request
-1. Incorporate review feedback to your changes.
-1. Accepted!
+Have a patch that will benefit this project? Awesome! Follow these steps to have
+it accepted.
+
+1. Please sign our [Contributor License Agreement](CONTRIB.md).
+1. Fork this Git repository and make your changes.
+1. Create a Pull Request against the
+ [development](https://github.com/GoogleCloudPlatform/compute-image-packages/tree/development)
+ branch.
+1. Incorporate review feedback into your changes.
+1. Accepted!
## License
-All files in this repository are under the [Apache License, Version 2.0](LICENSE) unless noted otherwise.
+
+All files in this repository are under the
+[Apache License, Version 2.0](LICENSE) unless noted otherwise.
diff --git a/VERSION b/VERSION
deleted file mode 100644
index 31e5c84..0000000
--- a/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-1.3.3
diff --git a/build_packages.sh b/build_packages.sh
new file mode 100755
index 0000000..b119403
--- /dev/null
+++ b/build_packages.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Build the Linux guest environment deb and rpm packages.
+
+TIMESTAMP="$(date +%s)"
+
+function build_distro() {
+ declare -r distro="$1"
+ declare -r pkg_type="$2"
+ declare -r init_config="$3"
+ declare -r py_path="$4"
+ declare name='google-compute-engine'
+
+ export CONFIG="${init_config}"
+
+ if [[ "${pkg_type}" == 'deb' ]]; then
+ name="${name}-${distro}"
+ fi
+
+ fpm \
+ -s python \
+ -t "${pkg_type}" \
+ --after-install "package/${init_config}/postinst.sh" \
+ --before-remove "package/${init_config}/prerm.sh" \
+ --depends 'python-boto' \
+ --depends 'python-setuptools' \
+ --iteration "0.${TIMESTAMP}" \
+ --maintainer 'gc-team@google.com' \
+ --name "${name}" \
+ --no-python-fix-name \
+ --python-install-bin '/usr/bin' \
+ --python-install-lib "${py_path}" \
+ --python-install-data "/usr/share/doc/${name}" \
+ --rpm-dist "${distro}" \
+ setup.py
+}
+
+# RHEL/CentOS
+build_distro 'el6' 'rpm' 'upstart' '/usr/lib/python2.6/site-packages'
+build_distro 'el7' 'rpm' 'systemd' '/usr/lib/python2.7/site-packages'
+
+# Debian
+build_distro 'wheezy' 'deb' 'sysvinit' '/usr/lib/python2.7/dist-packages'
+build_distro 'jessie' 'deb' 'systemd' '/usr/lib/python2.7/dist-packages'
diff --git a/disk-expand/README.md b/disk_expand/README.md
index 276e790..276e790 100644
--- a/disk-expand/README.md
+++ b/disk_expand/README.md
diff --git a/disk-expand/build_package.sh b/disk_expand/build_packages.sh
index c3ef36b..c3ef36b 100755
--- a/disk-expand/build_package.sh
+++ b/disk_expand/build_packages.sh
diff --git a/disk-expand/expand-root b/disk_expand/expand-root
index 7b35ce3..7b35ce3 100755
--- a/disk-expand/expand-root
+++ b/disk_expand/expand-root
diff --git a/disk-expand/expand-root.service b/disk_expand/expand-root.service
index 723d317..723d317 100644
--- a/disk-expand/expand-root.service
+++ b/disk_expand/expand-root.service
diff --git a/disk-expand/gce-disk-expand-el6.spec b/disk_expand/gce-disk-expand-el6.spec
index a2366b9..a2366b9 100644
--- a/disk-expand/gce-disk-expand-el6.spec
+++ b/disk_expand/gce-disk-expand-el6.spec
diff --git a/disk-expand/gce-disk-expand-el7.spec b/disk_expand/gce-disk-expand-el7.spec
index 2cfae42..2cfae42 100644
--- a/disk-expand/gce-disk-expand-el7.spec
+++ b/disk_expand/gce-disk-expand-el7.spec
diff --git a/disk-expand/third_party/cloud-utils/LICENSE b/disk_expand/third_party/cloud-utils/LICENSE
index 94a9ed0..94a9ed0 100644
--- a/disk-expand/third_party/cloud-utils/LICENSE
+++ b/disk_expand/third_party/cloud-utils/LICENSE
diff --git a/disk-expand/third_party/cloud-utils/README.google b/disk_expand/third_party/cloud-utils/README.google
index d905302..d905302 100644
--- a/disk-expand/third_party/cloud-utils/README.google
+++ b/disk_expand/third_party/cloud-utils/README.google
diff --git a/disk-expand/third_party/cloud-utils/growpart b/disk_expand/third_party/cloud-utils/growpart
index ce3b787..ce3b787 100755
--- a/disk-expand/third_party/cloud-utils/growpart
+++ b/disk_expand/third_party/cloud-utils/growpart
diff --git a/disk-expand/third_party/dracut-modules-growroot/LICENSE b/disk_expand/third_party/dracut-modules-growroot/LICENSE
index 94a9ed0..94a9ed0 100644
--- a/disk-expand/third_party/dracut-modules-growroot/LICENSE
+++ b/disk_expand/third_party/dracut-modules-growroot/LICENSE
diff --git a/disk-expand/third_party/dracut-modules-growroot/README.google b/disk_expand/third_party/dracut-modules-growroot/README.google
index e65c620..e65c620 100644
--- a/disk-expand/third_party/dracut-modules-growroot/README.google
+++ b/disk_expand/third_party/dracut-modules-growroot/README.google
diff --git a/disk-expand/third_party/dracut-modules-growroot/growroot-dummy.sh b/disk_expand/third_party/dracut-modules-growroot/growroot-dummy.sh
index 25bed15..25bed15 100755
--- a/disk-expand/third_party/dracut-modules-growroot/growroot-dummy.sh
+++ b/disk_expand/third_party/dracut-modules-growroot/growroot-dummy.sh
diff --git a/disk-expand/third_party/dracut-modules-growroot/growroot.sh b/disk_expand/third_party/dracut-modules-growroot/growroot.sh
index e09c809..e09c809 100755
--- a/disk-expand/third_party/dracut-modules-growroot/growroot.sh
+++ b/disk_expand/third_party/dracut-modules-growroot/growroot.sh
diff --git a/disk-expand/third_party/dracut-modules-growroot/install b/disk_expand/third_party/dracut-modules-growroot/install
index 70df2a8..70df2a8 100755
--- a/disk-expand/third_party/dracut-modules-growroot/install
+++ b/disk_expand/third_party/dracut-modules-growroot/install
diff --git a/google-daemon/README.md b/google-daemon/README.md
deleted file mode 100644
index 0e44860..0000000
--- a/google-daemon/README.md
+++ /dev/null
@@ -1,59 +0,0 @@
-## Google Daemon
-Google daemon runs in the background and provides the following services:
-
-+ Creates new accounts based on the instance metadata.
-+ Configures SSH to accept the accounts' public keys from the instance metadata.
-+ Adds IP addresses of network load balancers as aliases of the external Ethernet interface
-+ Resyncs clock if skewed due to [live migration](https://googlecloudplatform.blogspot.com/2015/03/Google-Compute-Engine-uses-Live-Migration-technology-to-service-infrastructure-without-application-downtime.html)
-
-Google Daemon services are typically located at:
-
- /usr/share/google/google_daemon/
-
-#### Account synchronization
-
-Your users can create SSH keys for accounts on a virtual machine using [gcloud compute](https://cloud.google.com/compute/docs/gcloud-compute/) or manually using these steps:
-
- # Generate the ssh keys
- $ ssh-keygen -t rsa -f ~/.ssh/google_compute_engine
-
- # Create public RSA key in OpenSSH format
- $ ssh-rsa [base-64-encoded-public-key] [comment]
-
-In the metadata server, the SSH keys are passed to a virtual machine individually, or to the project using the `commoninstancemetadata` property:
-
- {
- kind: "compute#metadata",
- items: [
- "key": "sshKeys",
- "value": "<ssh-keys-value>"
- ]
- }
-
-`<ssh-keys-value>` is a newline-separated list of individual authorized public ssh key records, each in the format:
-
- <username>:<public-ssh-key-file-contents>
-
-For example:
-
- {
- "kind": "compute#project",
- "name": "project-name",
- "commonInstanceMetadata": {
- "kind": "compute#metadata",
- "items": [
- {
- "key": "sshKeys",
- "value": "user1:ssh-rsa AAAA...pIy9 user@host.domain.com\nuser2:ssh-rsa AAAA...ujOz user@host.domain.com"
- }
- ]
- }
-
-For more information about the metadata server, read the [metadata server](http://developers.google.com/compute/docs/metadata "metadata server") documentation.
-
-Inside a virtual machine, a cron job runs every minute to check if project or instance metadata was updated with the new sshKeys value, and makes sure those users exist. It also checks that the keys are in the `~$USER/.ssh/authorized_keys` file.
-
-__Note:__ It is recommended that you use a `wait-for-change` request through the metadata server to detect updates. See [metadata server](https://developers.google.com/compute/docs/metadata#waitforchange) for more information.
-
-Other account management software can be used instead of Google Daemon but you will have to configure the software to read user accounts from the metadata server.
-
diff --git a/google-daemon/etc/init.d/google-accounts-manager b/google-daemon/etc/init.d/google-accounts-manager
deleted file mode 100755
index 4f414c4..0000000
--- a/google-daemon/etc/init.d/google-accounts-manager
+++ /dev/null
@@ -1,193 +0,0 @@
-#! /bin/sh
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-### BEGIN INIT INFO
-# Provides: gce_manage_accounts
-# X-Start-Before: ssh
-# Required-Start: $local_fs $network $named $syslog
-# Required-Stop:
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: Google Compute Engine accounts manager service
-# Description: This launches the Google Compute Engine accounts manager
-# daemon.
-### END INIT INFO
-
-# Do NOT "set -e"
-
-# PATH should only include /usr/* if it runs after the mountnfs.sh script
-PATH=/sbin:/usr/sbin:/bin:/usr/bin
-DESC="Google Compute Engine accounts manager service"
-NAME=manage_accounts
-DAEMON=/usr/share/google/google_daemon/manage_accounts.py
-DAEMON_ARGS="--daemon"
-PIDFILE=/var/run/$NAME.pid
-SCRIPTNAME=/etc/init.d/google-manage-accounts
-
-# Exit if the package is not installed
-[ -x "$DAEMON" ] || exit 0
-
-# Read configuration variable file if it is present
-[ -r /etc/default/$NAME ] && . /etc/default/$NAME
-
-# Load the VERBOSE setting and other rcS variables
-. /lib/init/vars.sh
-
-# Define LSB log_* functions.
-# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
-# and status_of_proc is working.
-. /lib/lsb/init-functions
-
-# If we're running under upstart, let the upstart config file handle things.
-# Debian 7 and newer have a near-one-liner function to detect this...
-if type init_is_upstart >/dev/null 2>&1; then
- # ... which we can use if present.
- init_is_upstart && exit 0
-else
- # Otherwise, directly include the core line of Debian 7's version.
- # Authorship credit: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=661109
- if [ -x /sbin/initctl ] && /sbin/initctl version | /bin/grep -q upstart; then
- exit 0
- fi
-fi
-
-#
-# Function that starts the daemon/service
-#
-do_start()
-{
- # One-shot run prior to daemonizing.
- $DAEMON --single-pass
-
- # In case of power outage or hard reboot, ensure that SSH keys have been
- # written to disk before starting the daemon. At this point the other
- # Google-specific startup logic will have already occurred, sometimes
- # including other steps which would be good to write to disk; since syncs are
- # expensive and we don't want to do it twice during boot, just do it once
- # here.
- sync
-
- # Return
- # 0 if daemon has been started
- # 1 if daemon was already running
- # 2 if daemon could not be started
- start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
- || return 1
- start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \
- $DAEMON_ARGS \
- || return 2
- # Add code here, if necessary, that waits for the process to be ready
- # to handle requests from services started subsequently which depend
- # on this one. As a last resort, sleep for some time.
-}
-
-#
-# Function that stops the daemon/service
-#
-do_stop()
-{
- # Return
- # 0 if daemon has been stopped
- # 1 if daemon was already stopped
- # 2 if daemon could not be stopped
- # other if a failure occurred
- start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
- RETVAL="$?"
- [ "$RETVAL" = 2 ] && return 2
- # Wait for children to finish too if this is a daemon that forks
- # and if the daemon is only ever run from this initscript.
- # If the above conditions are not satisfied then add some other code
- # that waits for the process to drop all resources that could be
- # needed by services started subsequently. A last resort is to
- # sleep for some time.
- start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
- [ "$?" = 2 ] && return 2
- # Many daemons don't delete their pidfiles when they exit.
- rm -f $PIDFILE
- return "$RETVAL"
-}
-
-#
-# Function that sends a SIGHUP to the daemon/service
-#
-do_reload() {
- #
- # If the daemon can reload its configuration without
- # restarting (for example, when it is sent a SIGHUP),
- # then implement that here.
- #
- start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name $NAME
- return 0
-}
-
-case "$1" in
- start)
- [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
- do_start
- case "$?" in
- 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- stop)
- [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
- do_stop
- case "$?" in
- 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- status)
- status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
- ;;
- #reload|force-reload)
- #
- # If do_reload() is not implemented then leave this commented out
- # and leave 'force-reload' as an alias for 'restart'.
- #
- #log_daemon_msg "Reloading $DESC" "$NAME"
- #do_reload
- #log_end_msg $?
- #;;
- restart|force-reload)
- #
- # If the "reload" option is implemented then remove the
- # 'force-reload' alias
- #
- log_daemon_msg "Restarting $DESC" "$NAME"
- do_stop
- case "$?" in
- 0|1)
- do_start
- case "$?" in
- 0) log_end_msg 0 ;;
- 1) log_end_msg 1 ;; # Old process is still running
- *) log_end_msg 1 ;; # Failed to start
- esac
- ;;
- *)
- # Failed to stop
- log_end_msg 1
- ;;
- esac
- ;;
- *)
- #echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload}" >&2
- echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
- exit 3
- ;;
-esac
-
-:
diff --git a/google-daemon/etc/init.d/google-address-manager b/google-daemon/etc/init.d/google-address-manager
deleted file mode 100755
index e74cab6..0000000
--- a/google-daemon/etc/init.d/google-address-manager
+++ /dev/null
@@ -1,153 +0,0 @@
-#! /bin/sh
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-### BEGIN INIT INFO
-# Provides: google-address-manager
-# Required-Start: $network $syslog
-# Required-Stop: $network
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: Example initscript
-# Description: This file should be used to construct scripts to be
-# placed in /etc/init.d.
-### END INIT INFO
-
-# Do NOT "set -e"
-
-# PATH should only include /usr/* if it runs after the mountnfs.sh script
-PATH=/sbin:/usr/sbin:/bin:/usr/bin
-DESC="Google IP address manager"
-NAME=google-address-manager
-DAEMON=/usr/share/google/google_daemon/manage_addresses.py
-DAEMON_ARGS=""
-PIDFILE=/var/run/$NAME.pid
-SCRIPTNAME=/etc/init.d/$NAME
-
-# Exit if the package is not installed
-[ -x "$DAEMON" ] || exit 0
-
-# Read configuration variable file if it is present
-[ -r /etc/default/$NAME ] && . /etc/default/$NAME
-
-# Load the VERBOSE setting and other rcS variables
-. /lib/init/vars.sh
-
-# Define LSB log_* functions.
-# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
-# and status_of_proc is working.
-. /lib/lsb/init-functions
-
-# If we're running under upstart, let the upstart config file handle things.
-# Debian 7 and newer have a near-one-liner function to detect this...
-if type init_is_upstart >/dev/null 2>&1; then
- # ... which we can use if present.
- init_is_upstart && exit 0
-else
- # Otherwise, directly include the core line of Debian 7's version.
- # Authorship credit: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=661109
- if [ -x /sbin/initctl ] && /sbin/initctl version | /bin/grep -q upstart; then
- exit 0
- fi
-fi
-
-#
-# Function that starts the daemon/service
-#
-do_start()
-{
- # Return
- # 0 if daemon has been started
- # 1 if daemon was already running
- # 2 if daemon could not be started
- start-stop-daemon --start --quiet --make-pidfile --background \
- --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
- || return 1
- start-stop-daemon --start --quiet --make-pidfile --background \
- --pidfile $PIDFILE --exec $DAEMON -- \
- $DAEMON_ARGS || return 2
-}
-
-#
-# Function that stops the daemon/service
-#
-do_stop()
-{
- # Return
- # 0 if daemon has been stopped
- # 1 if daemon was already stopped
- # 2 if daemon could not be stopped
- # other if a failure occurred
- start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
- RETVAL="$?"
- [ "$RETVAL" = 2 ] && return 2
- # Wait for children to finish too if this is a daemon that forks
- # and if the daemon is only ever run from this initscript.
- # If the above conditions are not satisfied then add some other code
- # that waits for the process to drop all resources that could be
- # needed by services started subsequently. A last resort is to
- # sleep for some time.
- start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 \
- --pidfile $PIDFILE
- [ "$?" = 2 ] && return 2
- # Many daemons don't delete their pidfiles when they exit.
- rm -f $PIDFILE
- return "$RETVAL"
-}
-
-case "$1" in
- start)
- [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
- do_start
- case "$?" in
- 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- stop)
- [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
- do_stop
- case "$?" in
- 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- status)
- status_of_proc -p "$PIDFILE" "$DAEMON" "$NAME" && exit 0 || exit $?
- ;;
- restart|force-reload)
- log_daemon_msg "Restarting $DESC" "$NAME"
- do_stop
- case "$?" in
- 0|1)
- do_start
- case "$?" in
- 0) log_end_msg 0 ;;
- 1) log_end_msg 1 ;; # Old process is still running
- *) log_end_msg 1 ;; # Failed to start
- esac
- ;;
- *)
- # Failed to stop
- log_end_msg 1
- ;;
- esac
- ;;
- *)
- echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
- exit 3
- ;;
-esac
-
-:
diff --git a/google-daemon/etc/init.d/google-clock-sync-manager b/google-daemon/etc/init.d/google-clock-sync-manager
deleted file mode 100755
index b85f9de..0000000
--- a/google-daemon/etc/init.d/google-clock-sync-manager
+++ /dev/null
@@ -1,153 +0,0 @@
-#! /bin/sh
-# Copyright 2015 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-### BEGIN INIT INFO
-# Provides: google-clock-manager
-# Required-Start: $network $syslog
-# Required-Stop: $network
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: Example initscript
-# Description: This file should be used to construct scripts to be
-# placed in /etc/init.d.
-### END INIT INFO
-
-# Do NOT "set -e"
-
-# PATH should only include /usr/* if it runs after the mountnfs.sh script
-PATH=/sbin:/usr/sbin:/bin:/usr/bin
-DESC="Google clock sync manager"
-NAME=google-clock-sync-manager
-DAEMON=/usr/share/google/google_daemon/manage_clock_sync.py
-DAEMON_ARGS=""
-PIDFILE=/var/run/$NAME.pid
-SCRIPTNAME=/etc/init.d/$NAME
-
-# Exit if the package is not installed
-[ -x "$DAEMON" ] || exit 0
-
-# Read configuration variable file if it is present
-[ -r /etc/default/$NAME ] && . /etc/default/$NAME
-
-# Load the VERBOSE setting and other rcS variables
-. /lib/init/vars.sh
-
-# Define LSB log_* functions.
-# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
-# and status_of_proc is working.
-. /lib/lsb/init-functions
-
-# If we're running under upstart, let the upstart config file handle things.
-# Debian 7 and newer have a near-one-liner function to detect this...
-if type init_is_upstart >/dev/null 2>&1; then
- # ... which we can use if present.
- init_is_upstart && exit 0
-else
- # Otherwise, directly include the core line of Debian 7's version.
- # Authorship credit: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=661109
- if [ -x /sbin/initctl ] && /sbin/initctl version | /bin/grep -q upstart; then
- exit 0
- fi
-fi
-
-#
-# Function that starts the daemon/service
-#
-do_start()
-{
- # Return
- # 0 if daemon has been started
- # 1 if daemon was already running
- # 2 if daemon could not be started
- start-stop-daemon --start --quiet --make-pidfile --background \
- --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
- || return 1
- start-stop-daemon --start --quiet --make-pidfile --background \
- --pidfile $PIDFILE --exec $DAEMON -- \
- $DAEMON_ARGS || return 2
-}
-
-#
-# Function that stops the daemon/service
-#
-do_stop()
-{
- # Return
- # 0 if daemon has been stopped
- # 1 if daemon was already stopped
- # 2 if daemon could not be stopped
- # other if a failure occurred
- start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
- RETVAL="$?"
- [ "$RETVAL" = 2 ] && return 2
- # Wait for children to finish too if this is a daemon that forks
- # and if the daemon is only ever run from this initscript.
- # If the above conditions are not satisfied then add some other code
- # that waits for the process to drop all resources that could be
- # needed by services started subsequently. A last resort is to
- # sleep for some time.
- start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 \
- --pidfile $PIDFILE
- [ "$?" = 2 ] && return 2
- # Many daemons don't delete their pidfiles when they exit.
- rm -f $PIDFILE
- return "$RETVAL"
-}
-
-case "$1" in
- start)
- [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
- do_start
- case "$?" in
- 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- stop)
- [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
- do_stop
- case "$?" in
- 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- status)
- status_of_proc -p "$PIDFILE" "$DAEMON" "$NAME" && exit 0 || exit $?
- ;;
- restart|force-reload)
- log_daemon_msg "Restarting $DESC" "$NAME"
- do_stop
- case "$?" in
- 0|1)
- do_start
- case "$?" in
- 0) log_end_msg 0 ;;
- 1) log_end_msg 1 ;; # Old process is still running
- *) log_end_msg 1 ;; # Failed to start
- esac
- ;;
- *)
- # Failed to stop
- log_end_msg 1
- ;;
- esac
- ;;
- *)
- echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
- exit 3
- ;;
-esac
-
-:
diff --git a/google-daemon/etc/init/google-accounts-manager-service.conf b/google-daemon/etc/init/google-accounts-manager-service.conf
deleted file mode 100755
index 8707ca7..0000000
--- a/google-daemon/etc/init/google-accounts-manager-service.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-# google - Run account manager as a service after the one-shot account manager
-# task is done.
-#
-#
-start on (stopped google-accounts-manager-task
- and (starting ssh or starting sshd))
-stop on (stopping ssh or stopping sshd)
-respawn
-
-exec /usr/share/google/google_daemon/manage_accounts.py
diff --git a/google-daemon/etc/init/google-accounts-manager-task.conf b/google-daemon/etc/init/google-accounts-manager-task.conf
deleted file mode 100755
index 533ef6b..0000000
--- a/google-daemon/etc/init/google-accounts-manager-task.conf
+++ /dev/null
@@ -1,18 +0,0 @@
-# google - Run account manager as a one-shot task prior to sshd starting.
-#
-#
-start on (starting ssh or starting sshd)
-task
-
-script
- # One-shot run prior to daemonizing (in a different upstart job config file).
- /usr/share/google/google_daemon/manage_accounts.py --single-pass
-
- # In case of power outage or hard reboot, ensure that SSH keys have been
- # written to disk before starting the daemon. At this point the other
- # Google-specific startup logic will have already occurred, sometimes
- # including other steps which would be good to write to disk; since syncs are
- # expensive and we don't want to do it twice during boot, just do it once
- # here.
- sync
-end script
diff --git a/google-daemon/etc/init/google-address-manager.conf b/google-daemon/etc/init/google-address-manager.conf
deleted file mode 100755
index 9cf217c..0000000
--- a/google-daemon/etc/init/google-address-manager.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-# This service configures local addresses in a Google Compute Engine instance.
-start on google-rc-local-has-run
-
-respawn
-exec /usr/share/google/google_daemon/manage_addresses.py
diff --git a/google-daemon/etc/init/google-clock-sync-manager.conf b/google-daemon/etc/init/google-clock-sync-manager.conf
deleted file mode 100755
index 6d23a28..0000000
--- a/google-daemon/etc/init/google-clock-sync-manager.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-# This service syncs the clock after migration in a Google Compute Engine instance.
-start on google-rc-local-has-run
-
-respawn
-exec /usr/share/google/google_daemon/manage_clock_sync.py
diff --git a/google-daemon/usr/lib/systemd/system/google-accounts-manager.service b/google-daemon/usr/lib/systemd/system/google-accounts-manager.service
deleted file mode 100644
index 660cf54..0000000
--- a/google-daemon/usr/lib/systemd/system/google-accounts-manager.service
+++ /dev/null
@@ -1,12 +0,0 @@
-[Unit]
-Description=Google Compute Engine User Accounts Manager Daemon
-After=network.target
-Before=sshd.service
-Requires=network.target
-
-[Service]
-Type=simple
-ExecStart=/usr/share/google/google_daemon/manage_accounts.py
-
-[Install]
-WantedBy=multi-user.target
diff --git a/google-daemon/usr/lib/systemd/system/google-address-manager.service b/google-daemon/usr/lib/systemd/system/google-address-manager.service
deleted file mode 100644
index eadd2b0..0000000
--- a/google-daemon/usr/lib/systemd/system/google-address-manager.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Description=Google Compute Engine Address Manager Daemon
-After=network.target
-Requires=network.target
-
-[Service]
-Type=simple
-ExecStart=/usr/share/google/google_daemon/manage_addresses.py
-
-[Install]
-WantedBy=multi-user.target
diff --git a/google-daemon/usr/lib/systemd/system/google-clock-sync-manager.service b/google-daemon/usr/lib/systemd/system/google-clock-sync-manager.service
deleted file mode 100644
index c4fcc9b..0000000
--- a/google-daemon/usr/lib/systemd/system/google-clock-sync-manager.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Description=Google Compute Engine Clock Sync Daemon
-After=network.target
-Requires=network.target
-
-[Service]
-Type=simple
-ExecStart=/usr/share/google/google_daemon/manage_clock_sync.py
-
-[Install]
-WantedBy=multi-user.target
diff --git a/google-daemon/usr/share/google/google_daemon/accounts.py b/google-daemon/usr/share/google/google_daemon/accounts.py
deleted file mode 100755
index e14f5c0..0000000
--- a/google-daemon/usr/share/google/google_daemon/accounts.py
+++ /dev/null
@@ -1,431 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Update accounts needed on this GCE instance.
-
-Update accounts based on the contents of ACCOUNTS_URL, which should contain a
-newline-delimited file of accounts and SSH public keys. Each line has the form
-username:ssh_public_key and authorizes that key to log in to that account.
-
-If the account does not already exist on the system, it is created and added
-to /etc/sudoers to allow that account to administer the machine without needing
-a password.
-"""
-
-import errno
-import grp
-import logging
-import os
-import pwd
-import re
-import stat
-import tempfile
-import time
-
-
-def EnsureTrailingNewline(line):
- if line.endswith('\n'):
- return line
- return line + '\n'
-
-
-def IsUserSudoerInLines(user, sudoer_lines):
- """Return whether the user has an entry in the sudoer lines."""
-
- def IsUserSudoerEntry(line):
- return re.match(r'^%s\s+' % user, line)
-
- return filter(IsUserSudoerEntry, sudoer_lines)
-
-
-class Accounts(object):
- """Manage accounts on a machine."""
-
- # Comes from IEEE Std 1003.1-2001. Characters from the portable
- # filename character set. The hyphen should not be the first char
- # of a portable user name.
- VALID_USERNAME_CHARS = set(
- map(chr, range(ord('A'), ord('Z') + 1)) +
- map(chr, range(ord('a'), ord('z') + 1)) +
- map(chr, range(ord('0'), ord('9') + 1)) +
- ['_', '-', '.'])
-
- def __init__(self, grp_module=grp, os_module=os,
- pwd_module=pwd, system_module=None,
- urllib2_module=None, time_module=time):
- """Construct an Accounts given the module injections."""
- self.system_module = system_module
-
- self.grp = grp_module
- self.os = os_module
- self.pwd = pwd_module
- self.system = system_module
- self.time_module = time_module
- self.urllib2 = urllib2_module
-
- self.default_user_groups = self.GroupsThatExist(
- ['adm', 'video', 'dip', 'plugdev', 'sudo'])
-
- def UpdateUser(self, username, ssh_keys):
- """Create username on the system, with authorized ssh_keys."""
-
- if not self.IsValidUsername(username):
- logging.warning(
- 'Not creating account for user %s. Usernames must comprise'
- ' characters [A-Za-z0-9._-] and not start with \'-\'.', username)
- return
-
- if not self.UserExists(username):
- self.system.UserAdd(username, self.default_user_groups)
-
- if self.UserExists(username):
-      # Don't try to manage the SSH keys of an account whose shell is set to
-      # disable logins. This avoids problems caused by operator and root
-      # sharing a home directory on CentOS and RHEL.
- if self.UserNoLogin(username):
- logging.debug(
- 'Not processing account for user %s. User has /sbin/nologin'
- ' set as login shell', username)
- return
-
- # If we're just removing keys from a user who may have been in the
- # metadata server but isn't currently, we should never increase their
- # privileges. Therefore, only grant sudo access if we have ssh keys.
- if ssh_keys:
- self.MakeUserSudoer(username)
- self.AuthorizeSshKeys(username, ssh_keys)
-
- def IsValidUsername(self, username):
- """Return whether username looks like a valid user name."""
-
- def InvalidCharacterFilter(c):
- return c not in Accounts.VALID_USERNAME_CHARS
-
- if filter(InvalidCharacterFilter, username):
- # There's an invalid character in it.
- return False
-
- if username.startswith('-'):
- return False
-
- return True
-
- def GroupsThatExist(self, groups_list):
- """Return all the groups in groups_list that exist on the machine."""
-
- def GroupExists(group):
- try:
- self.grp.getgrnam(group)
- return True
- except KeyError:
- return False
-
- return filter(GroupExists, groups_list)
-
- def GetUserInfo(self, user):
- """Return a tuple of the user's (home_dir, pid, gid)."""
- pwent = self.pwd.getpwnam(user)
- return (pwent.pw_dir, pwent.pw_uid, pwent.pw_gid)
-
- def UserExists(self, user):
- """Test whether a given user exists or not."""
- try:
- self.pwd.getpwnam(user)
- return True
- except KeyError:
- return False
-
- def UserNoLogin(self, user):
- """Test whether a user's shell is /sbin/nologin."""
- pwent = self.pwd.getpwnam(user)
- return pwent.pw_shell == '/sbin/nologin'
-
- def LockSudoers(self):
- """Create an advisory lock on /etc/sudoers.tmp.
-
- Returns:
- True if successful, False if not.
- """
- try:
- f = self.os.open('/etc/sudoers.tmp', os.O_EXCL|os.O_CREAT)
- self.os.close(f)
- return True
- except OSError as e:
- if e.errno == errno.EEXIST:
- logging.warning('/etc/sudoers.tmp lock file already exists')
- else:
- logging.warning('Could not create /etc/sudoers.tmp lock file: %s', e)
- return False
-
- def UnlockSudoers(self):
- """Remove the advisory lock on /etc/sudoers.tmp."""
- try:
- self.os.unlink('/etc/sudoers.tmp')
- return True
- except OSError as e:
- if e.errno == errno.ENOENT:
- return True
- logging.warning('Could not remove /etc/sudoers.tmp: %s', e)
- return False
-
- def MakeUserSudoer(self, user):
- """Add user to the sudoers file."""
- # If the user has no sudoers file, don't add an entry.
- if not self.os.path.isfile('/etc/sudoers'):
- logging.info('Did not grant admin access to %s. /etc/sudoers not found.',
- user)
- return
-
- with self.system.OpenFile('/etc/sudoers', 'r') as sudoer_f:
- sudoer_lines = sudoer_f.readlines()
-
- if IsUserSudoerInLines(user, sudoer_lines):
- # User is already sudoer. Done. We don't have to check for a lock
- # file.
- return
-
- # Lock sudoers.
- if not self.LockSudoers():
- logging.warning('Did not grant admin access to %s. /etc/sudoers locked.',
- user)
- return
-
- try:
- # First read in the sudoers file (this time under the lock).
- with self.system.OpenFile('/etc/sudoers', 'r') as sudoer_f:
- sudoer_lines = sudoer_f.readlines()
-
- if IsUserSudoerInLines(user, sudoer_lines):
- # User is already sudoer. Done.
- return
-
- # Create a temporary sudoers file with the contents we want.
- sudoer_lines.append('%s ALL=NOPASSWD: ALL' % user)
- sudoer_lines = [EnsureTrailingNewline(line) for line in sudoer_lines]
- (tmp_sudoers_fd, tmp_sudoers_fname) = tempfile.mkstemp()
- with self.os.fdopen(tmp_sudoers_fd, 'w+') as tmp_sudoer_f:
- # Put the old lines.
- tmp_sudoer_f.writelines(sudoer_lines)
- tmp_sudoer_f.seek(0)
-
- try:
- # Validate our result.
- if not self.system.IsValidSudoersFile(tmp_sudoers_fname):
- logging.warning(
- 'Did not grant admin access to %s. Sudoers was invalid.', user)
- return
-
- self.os.chmod('/etc/sudoers', 0640)
- with self.system.OpenFile('/etc/sudoers', 'w') as sudoer_f:
- sudoer_f.writelines(sudoer_lines)
- # Make sure we're still 0640.
- self.os.fchmod(sudoer_f.fileno(), stat.S_IWUSR | 0640)
- try:
- self.os.fchmod(sudoer_f.fileno(), 0440)
- except (IOError, OSError) as e:
- logging.warning('Could not restore perms to /etc/sudoers: %s', e)
- finally:
- # Clean up the temp file.
- try:
- self.os.unlink(tmp_sudoers_fname)
- except (IOError, OSError) as e:
- pass
- except (IOError, OSError) as e:
- logging.warning('Could not grant %s admin access: %s', user, e)
- finally:
- self.UnlockSudoers()
-
- def AuthorizeSshKeys(self, user, ssh_keys):
- """Add ssh_keys to the user's ssh authorized_keys.gce file."""
- (home_dir, uid, gid) = self.GetUserInfo(user)
-
- ssh_dir = os.path.join(home_dir, '.ssh')
-
- if not self.os.path.isdir(ssh_dir):
- # Create a user's ssh directory, with u+rwx as the only permissions.
- # There's proper handling and logging of OSError within EnsureDir(),
-      # so neither of these calls needs to handle that.
- if not self.EnsureHomeDir(home_dir, uid, gid):
- return False
-
- if not self.EnsureDir(ssh_dir, uid, gid, 0700):
- return False
-
-  # Not all sshds support multiple authorized_keys files. We have to
- # share one with the user. We add our entries as follows:
- # # Added by Google
- # authorized_key_entry
- authorized_keys_file = os.path.join(ssh_dir, 'authorized_keys')
- try:
- self.WriteAuthorizedSshKeysFile(authorized_keys_file, ssh_keys, uid, gid)
- except IOError as e:
- logging.warning('Could not update %s due to %s', authorized_keys_file, e)
-
- def SetSELinuxContext(self, path):
- """Set the appropriate SELinux context, if SELinux tools are installed.
-
- Calls /sbin/restorecon on the provided path to set the SELinux context as
- specified by policy. This call does not operate recursively.
-
- Only some OS configurations use SELinux. It is therefore acceptable for
- restorecon to be missing, in which case we do nothing.
-
- Arguments:
- path: The path on which to fix the SELinux context.
-
- Returns:
- True if successful or if restorecon is missing, False in case of error.
- """
-
- if self.system.IsExecutable('/sbin/restorecon'):
- result = self.system.RunCommand(['/sbin/restorecon', path])
- if self.system.RunCommandFailed(result):
- logging.error('Unable to set SELinux context for %s', path)
- return False
- else:
- return True
- else:
- logging.debug('restorecon missing; not setting SELinux context for %s',
- path)
- return True
-
- def EnsureHomeDir(self, home_dir, uid, gid):
- """Make sure user's home directory exists.
-
- Create the directory and its ancestor directories if necessary.
-
- No changes are made to the ownership or permissions of a directory which
- already exists.
-
- Arguments:
- home_dir: The path to the home directory.
- uid: user ID to own the home dir.
- gid: group ID to own the home dir.
-
- Returns:
- True if successful, False if not.
- """
-
- if self.os.path.isdir(home_dir):
- return True
-
- # Use root as owner when creating ancestor directories.
- if not self.EnsureDir(home_dir, 0, 0, 0755):
- return False
-
- self.os.chown(home_dir, uid, gid)
- return True
-
- def EnsureDir(self, dir_path, uid, gid, mode):
- """Make sure the specified directory exists.
-
- If dir doesn't exist, create it and its ancestor directories, if necessary.
-
- No changes are made to the ownership or permissions of a directory which
- already exists.
-
- Arguments:
- dir_path: The path to the dir.
- uid: user ID of the owner.
- gid: group ID of the owner.
- mode: Permissions for the dir, as an integer (e.g. 0755).
-
- Returns:
- True if successful, False if not.
- """
-
- if self.os.path.isdir(dir_path):
- return True # We are done
-
- parent_dir = self.os.path.dirname(dir_path)
- if not parent_dir == dir_path:
- if not self.EnsureDir(parent_dir, uid, gid, 0755):
- return False
-
- try:
- self.os.mkdir(dir_path, mode)
- self.os.chown(dir_path, uid, gid)
- self.SetSELinuxContext(dir_path)
- except OSError as e:
- if self.os.path.isdir(dir_path):
- logging.warning('Could not prepare %s: %s', dir_path, e)
- return True
- logging.error('Could not create %s: %s', dir_path, e)
- return False
-
- return True
-
- def WriteAuthorizedSshKeysFile(
- self, authorized_keys_file, ssh_keys, uid, gid):
- """Update the authorized_keys_file to contain the given ssh_keys.
-
- Arguments:
- authorized_keys_file: The name of the authorized keys file.
- ssh_keys: The google added ssh keys for the file.
- uid: The uid for the user.
- gid: The gid for the user.
- """
- # Create a temp file to store the new keys.
- with self.system.CreateTempFile(delete=False) as keys_file:
- new_keys_path = keys_file.name
- # Read all the ssh keys in the original key file if it exists.
- if self.os.path.exists(authorized_keys_file):
- with self.system.OpenFile(authorized_keys_file, 'r') as original_keys:
- original_keys.seek(0)
- lines = original_keys.readlines()
- else:
- lines = []
-
-    # Pull out the # Added by Google lines and the key lines that follow.
-    google_added_ixs = [i for i in range(len(lines)) if
-                        lines[i].startswith('# Added by Google')]
-    google_added_ixs += [i + 1 for i in google_added_ixs if i + 1 < len(lines)]
-
- user_lines = [
- lines[i] for i in range(len(lines)) if i not in google_added_ixs]
-
- # First write user's entries.
- for user_line in user_lines:
- keys_file.write(EnsureTrailingNewline(user_line))
-
-    # Put Google entries at the end, each preceded by '# Added by Google'.
- for ssh_key in ssh_keys:
- keys_file.write('# Added by Google\n')
- keys_file.write(EnsureTrailingNewline(ssh_key))
-
- # Check that we have enough disk space to move the file.
- stat = self.os.statvfs(self.os.path.dirname(authorized_keys_file))
- available_space = stat.f_bavail * stat.f_bsize
- required_space = self.os.path.getsize(new_keys_path) + 1024 * 1024
- logging.debug('Writing keys file: %s bytes required; %s available.',
- required_space, available_space)
- if available_space < required_space:
- raise IOError('Disk is too full')
-
- try:
- # Override the old authorized keys file with the new one.
- self.system.MoveFile(new_keys_path, authorized_keys_file)
- finally:
- try:
- self.system.DeleteFile(new_keys_path)
- except:
- pass
-
- # Make sure the authorized_keys_file has the right perms (u+rw).
- self.os.chmod(authorized_keys_file, 0600)
- self.os.chown(authorized_keys_file, uid, gid)
-
- # Set SELinux context, if applicable to this system
- self.SetSELinuxContext(authorized_keys_file)
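
The rewrite rule implemented by WriteAuthorizedSshKeysFile above (user-owned lines kept, previously Google-added pairs replaced) fits in a few lines. A minimal sketch, with `merge_keys` as a hypothetical helper name:

    # Keep user-owned lines, drop previously Google-added marker/key pairs,
    # then re-append the managed keys, each preceded by the marker comment.
    MARKER = '# Added by Google'

    def merge_keys(existing_lines, managed_keys):  # hypothetical helper
        skip = set()
        for i, line in enumerate(existing_lines):
            if line.startswith(MARKER):
                skip.update((i, i + 1))
        merged = [l for i, l in enumerate(existing_lines) if i not in skip]
        for key in managed_keys:
            merged.append(MARKER + '\n')
            merged.append(key.rstrip('\n') + '\n')
        return merged
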
diff --git a/google-daemon/usr/share/google/google_daemon/accounts_manager.py b/google-daemon/usr/share/google/google_daemon/accounts_manager.py
deleted file mode 100644
index 5932796..0000000
--- a/google-daemon/usr/share/google/google_daemon/accounts_manager.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Main driver logic for managing accounts on GCE instances."""
-
-import logging
-import os
-import pwd
-import time
-
-LOCKFILE = '/var/lock/manage-accounts.lock'
-
-
-class AccountsManager(object):
- """Create accounts on a machine."""
-
- def __init__(self, accounts_module, desired_accounts, system, lock_file,
- lock_fname, single_pass=True):
- """Construct an AccountsFromMetadata with the given module injections."""
- if not lock_fname:
- lock_fname = LOCKFILE
- self.accounts = accounts_module
- self.desired_accounts = desired_accounts
- self.lock_file = lock_file
- self.lock_fname = lock_fname
- self.system = system
- self.single_pass = single_pass
-
- def Main(self):
- logging.debug('AccountsManager main loop')
- # If this is a one-shot execution, then this can be run normally.
- # Otherwise, run the actual operations in a subprocess so that any
- # errors don't kill the long-lived process.
- if self.single_pass:
- self.RegenerateKeysAndUpdateAccounts()
- return
- # Run this forever in a loop.
- while True:
- # Fork and run the key regeneration and account update while the
- # parent waits for the subprocess to finish before continuing.
-
- # Create a pipe used to get the new etag value from child
- reader, writer = os.pipe() # these are file descriptors, not file objects
- pid = os.fork()
- if pid:
- # We are the parent.
- os.close(writer)
- reader = os.fdopen(reader) # turn reader into a file object
- etag = reader.read()
- if etag:
- self.desired_accounts.etag = etag
- reader.close()
- logging.debug('New etag: %s', self.desired_accounts.etag)
- os.waitpid(pid, 0)
- else:
- # We are the child.
- os.close(reader)
- writer = os.fdopen(writer, 'w')
- try:
- self.RegenerateKeysAndUpdateAccounts()
- except Exception as e:
- logging.warning('error while trying to update accounts: %s', e)
- # An error happened while trying to update the accounts.
- # Sleep for five seconds before trying again.
- time.sleep(5)
-
- # Write the etag to pass to parent.
- etag = self.desired_accounts.etag or ''
- writer.write(etag)
- writer.close()
-
- # The use of os._exit here is recommended for subprocesses spawned
- # by forking to avoid issues with running the cleanup tasks that
- # sys.exit() runs by preventing issues from the cleanup being run
- # once by the subprocess and once by the parent process.
- os._exit(0)
-
- def RegenerateKeysAndUpdateAccounts(self):
- """Regenerate the keys and update accounts as needed."""
- logging.debug('RegenerateKeysAndUpdateAccounts')
- if self.system.IsExecutable('/usr/share/google/first-boot'):
- self.system.RunCommand('/usr/share/google/first-boot')
-
- self.lock_file.RunExclusively(self.lock_fname, self.UpdateAccounts)
-
- def UpdateAccounts(self):
- """Update all accounts that should be present or exist already."""
-
- # Note GetDesiredAccounts() returns a dict of username->sshKeys mappings.
- desired_accounts = self.desired_accounts.GetDesiredAccounts()
-
- # Plan a processing pass for extra accounts existing on the system with a
- # ~/.ssh/authorized_keys file, even if they're not otherwise in the metadata
- # server; this will only ever remove the last added-by-Google key from
- # accounts which were formerly in the metadata server but are no longer.
- all_accounts = pwd.getpwall()
- keyfile_suffix = os.path.join('.ssh', 'authorized_keys')
- sshable_usernames = [
- entry.pw_name
- for entry in all_accounts
- if os.path.isfile(os.path.join(entry.pw_dir, keyfile_suffix))]
- extra_usernames = set(sshable_usernames) - set(desired_accounts.keys())
-
- if desired_accounts:
- for username, ssh_keys in desired_accounts.iteritems():
- if not username:
- continue
-
- self.accounts.UpdateUser(username, ssh_keys)
-
- for username in extra_usernames:
- # If a username is present in extra_usernames, it is no longer reflected
- # in the metadata server but has an authorized_keys file. Therefore, we
- # should pass the empty list for sshKeys to ensure that any Google-managed
- # keys are no longer authorized.
- self.accounts.UpdateUser(username, [])
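
The fork-and-pipe handoff in Main() above is the heart of the long-lived mode: the child performs one update pass and reports the new etag back through a pipe before exiting. A minimal standalone sketch of that pattern (`run_in_child` is a hypothetical name):

    import os

    def run_in_child(work):  # hypothetical helper; returns the child's output
        reader, writer = os.pipe()
        pid = os.fork()
        if pid:                           # parent: read result, reap child
            os.close(writer)
            with os.fdopen(reader) as r:
                result = r.read()
            os.waitpid(pid, 0)
            return result
        os.close(reader)                  # child: do the work, report back
        with os.fdopen(writer, 'w') as w:
            w.write(work())
        os._exit(0)                       # skip sys.exit() cleanup in the child

    print(run_in_child(lambda: 'new-etag'))
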
diff --git a/google-daemon/usr/share/google/google_daemon/accounts_manager_daemon.py b/google-daemon/usr/share/google/google_daemon/accounts_manager_daemon.py
deleted file mode 100755
index d489112..0000000
--- a/google-daemon/usr/share/google/google_daemon/accounts_manager_daemon.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tool for running account manager as a daemon."""
-
-import fcntl
-import logging
-import os
-import signal
-
-PIDFILE = '/var/run/manage_accounts.pid'
-
-
-class AccountsManagerDaemon(object):
- """Creates a daemon process to run the accounts manager in."""
-
- def __init__(self, pidfile, accounts_manager, fcntl_module=fcntl):
- logging.debug('Initializing Daemon Module')
- if not pidfile:
- pidfile = PIDFILE
-
- self.pidfile = pidfile
- self.accounts_manager = accounts_manager
- self.fcntl_module = fcntl_module
-
- def StartDaemon(self):
- """Spins off a process that runs as a daemon."""
- # To spin off the process, use what seems to be the "standard" way to spin
- # off daemons: fork a child process, make it the session and process group
- # leader, then fork it again so that the actual daemon process is no longer
- # a session leader.
- #
- # This is a very simplified (with significantly reduced features) version of
- # the python-daemon library at https://pypi.python.org/pypi/python-daemon/.
- pid = os.fork()
- logging.debug('Forked new process, pid= {0}'.format(pid))
- if pid == 0:
- os.setsid()
- pid = os.fork()
- if pid == 0:
- os.chdir('/')
- os.umask(0)
- else:
- # The use of os._exit here is recommended for parents of a daemon
- # process to avoid issues with running the cleanup tasks that
- # sys.exit() runs by preventing issues from the cleanup being run
- # more than once when the two parents exit and later when the daemon
- # exits.
- os._exit(0)
- else:
- os._exit(0)
-
- # Set up pidfile and signal handlers.
- pidf = open(self.pidfile, 'w')
- pidf.write(str(os.getpid()))
- pidf.close()
-
- logging.debug('Sending signal SIGTERM to shutdown daemon')
- signal.signal(signal.SIGTERM, self.ShutdownDaemon)
-
- self.accounts_manager.Main()
-
- def ShutdownDaemon(self, signal_number, unused_stack_frame):
- # Grab the lock on the lock file, ensuring that the accounts manager is not
- # in the middle of something. Using a different file reference guarantees
- # that the lock can only be grabbed once the accounts manager is done with
- # it and holding it guarantees that the accounts manager won't start up
- # again while shutting down.
- logging.debug('Acquiring Daemon lock.')
- lockfile = open(self.accounts_manager.lock_fname, 'r')
- self.fcntl_module.flock(lockfile.fileno(), fcntl.LOCK_EX)
-
- logging.debug('Shutting down Daemon module.')
- # Clean up pidfile and terminate. Lock will be released with termination.
- os.remove(self.pidfile)
- exception = SystemExit('Terminating on signal number %d' % signal_number)
- raise exception
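
StartDaemon() above uses the classic double fork. A minimal sketch of just that sequence:

    import os

    def daemonize():
        if os.fork():        # first parent exits immediately
            os._exit(0)
        os.setsid()          # become session leader, detached from the tty
        if os.fork():        # exit the session leader; the grandchild survives
            os._exit(0)
        os.chdir('/')        # don't pin any mount point
        os.umask(0)
        # the surviving process is now the daemon
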
diff --git a/google-daemon/usr/share/google/google_daemon/address_manager.py b/google-daemon/usr/share/google/google_daemon/address_manager.py
deleted file mode 100644
index 7a0e911..0000000
--- a/google-daemon/usr/share/google/google_daemon/address_manager.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Manage extra network interface addresses on a GCE instance.
-
-Fetch a list of public endpoint IPs from the metadata server, compare it with
-what's configured on eth0, and add/remove addresses from eth0 to make them
-match. Only remove those which match our proto code.
-
-This must be run by root. If it reads any malformed data, it will take no
-action.
-
-Command used to add ips:
- ip route add to local $IP/32 dev eth0 proto 66
-Command used to fetch list of configured IPs:
- ip route ls table local type local dev eth0 scope host proto 66
-"""
-
-
-import logging
-import os
-import re
-import socket
-import time
-import urllib2
-
-PUBLIC_ENDPOINT_URL_PREFIX = (
-'http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/forwarded-ips/?recursive=true&alt=text&wait_for_change=true&timeout_sec=60&last_etag=')
-GOOGLE_PROTO_ID = 66 # "GG"
-
-class InputError(Exception):
- pass
-
-class AddressManager(object):
- """Manage public endpoint IPs."""
-
- def __init__(self, system_module, urllib2_module=urllib2, time_module=time):
- self.system = system_module
- self.urllib2 = urllib2_module
- self.time = time_module
- self.ip_path = '/sbin/ip'
- if not os.path.exists(self.ip_path):
- self.ip_path = '/bin/ip'
-
- # etag header value is hex, so this is guaranteed to not match.
- self.default_last_etag = 'NONE'
- self.ResetEtag()
-
- def SyncAddressesForever(self):
- while True:
- try:
- # Block until the metadata changes or there is a timeout or error.
- self.SyncAddresses()
- except socket.timeout as e:
- self.ResetEtag()
- logging.warning('Backend timeout. Retrying.')
- except Exception as e:
- self.ResetEtag()
- logging.error('SyncAddresses exception: %s', e)
- # Don't spin
- self.time.sleep(5)
-
- def SyncAddresses(self):
- """Main entry point -- syncs configured w/ desired IP addresses."""
-
- addrs_wanted = self.ReadPublicEndpoints()
- addrs_configured = self.ReadLocalConfiguredAddrs()
- (to_add, to_remove) = self.DiffAddrs(addrs_wanted, addrs_configured)
- self.LogChanges(addrs_wanted, addrs_configured, to_add, to_remove)
- self.AddAddresses(to_add)
- self.DeleteAddresses(to_remove)
-
- def ResetEtag(self):
- """Reset the etag so the next call will return the current data."""
- self.last_etag = self.default_last_etag
-
- def ReadPublicEndpoints(self):
- """Fetch list of public endpoint IPs from metadata server."""
- try:
- # If the connection gets abandoned, ensure we don't hang more than
- # 70 seconds.
- url = PUBLIC_ENDPOINT_URL_PREFIX + self.last_etag
- request = urllib2.Request(url)
- request.add_unredirected_header('Metadata-Flavor', 'Google')
- u = self.urllib2.urlopen(request, timeout=70)
- addrs_data = u.read()
- headers = u.info().dict
- self.last_etag = headers.get('etag', self.default_last_etag)
- except urllib2.HTTPError as h:
- self.ResetEtag()
- # 404 is treated like an empty list, for backward compatibility.
- if h.code == 404:
- return []
- raise h
- return self.ParseIPAddrs(addrs_data)
-
- def ReadLocalConfiguredAddrs(self):
- """Fetch list of addresses we've configured on eth0 already."""
- cmd = ('{0} route ls table local type local dev eth0 scope host ' +
- 'proto {1:d}').format(self.ip_path, GOOGLE_PROTO_ID)
- result = self.system.RunCommand(cmd.split())
- if self.IPCommandFailed(result, cmd):
-      raise InputError("Can't check local addresses")
- (rc, stdout, stderr) = result
- return self.ParseIPAddrs(stdout)
-
- def DiffAddrs(self, addrs_wanted, addrs_configured):
- """"Returns set differences: (to_add, to_remove)."""
- want = set(addrs_wanted)
- have = set(addrs_configured)
- to_add = want - have
- to_remove = have - want
- return (sorted(to_add), sorted(to_remove))
-
- def LogChanges(self, addrs_wanted, addrs_configured, to_add, to_remove):
- """Log what addrs we are going to change."""
- if not to_add and not to_remove:
- return
- logging.info(
- 'Changing public IPs from %s to %s by adding %s and removing %s' % (
- addrs_configured or None,
- addrs_wanted or None,
- to_add or None,
- to_remove or None))
-
- def AddAddresses(self, to_add):
- """Configure new addresses on eth0."""
- for addr in to_add:
- self.AddOneAddress(addr)
-
- def AddOneAddress(self, addr):
- """Configure one address on eth0."""
- cmd = '%s route add to local %s/32 dev eth0 proto %d' % (
- self.ip_path, addr, GOOGLE_PROTO_ID)
- result = self.system.RunCommand(cmd.split())
- self.IPCommandFailed(result, cmd) # Ignore return code
-
- def DeleteAddresses(self, to_remove):
- """Un-configure a list of addresses from eth0."""
- for addr in to_remove:
- self.DeleteOneAddress(addr)
-
- def DeleteOneAddress(self, addr):
- """Delete one address from eth0."""
- # This will fail if it doesn't match exactly the specs listed.
- # That'll help ensure we don't remove one added by someone else.
- cmd = '%s route delete to local %s/32 dev eth0 proto %d' % (
- self.ip_path, addr, GOOGLE_PROTO_ID)
- result = self.system.RunCommand(cmd.split())
- self.IPCommandFailed(result, cmd) # Ignore return code
-
- # Helper methods
- def ParseIPAddrs(self, addrs_data):
- """Parse and validate IP addrs, return list of strings or None."""
- addrs = addrs_data.strip().split()
-    reg = re.compile(r'^([0-9]+\.){3}[0-9]+$')
- for addr in addrs:
- if not reg.search(addr):
- raise InputError('Failed to parse ip addr: "%s"' % addr)
- return addrs
-
- def IPCommandFailed(self, result, cmd):
- """If an /sbin/ip command failed, log and return True."""
- if self.system.RunCommandFailed(
- result, 'Non-zero exit status from: "%s"' % cmd):
- return True
- else:
- return False
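
The sync step above reduces to two set differences plus one `ip route` command per address. A minimal sketch of the reconciliation (`plan` is a hypothetical name):

    GOOGLE_PROTO_ID = 66  # proto 66 marks a route as Google-managed

    def plan(addrs_wanted, addrs_configured, ip_path='/sbin/ip'):
        want, have = set(addrs_wanted), set(addrs_configured)
        cmds = ['%s route add to local %s/32 dev eth0 proto %d'
                % (ip_path, addr, GOOGLE_PROTO_ID)
                for addr in sorted(want - have)]
        cmds += ['%s route delete to local %s/32 dev eth0 proto %d'
                 % (ip_path, addr, GOOGLE_PROTO_ID)
                 for addr in sorted(have - want)]
        return cmds

    print(plan(['10.0.0.1'], ['10.0.0.2']))
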
diff --git a/google-daemon/usr/share/google/google_daemon/desired_accounts.py b/google-daemon/usr/share/google/google_daemon/desired_accounts.py
deleted file mode 100755
index f6fb5ff..0000000
--- a/google-daemon/usr/share/google/google_daemon/desired_accounts.py
+++ /dev/null
@@ -1,191 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Get the accounts desired to be present on the VM."""
-
-import datetime
-import json
-import logging
-import time
-import urllib2
-
-
-METADATA_URL = 'http://metadata.google.internal/computeMetadata/v1'
-METADATA_HANG = ('/?recursive=true&alt=json&wait_for_change=true'
- '&timeout_sec=%s&last_etag=%s')
-
-
-def KeyHasExpired(key):
- """Check to see whether an SSH key has expired.
-
- Uses Google-specific (for now) semantics of the OpenSSH public key format's
- comment field to determine if an SSH key is past its expiration timestamp, and
- therefore no longer to be trusted. This format is still subject to change.
- Reliance on it in any way is at your own risk.
-
- Args:
- key: A single public key entry in OpenSSH public key file format. This will
- be checked for Google-specific comment semantics, and if present, those
- will be analysed.
-
- Returns:
- True if the key has Google-specific comment semantics and has an expiration
- timestamp in the past, or False otherwise.
- """
-
- logging.debug('Processing key: %s', key)
-
- try:
- schema, json_str = key.split(None, 3)[2:]
- except ValueError:
- logging.debug('Key does not seem to have a schema identifier.')
- logging.debug('Not expiring key.')
- return False
-
- if schema != 'google-ssh':
- logging.debug('Rejecting %s as potential key schema identifier.', schema)
- return False
-
- logging.debug('Google SSH key schema identifier found.')
- logging.debug('JSON string detected: %s', json_str)
-
- try:
- json_obj = json.loads(json_str)
- except ValueError:
- logging.error('Invalid JSON. Not expiring key.')
- return False
-
- if 'expireOn' not in json_obj:
- # Use warning instead of error for this failure mode in case we
- # add future use cases for this JSON which are unrelated to expiration.
- logging.warning('No expiration timestamp. Not expiring key.')
- return False
-
- expire_str = json_obj['expireOn']
- format_str = '%Y-%m-%dT%H:%M:%S+0000'
-
- try:
- expire_time = datetime.datetime.strptime(expire_str, format_str)
- except ValueError:
- logging.error(
- 'Expiration timestamp "%s" not in format %s.', expire_str, format_str)
- logging.error('Not expiring key.')
- return False
-
- # Expire the key if and only if we have exceeded the expiration timestamp.
- return datetime.datetime.utcnow() > expire_time
-
-
-def AccountDataToDictionary(data):
- """Given SSH key data, construct a usermap.
-
- Args:
- data: The data returned from the metadata server's SSH key attributes.
-
- Returns:
- A map of {'username': ssh_keys_list}.
- """
- if not data:
- return {}
- lines = [line for line in data.splitlines() if line]
- usermap = {}
- for line in lines:
- split_line = line.split(':', 1)
- if len(split_line) != 2:
- logging.warning(
- 'SSH key is not a complete entry: %s', split_line)
- continue
- user, key = split_line
- if KeyHasExpired(key):
- logging.debug(
- 'Skipping expired SSH key for user %s: %s', user, key)
- continue
- if user not in usermap:
- usermap[user] = []
- usermap[user].append(key)
- logging.debug('User accounts: %s', usermap)
- return usermap
-
-
-class DesiredAccounts(object):
- """Interface to determine the accounts desired on this instance."""
-
- def __init__(self, time_module=time, urllib2_module=urllib2):
- self.urllib2 = urllib2_module
- self.time = time_module
- self.etag = 0
-
- def _WaitForUpdate(self, timeout_secs):
- """Makes a hanging get request for the contents of the metadata server."""
- request_url = METADATA_URL + METADATA_HANG % (timeout_secs, self.etag)
- logging.debug('Getting url: %s', request_url)
- request = urllib2.Request(request_url)
- request.add_header('Metadata-Flavor', 'Google')
- return self.urllib2.urlopen(request, timeout=timeout_secs*1.1)
-
- def _GetMetadataUpdate(self, timeout_secs=60):
- """Fetches the content of the metadata server.
-
- Args:
- timeout_secs: The timeout in seconds.
-
- Returns:
- The JSON formatted string content of the metadata server.
- """
- try:
- response = self._WaitForUpdate(timeout_secs=timeout_secs)
- response_info = response.info()
- if response_info and response_info.has_key('etag'):
- self.etag = response_info.getheader('etag')
- content = response.read()
- logging.debug('response: %s', content)
- return content
- except urllib2.HTTPError as e:
- if e.code == 404:
- # The metadata server content doesn't exist. Return None.
- # No need to log a warning.
- return None
- # Rethrow the exception since we don't know what it is. Let the
- # top layer handle it.
- raise
- return None
-
- def GetDesiredAccounts(self):
- """Get a list of the accounts desired on the system.
-
- Returns:
-      A dict of the form: {'username': ['sshkey1', 'sshkey2', ...]}.
- """
- logging.debug('Getting desired accounts from metadata.')
- # Fetch the top level attribute with a hanging get.
- metadata_content = self._GetMetadataUpdate()
- metadata_dict = json.loads(metadata_content or '{}')
- account_data = None
-
- try:
- instance_data = metadata_dict['instance']['attributes']
- project_data = metadata_dict['project']['attributes']
- # Instance SSH keys to use regardless of project metadata.
- valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')]
- block_project = instance_data.get('block-project-ssh-keys', '').lower()
- if block_project != 'true' and not instance_data.get('sshKeys'):
- valid_keys.append(project_data.get('ssh-keys'))
- valid_keys.append(project_data.get('sshKeys'))
- valid_keys = [key for key in valid_keys if key]
- account_data = '\n'.join(valid_keys)
- except KeyError:
- logging.debug('Project or instance attributes were not found.')
-
- return AccountDataToDictionary(account_data)
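
KeyHasExpired() above relies on the 'google-ssh' comment schema. A condensed sketch of the same check (`key_expired` is a hypothetical name; the key layout assumed is the one documented above):

    import datetime
    import json

    def key_expired(key, now=None):
        # e.g. 'ssh-rsa AAAA... google-ssh {"expireOn":"2016-01-01T00:00:00+0000"}'
        try:
            schema, payload = key.split(None, 3)[2:]
        except ValueError:
            return False                   # no schema fields; never expires
        if schema != 'google-ssh':
            return False
        try:
            expires = datetime.datetime.strptime(
                json.loads(payload)['expireOn'], '%Y-%m-%dT%H:%M:%S+0000')
        except (ValueError, KeyError):
            return False                   # malformed entries fail open, as above
        return (now or datetime.datetime.utcnow()) > expires
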
diff --git a/google-daemon/usr/share/google/google_daemon/manage_accounts.py b/google-daemon/usr/share/google/google_daemon/manage_accounts.py
deleted file mode 100755
index 9f3bb33..0000000
--- a/google-daemon/usr/share/google/google_daemon/manage_accounts.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Main driver logic for managing accounts on GCE instances."""
-
-import logging
-import optparse
-import os
-import os.path
-import sys
-
-
-def FixPath():
- parent_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
- if os.path.isdir(parent_dir):
- sys.path.append(parent_dir)
-
-
-FixPath()
-
-
-from accounts import Accounts
-from accounts_manager import AccountsManager
-from accounts_manager_daemon import AccountsManagerDaemon
-from desired_accounts import DesiredAccounts
-from utils import LockFile
-from utils import System
-
-
-def Main(accounts, desired_accounts, system, logger,
- log_handler, lock_file, lock_fname=None, single_pass=True,
- daemon_mode=False, force_mode=False, debug_mode=False):
-
- if not log_handler:
- log_handler = system.MakeLoggingHandler(
- 'accounts-from-metadata', logging.handlers.SysLogHandler.LOG_AUTH)
- system.SetLoggingHandler(logger, log_handler)
-
- if debug_mode:
- system.EnableDebugLogging(logger)
- logging.debug('Running in Debug Mode')
-
- if not force_mode and os.path.isfile('/usr/share/google/gcua'):
- logging.error('Google Compute User Accounts is installed.')
- sys.exit(1)
-
- accounts_manager = AccountsManager(
- accounts, desired_accounts, system, lock_file, lock_fname,
- single_pass)
-
- if daemon_mode:
- manager_daemon = AccountsManagerDaemon(None, accounts_manager)
- manager_daemon.StartDaemon()
- else:
- accounts_manager.Main()
-
-
-if __name__ == '__main__':
- parser = optparse.OptionParser()
- parser.add_option('--daemon', dest='daemon', action='store_true')
- parser.add_option('--no-daemon', dest='daemon', action='store_false')
-  # Leave the --interval flag in place for now to give each platform time to
-  # move to the new flags.
- parser.add_option('--interval', type='int', dest='interval')
- parser.add_option('--single-pass', dest='single_pass', action='store_true')
- parser.add_option('--no-single-pass', dest='single_pass', action='store_false')
- parser.add_option('--force', dest='force', action='store_true')
- parser.add_option('--debug', dest='debug', action='store_true')
- parser.set_defaults(interval=60)
- parser.set_defaults(single_pass=False)
- parser.set_defaults(daemon=False)
- parser.set_defaults(force=False)
- parser.set_defaults(debug=False)
- (options, args) = parser.parse_args()
-
- # set single_pass to True if interval is -1.
- if options.interval == -1:
- options.single_pass = True
-
- Main(Accounts(system_module=System()), DesiredAccounts(),
- System(), logging.getLogger(), None, LockFile(), None, options.single_pass,
- options.daemon, options.force, options.debug)
diff --git a/google-daemon/usr/share/google/google_daemon/manage_addresses.py b/google-daemon/usr/share/google/google_daemon/manage_addresses.py
deleted file mode 100755
index 5e9ade6..0000000
--- a/google-daemon/usr/share/google/google_daemon/manage_addresses.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Main driver logic for managing public IPs on GCE instances."""
-
-import logging
-import os
-import sys
-
-def FixPath():
- parent_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
- if os.path.isdir(parent_dir):
- sys.path.append(parent_dir)
-
-
-FixPath()
-
-from utils import LockFile
-from utils import System
-from address_manager import AddressManager
-
-
-LOCKFILE = '/var/lock/google-address-manager.lock'
-
-def Main(system=System(), logger=logging.getLogger(), log_handler=None,
- lock_file=LockFile(), lock_fname=None):
- if not log_handler:
- log_handler = system.MakeLoggingHandler(
- 'google-address-manager', logging.handlers.SysLogHandler.LOG_SYSLOG)
- system.SetLoggingHandler(logger, log_handler)
- logging.info('Starting GCE address manager')
-
- if not lock_fname:
- lock_fname = LOCKFILE
- manager = AddressManager(system_module=system)
- lock_file.RunExclusively(lock_fname, manager.SyncAddressesForever)
-
-
-if __name__ == '__main__':
- Main()
diff --git a/google-daemon/usr/share/google/google_daemon/manage_clock_sync.py b/google-daemon/usr/share/google/google_daemon/manage_clock_sync.py
deleted file mode 100755
index c49f699..0000000
--- a/google-daemon/usr/share/google/google_daemon/manage_clock_sync.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/python
-# Copyright 2015 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Manages clock syncing after migration on GCE instances."""
-
-import logging
-import os
-import sys
-
-def FixPath():
- parent_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
- if os.path.isdir(parent_dir):
- sys.path.append(parent_dir)
-
-
-FixPath()
-
-from utils import LockFile
-from utils import System
-from metadata_watcher import MetadataWatcher
-
-
-LOCKFILE = '/var/lock/google-clock-sync.lock'
-
-
-def HandleClockDriftToken(metadata_watcher, on_change):
- """Watches for and responds to drift-token changes.
-
- Args:
- metadata_watcher: a MetadataWatcher object.
- on_change: a callable to call for any change.
- """
- clock_drift_token_key = 'instance/virtual-clock/drift-token'
-
- def Handler(event):
- on_change(event)
-
- metadata_watcher.WatchMetadataForever(clock_drift_token_key,
- Handler, initial_value='')
-
-
-def OnChange(event):
- """Called when clock drift token changes.
-
- Args:
- event: the new value of the drift token.
- """
- system = System()
- logging.info('Clock drift token has changed: %s', event)
- logging.info('Syncing system time with hardware clock...')
- result = system.RunCommand(['/sbin/hwclock', '--hctosys'])
- if system.RunCommandFailed(result):
- logging.error('Syncing system time failed.')
- else:
- logging.info('Synced system time with hardware clock.')
-
-
-def Main(system=System(), logger=logging.getLogger(), log_handler=None,
- lock_file=LockFile(), lock_fname=None):
- if not log_handler:
- log_handler = system.MakeLoggingHandler(
- 'google-clock-sync', logging.handlers.SysLogHandler.LOG_SYSLOG)
- system.SetLoggingHandler(logger, log_handler)
- logging.info('Starting GCE clock sync')
-
- if not lock_fname:
- lock_fname = LOCKFILE
- watcher = MetadataWatcher()
-  # RunExclusively expects a callable; wrap the call so the watch loop
-  # starts only after the lock is held.
-  lock_file.RunExclusively(lock_fname,
-                           lambda: HandleClockDriftToken(watcher, OnChange))
-
-
-if __name__ == '__main__':
- Main()
diff --git a/google-daemon/usr/share/google/google_daemon/metadata_watcher.py b/google-daemon/usr/share/google/google_daemon/metadata_watcher.py
deleted file mode 100755
index af0a90a..0000000
--- a/google-daemon/usr/share/google/google_daemon/metadata_watcher.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/python
-# Copyright 2015 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import httplib
-import time
-import urllib
-import urllib2
-
-
-METADATA_URL = 'http://metadata.google.internal/computeMetadata/v1/'
-
-
-class Error(Exception):
- pass
-
-
-class UnexpectedStatusException(Error):
- pass
-
-
-class MetadataWatcher(object):
- """Watches for changing metadata."""
-
- def __init__(self, httplib_module=httplib, time_module=time,
- urllib_module=urllib, urllib2_module=urllib2):
- self.httplib = httplib_module
- self.time = time_module
- self.urllib = urllib_module
- self.urllib2 = urllib2_module
-
- def WatchMetadataForever(self, metadata_key, handler, initial_value=None):
- """Watches for a change in the value of metadata.
-
- Args:
- metadata_key: The key identifying which metadata to watch for changes.
- handler: A callable to call when the metadata value changes. Will be passed
- a single parameter, the new value of the metadata.
- initial_value: The expected initial value for the metadata. The handler will
- not be called on the initial metadata request unless the value differs
- from this.
-
- Raises:
- UnexpectedStatusException: If the http request is unsuccessful for an
- unexpected reason.
- """
- params = {
- 'wait_for_change': 'true',
- 'last_etag': 0,
- }
-
- value = initial_value
- while True:
- # start a hanging-GET request for metadata key.
- url = '{base_url}{key}?{params}'.format(
- base_url=METADATA_URL,
- key=metadata_key,
- params=self.urllib.urlencode(params)
- )
- req = self.urllib2.Request(url, headers={'Metadata-Flavor': 'Google'})
-
- try:
- response = self.urllib2.urlopen(req)
- content = response.read()
- status = response.getcode()
- except self.urllib2.HTTPError as e:
- content = None
- status = e.code
-
- if status == self.httplib.SERVICE_UNAVAILABLE:
- self.time.sleep(1)
- continue
- elif status == self.httplib.OK:
- # Extract new metadata value and latest etag.
- new_value = content
- headers = response.info()
- params['last_etag'] = headers['ETag']
- else:
- raise UnexpectedStatusException(status)
-
-      # If the metadata value changed, call the appropriate handler. This
-      # fires on the first response too when the value differs from
-      # initial_value, as documented above.
-      if value != new_value:
-        value = new_value
-        handler(value)
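
A hypothetical usage of the watcher above, printing a value whenever it changes (the key path is relative to METADATA_URL):

    def OnNewValue(value):
        print('metadata changed: %s' % value)

    watcher = MetadataWatcher()
    watcher.WatchMetadataForever('instance/attributes/startup-script', OnNewValue)
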
diff --git a/google-daemon/usr/share/google/google_daemon/utils.py b/google-daemon/usr/share/google/google_daemon/utils.py
deleted file mode 100755
index 0c7fe5c..0000000
--- a/google-daemon/usr/share/google/google_daemon/utils.py
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Library functions and interfaces for manipulating accounts."""
-
-import errno
-import fcntl
-import logging
-import logging.handlers
-import os
-import shutil
-import subprocess
-import sys
-import tempfile
-
-class RunCommandException(Exception):
- """Could not run a command."""
- pass
-
-
-class System(object):
- """Interface for interacting with the system."""
-
- def __init__(self, subprocess_module=subprocess, os_module=os):
- self.subprocess = subprocess_module
- self.os = os_module
-
- def MakeLoggingHandler(self, prefix, facility):
- """Make a logging handler to send logs to syslog."""
- handler = logging.handlers.SysLogHandler(
- address='/dev/log', facility=facility)
- formatter = logging.Formatter(prefix + ': %(levelname)s %(message)s')
- handler.setFormatter(formatter)
- return handler
-
- def SetLoggingHandler(self, logger, handler):
- """Setup logging w/ a specific handler."""
- handler.setLevel(logging.INFO)
- logger.setLevel(logging.INFO)
- logger.addHandler(handler)
-
- def EnableDebugLogging(self, logger):
- debug_handler = logging.StreamHandler(sys.stdout)
- debug_handler.setLevel(logging.DEBUG)
- logger.addHandler(debug_handler)
- logger.setLevel(logging.DEBUG)
-
- def OpenFile(self, *args, **kwargs):
- return open(*args, **kwargs)
-
- def MoveFile(self, src, dst):
- return shutil.move(src, dst)
-
- def CreateTempFile(self, delete=True):
- return tempfile.NamedTemporaryFile(delete=delete)
-
- def DeleteFile(self, name):
- return os.remove(name)
-
- def UserAdd(self, user, groups):
- logging.info('Creating account %s', user)
-
- # We must set the crypto passwd via useradd to '*' to make ssh work
- # on Linux systems without PAM.
- #
- # Unfortunately, there is no spec that I can find that defines how
- # this stuff is used and from the manpage of shadow it says that "!"
- # or "*" or any other invalid crypt can be used.
- #
- # ssh just takes it upon itself to use "!" as its locked account token:
- # https://github.com/openssh/openssh-portable/blob/master/configure.ac#L705
- #
- # If '!' token is used then it simply denies logins:
- # https://github.com/openssh/openssh-portable/blob/master/auth.c#L151
- #
- # To solve the issue make the passwd '*' which is also recognized as
- # locked but doesn't prevent ssh logins.
- result = self.RunCommand([
- '/usr/sbin/useradd', user, '-m', '-s', '/bin/bash', '-p', '*', '-G',
- ','.join(groups)])
- if self.RunCommandFailed(result, 'Could not create user %s', user):
- return False
- return True
-
- def IsValidSudoersFile(self, filename):
- result = self.RunCommand(['/usr/sbin/visudo', '-c', '-f', filename])
- if result[0] != 0:
-      with self.OpenFile(filename, 'r') as f:
- contents = f.read()
- self.RunCommandFailed(
- result, 'Could not produce valid sudoers file\n%s' % contents)
- return False
- return True
-
- def IsExecutable(self, path):
- """Return whether path exists and is an executable binary."""
- return self.os.path.isfile(path) and self.os.access(path, os.X_OK)
-
- def RunCommand(self, args):
- """Run a command, return a retcode, stdout, stderr tuple."""
- try:
- p = self.subprocess.Popen(
- args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (stdout, stderr) = p.communicate()
- return (p.returncode, stdout, stderr)
-    except OSError as e:
- raise RunCommandException('Could not run %s due to %s' % (args, e))
-
- def RunCommandFailed(self, result, *msg_args):
- retcode, stdout, stderr = result
- if retcode != 0:
- logging.warning('%s\nSTDOUT:\n%s\nSTDERR:\n%s\n',
- msg_args[0] % msg_args[1:], stdout, stderr)
- return True
- return False
-
-
-class CouldNotLockException(Exception):
- """Someone else seems to be holding the lock."""
- pass
-
-
-class UnexpectedLockException(Exception):
- """We genuinely failed to lock the file."""
- pass
-
-
-class CouldNotUnlockException(Exception):
- """Someone else seems to be holding the lock."""
- pass
-
-
-class UnexpectedUnlockException(Exception):
- """We genuinely failed to unlock the file."""
- pass
-
-
-class LockFile(object):
- """Lock a file to prevent multiple concurrent executions."""
-
- def __init__(self, fcntl_module=fcntl):
- self.fcntl_module = fcntl_module
-
- def RunExclusively(self, lock_fname, method):
- try:
- self.Lock(lock_fname)
- method()
- self.Unlock()
- except CouldNotLockException:
- logging.warning(
- 'Could not lock %s. Is it locked by another program?',
- lock_fname)
- except UnexpectedLockException as e:
- logging.warning(
- 'Could not lock %s due to %s', lock_fname, e)
- except CouldNotUnlockException:
- logging.warning(
- 'Could not unlock %s. Is it locked by another program?',
- lock_fname)
- except UnexpectedUnlockException as e:
- logging.warning(
- 'Could not unlock %s due to %s', lock_fname, e)
-
- def Lock(self, lock_fname):
- """Lock the lock file."""
- try:
- self.fh = open(lock_fname, 'w+b')
- self.fcntl_module.flock(self.fh.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
- except IOError as e:
- if e.errno == errno.EWOULDBLOCK:
- raise CouldNotLockException()
- raise UnexpectedLockException('Failed to lock: %s' % e)
-
- def Unlock(self):
- """Unlock the lock file."""
- try:
- self.fcntl_module.flock(self.fh.fileno(), fcntl.LOCK_UN|fcntl.LOCK_NB)
- except IOError as e:
- if e.errno == errno.EWOULDBLOCK:
- raise CouldNotUnlockException()
- raise UnexpectedUnlockException('Failed to unlock: %s' % e)
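
A hypothetical usage of LockFile above, ensuring a critical section runs in at most one process at a time:

    def critical_section():
        print('only one process at a time gets here')

    LockFile().RunExclusively('/var/lock/example.lock', critical_section)
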
diff --git a/google-startup-scripts/README.md b/google-startup-scripts/README.md
deleted file mode 100644
index 2602143..0000000
--- a/google-startup-scripts/README.md
+++ /dev/null
@@ -1,52 +0,0 @@
-## Google Startup Scripts
-Google provides a set of startup scripts that interact with the virtual machine environment. On boot, the startup script `/usr/share/google/onboot` queries the instance metadata for a user-provided startup script to run. User-provided startup scripts can be specified in the instance metadata under `startup-script` or, if the script is hosted at a downloadable URL, under `startup-script-url`. You can use [gcloud compute](https://cloud.google.com/compute/docs/gcloud-compute/) or the [Google Compute Engine API](https://developers.google.com/compute/docs/reference/latest) to specify a startup script.
-
-For more information on how to use startup scripts, read the [Using Startup Scripts documentation](https://developers.google.com/compute/docs/howtos/startupscript#storescriptremotely).
-
-Below are two examples of instance metadata: one passing a startup-script URL and one passing an inline startup script:
-
- { // instance
- metadata: {
- "kind": "compute#metadata",
- "items": [
- {
- "key": "startup-script-url",
- "value": "http://startup-script-url:
- }
- ]
- }
- }
- { // instance
- metadata: {
- "kind": "compute#metadata",
- "items": [
- {
- "key": "startup-script",
- "value": "#! /bin/python\nprint ‘startup’\n"
- }
- ]
- }
- }
-
-
-Google startup scripts also perform the following actions:
-
-+ __Checks the value of the instance ID key__
-
- Startup scripts check the value of the instance ID at:
-
- http://169.254.169.254/computeMetadata/v1/instance/id
-
-  and compare it to the last instance ID the disk booted on.
-
-+ __Sets the [hostname](https://github.com/GoogleCloudPlatform/compute-image-packages/blob/master/google-startup-scripts/usr/share/google/set-hostname) from the metadata server via DHCP exit hooks.__
-
-+ __Updates gsutil authentication.__
-
- Startup scripts run `/usr/share/google/boto/boot_setup.py` which configures and copies `/usr/share/google/boto/boto_plugins/compute_auth.py` into the boto plugin directory.
-
-+ __Provides udev rules to give friendly names to disks.__
-
- Google Compute Engine provides `/lib/udev/rules.d/65-gce-disk-naming.rules` in our images.
-
-+ __Safely formats persistent disks via `/usr/share/google/safe_format_and_mount`.__
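
The metadata query that onboot performs can be approximated with a plain HTTP request. A minimal sketch, assuming only the v1 metadata endpoint and the Metadata-Flavor header used throughout this repository:

    import urllib2

    URL = ('http://169.254.169.254/computeMetadata/v1/'
           'instance/attributes/startup-script')
    request = urllib2.Request(URL, headers={'Metadata-Flavor': 'Google'})
    try:
        print(urllib2.urlopen(request, timeout=10).read())
    except urllib2.HTTPError as e:
        if e.code == 404:                  # no startup-script metadata set
            print('no startup script')
        else:
            raise
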
diff --git a/google-startup-scripts/etc/init.d/google b/google-startup-scripts/etc/init.d/google
deleted file mode 100755
index 469d282..0000000
--- a/google-startup-scripts/etc/init.d/google
+++ /dev/null
@@ -1,75 +0,0 @@
-#! /bin/sh
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-### BEGIN INIT INFO
-# Provides: gce_onboot
-# X-Start-Before: ssh
-# Required-Start: $local_fs $network $syslog
-# Required-Stop:
-# Default-Start: 2 3 4 5
-# Default-Stop:
-# Short-Description: Google Compute Engine on-boot services
-# Description: This launches the Google Compute Engine
-# VM initialization scripts.
-### END INIT INFO
-
-# Load the VERBOSE setting and other rcS variables
-. /lib/init/vars.sh
-
-# Define LSB log_* functions.
-# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
-# and status_of_proc is working.
-. /lib/lsb/init-functions
-
-# If we're running under upstart, let the upstart config file handle things.
-# Debian 7 and newer have a near-one-liner function to detect this...
-if type init_is_upstart >/dev/null 2>&1; then
- # ... which we can use if present.
- init_is_upstart && exit 0
-else
- # Otherwise, directly include the core line of Debian 7's version.
- # Authorship credit: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=661109
- if [ -x /sbin/initctl ] && /sbin/initctl version | /bin/grep -q upstart; then
- exit 0
- fi
-fi
-
-DESC="Google Compute Engine on-boot services"
-NAME="onboot"
-
-#
-# Function that starts the daemon/service
-#
-do_start()
-{
- /usr/share/google/onboot
-}
-
-case "$1" in
- start)
- [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
- do_start
- case "$?" in
- 0) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- *) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- *)
-    echo "Usage: $0 start" >&2
- exit 3
- ;;
-esac
-
-:
diff --git a/google-startup-scripts/etc/init.d/google-startup-scripts b/google-startup-scripts/etc/init.d/google-startup-scripts
deleted file mode 100755
index 3a7e051..0000000
--- a/google-startup-scripts/etc/init.d/google-startup-scripts
+++ /dev/null
@@ -1,89 +0,0 @@
-#! /bin/sh
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-### BEGIN INIT INFO
-# Provides: gce_run_startup_scripts
-# Required-Start: $all
-# Required-Stop: $remote_fs $syslog docker kubelet
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: Google Compute Engine user scripts
-# Description: This runs user-specified VM startup and shutdown scripts.
-### END INIT INFO
-
-# Load the VERBOSE setting and other rcS variables
-. /lib/init/vars.sh
-
-# Define LSB log_* functions.
-# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
-# and status_of_proc is working.
-. /lib/lsb/init-functions
-
-DESC="Google Compute Engine user startup scripts"
-NAME="run_startup_scripts"
-
-# If we're running under upstart, let the upstart config file handle things.
-# Debian 7 and newer have a near-one-liner function to detect this...
-if type init_is_upstart >/dev/null 2>&1; then
- # ... which we can use if present.
- init_is_upstart && exit 0
-else
- # Otherwise, directly include the core line of Debian 7's version.
- # Authorship credit: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=661109
- if [ -x /sbin/initctl ] && /sbin/initctl version | /bin/grep -q upstart; then
- exit 0
- fi
-fi
-
-#
-# Function that starts the daemon/service
-#
-do_start()
-{
- /usr/share/google/run-startup-scripts
-}
-
-#
-# Function that stops the daemon/service
-#
-do_stop()
-{
- /usr/share/google/run-shutdown-scripts
-}
-
-case "$1" in
- start)
- [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
- do_start
- case "$?" in
- 0) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- *) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- stop)
- [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
- do_stop
- case "$?" in
- 0) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- *) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- *)
-    echo "Usage: $0 {start|stop}" >&2
- exit 3
- ;;
-esac
-
-:
diff --git a/google-startup-scripts/etc/init/google.conf b/google-startup-scripts/etc/init/google.conf
deleted file mode 100755
index d47fb0d..0000000
--- a/google-startup-scripts/etc/init/google.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-# google - Run google startup script
-#
-# Start when rc.local loads, so we run after all the boot-time tasks.
-start on google-rc-local-has-run
-
-task
-script
- /usr/share/google/onboot
-end script
diff --git a/google-startup-scripts/etc/init/google_run_shutdown_scripts.conf b/google-startup-scripts/etc/init/google_run_shutdown_scripts.conf
deleted file mode 100755
index 69fc61c..0000000
--- a/google-startup-scripts/etc/init/google_run_shutdown_scripts.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-# google - Run google shutdown script
-#
-#
-start on starting rc RUNLEVEL=[06]
-task
-
-script
- /usr/bin/logger -s -t google -p local0.info "Running google_run_shutdown_scripts.conf"
- /usr/share/google/run-shutdown-scripts
-end script
diff --git a/google-startup-scripts/etc/init/google_run_startup_scripts.conf b/google-startup-scripts/etc/init/google_run_startup_scripts.conf
deleted file mode 100755
index c81efd8..0000000
--- a/google-startup-scripts/etc/init/google_run_startup_scripts.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-# google - Run google startup script
-#
-#
-start on google-rc-local-has-run and google-onboot-has-run
-
-script
- /usr/bin/logger -s -t google -p local0.info "Running google_run_startup_scripts.conf"
- /usr/share/google/run-startup-scripts
- initctl emit --no-wait google-startup-scripts-have-run
-end script
diff --git a/google-startup-scripts/etc/rsyslog.d/90-google.conf b/google-startup-scripts/etc/rsyslog.d/90-google.conf
deleted file mode 100644
index 4448836..0000000
--- a/google-startup-scripts/etc/rsyslog.d/90-google.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-# Google Compute Engine default console logging.
-#
-auth,daemon,kern.* /dev/console
-
-# Dump startup script output to /var/log/startupscript.log.
-:syslogtag,startswith,"startupscript" /var/log/startupscript.log
-
-# Dump shutdown script output to /var/log/shutdownscript.log.
-:syslogtag,startswith,"shutdownscript" /var/log/shutdownscript.log
diff --git a/google-startup-scripts/usr/lib/systemd/system-preset/50-google.preset b/google-startup-scripts/usr/lib/systemd/system-preset/50-google.preset
deleted file mode 100644
index f981a63..0000000
--- a/google-startup-scripts/usr/lib/systemd/system-preset/50-google.preset
+++ /dev/null
@@ -1,3 +0,0 @@
-enable google.service
-enable google-shutdown-scripts.service
-enable google-startup-scripts.service
diff --git a/google-startup-scripts/usr/lib/systemd/system/google-shutdown-scripts.service b/google-startup-scripts/usr/lib/systemd/system/google-shutdown-scripts.service
deleted file mode 100644
index 73adab8..0000000
--- a/google-startup-scripts/usr/lib/systemd/system/google-shutdown-scripts.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=Google Compute Engine user shutdown scripts
-After=local-fs.target network-online.target network.target
-After=google.service rsyslog.service
-Wants=local-fs.target network-online.target network.target
-
-[Service]
-ExecStart=/bin/true
-ExecStop=/usr/share/google/run-shutdown-scripts
-Type=oneshot
-RemainAfterExit=true
-TimeoutStopSec=0
-
-[Install]
-WantedBy=multi-user.target
diff --git a/google-startup-scripts/usr/lib/systemd/system/google-startup-scripts.service b/google-startup-scripts/usr/lib/systemd/system/google-startup-scripts.service
deleted file mode 100644
index a99a160..0000000
--- a/google-startup-scripts/usr/lib/systemd/system/google-startup-scripts.service
+++ /dev/null
@@ -1,13 +0,0 @@
-[Unit]
-Description=Google Compute Engine user startup scripts
-After=local-fs.target network-online.target network.target
-After=google.service rsyslog.service
-Wants=local-fs.target network-online.target network.target
-
-[Service]
-ExecStart=/usr/share/google/run-startup-scripts
-KillMode=process
-Type=oneshot
-
-[Install]
-WantedBy=multi-user.target
diff --git a/google-startup-scripts/usr/share/google/boto/boot_setup.py b/google-startup-scripts/usr/share/google/boto/boot_setup.py
deleted file mode 100755
index e9f3924..0000000
--- a/google-startup-scripts/usr/share/google/boto/boot_setup.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#! /usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""A simple start up script to set up the system boto.cfg file.
-
-This will hit the metadata server to get the appropriate project id
-and install the compute authenication plugin.
-
-Note that this starts with whatever is in /etc/boto.cfg.template, adds
-to that and then persists it into /etc/boto.cfg. This is done so that
-the system boto.cfg can be removed prior to image packaging.
-"""
-
-from ConfigParser import SafeConfigParser
-import os
-import sys
-import textwrap
-import urllib2
-
-NUMERIC_PROJECT_ID_URL=('http://169.254.169.254/'
- 'computeMetadata/v1/project/numeric-project-id')
-SYSTEM_BOTO_CONFIG_TEMPLATE='/etc/boto.cfg.template'
-SYSTEM_BOTO_CONFIG='/etc/boto.cfg'
-AUTH_PLUGIN_DIR='/usr/share/google/boto/boto_plugins'
-
-
-def GetNumericProjectId():
- """Get the numeric project ID for this VM."""
- try:
- request = urllib2.Request(NUMERIC_PROJECT_ID_URL)
- request.add_unredirected_header('Metadata-Flavor', 'Google')
- return urllib2.urlopen(request).read()
- except (urllib2.URLError, urllib2.HTTPError, IOError), e:
- return None
-
-
-def AddConfigFileHeader(fp):
- s = ("""\
- This file is automatically created at boot time by the %s script.
- Do not edit this file directly. If you need to add items to this
- file, create/edit %s instead and then re-run the script."""
- % (os.path.abspath(__file__), SYSTEM_BOTO_CONFIG_TEMPLATE))
- fp.write('\n'.join(['# ' + s for s in textwrap.wrap(textwrap.dedent(s),
- break_on_hyphens=False)]))
- fp.write('\n\n')
-
-
-def main(argv):
- config = SafeConfigParser()
- config.read(SYSTEM_BOTO_CONFIG_TEMPLATE)
-
- # TODO(user): Figure out if we need a retry here.
- project_id = GetNumericProjectId()
- if not project_id:
- # Our project doesn't support service accounts.
- return
-
- if not config.has_section('GSUtil'):
- config.add_section('GSUtil')
- config.set('GSUtil', 'default_project_id', project_id)
- config.set('GSUtil', 'default_api_version', '2')
-
- if not config.has_section('GoogleCompute'):
- config.add_section('GoogleCompute')
- # TODO(user): Plumb a metadata value to set this. We probably want
- # to namespace the metadata values in some way like
-# 'boto_auth.service_account'.
- config.set('GoogleCompute', 'service_account', 'default')
-
- if not config.has_section('Plugin'):
- config.add_section('Plugin')
- config.set('Plugin', 'plugin_directory', AUTH_PLUGIN_DIR)
-
- with open(SYSTEM_BOTO_CONFIG, 'w') as configfile:
- AddConfigFileHeader(configfile)
- config.write(configfile)
-
-
-if __name__ == '__main__':
- main(sys.argv[1:])
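On a VM in a project numbered, say, 123456789012, the /etc/boto.cfg generated by this script would contain sections like the following (project number illustrative; the header comment written by AddConfigFileHeader is omitted):

    [GSUtil]
    default_project_id = 123456789012
    default_api_version = 2

    [GoogleCompute]
    service_account = default

    [Plugin]
    plugin_directory = /usr/share/google/boto/boto_plugins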
diff --git a/google-startup-scripts/usr/share/google/boto/boto_plugins/compute_auth.py b/google-startup-scripts/usr/share/google/boto/boto_plugins/compute_auth.py
deleted file mode 100644
index 97d3e20..0000000
--- a/google-startup-scripts/usr/share/google/boto/boto_plugins/compute_auth.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Authentication module for using Google Compute service accounts."""
-
-import json
-import urllib2
-
-from boto.auth_handler import AuthHandler
-from boto.auth_handler import NotReadyToAuthenticate
-
-META_DATA_SERVER_BASE_URL=(
- 'http://169.254.169.254/computeMetadata/v1')
-
-SERVICE_ACCOUNT_SCOPES_URL=(META_DATA_SERVER_BASE_URL +
- '/instance/service-accounts/%s/scopes?alt=json')
-SERVICE_ACCOUNT_TOKEN_URL=(META_DATA_SERVER_BASE_URL +
- '/instance/service-accounts/%s/token?alt=json')
-
-GS_SCOPES = set([
- 'https://www.googleapis.com/auth/devstorage.read_only',
- 'https://www.googleapis.com/auth/devstorage.read_write',
- 'https://www.googleapis.com/auth/devstorage.full_control',
- ])
-
-
-class ComputeAuth(AuthHandler):
- """Google Compute service account auth handler.
-
-  The boto library reads the system config file (/etc/boto.cfg) and looks
-  at a config value called 'plugin_directory'. It then loads the python
-  files in that directory and finds classes derived from
-  boto.auth_handler.AuthHandler.
- """
-
- capability = ['google-oauth2', 's3']
-
- def __init__(self, path, config, provider):
- self.service_account = config.get('GoogleCompute', 'service_account', '')
- if provider.name == 'google' and self.service_account:
- self.scopes = self.__GetGSScopes()
- if not self.scopes:
- raise NotReadyToAuthenticate()
- else:
- raise NotReadyToAuthenticate()
-
- def __GetJSONMetadataValue(self, url):
- try:
- request = urllib2.Request(url)
- request.add_unredirected_header('Metadata-Flavor', 'Google')
- data = urllib2.urlopen(request).read()
- return json.loads(data)
- except (urllib2.URLError, urllib2.HTTPError, IOError):
- return None
-
- def __GetGSScopes(self):
- """Return all Google Storage scopes available on this VM."""
- scopes = self.__GetJSONMetadataValue(
- SERVICE_ACCOUNT_SCOPES_URL % self.service_account)
- if scopes:
- return list(GS_SCOPES.intersection(set(scopes)))
- return None
-
- def __GetAccessToken(self):
- """Return an oauth2 access token for Google Storage."""
- token_info = self.__GetJSONMetadataValue(
- SERVICE_ACCOUNT_TOKEN_URL % self.service_account)
- if token_info:
- return token_info['access_token']
- return None
-
- def add_auth(self, http_request):
- http_request.headers['Authorization'] = (
- 'OAuth %s' % self.__GetAccessToken())
diff --git a/google-startup-scripts/usr/share/google/fetch_script b/google-startup-scripts/usr/share/google/fetch_script
deleted file mode 100755
index 72ba9ac..0000000
--- a/google-startup-scripts/usr/share/google/fetch_script
+++ /dev/null
@@ -1,148 +0,0 @@
-#! /bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Fetch a script from metadata and store it in the /var/run directory.
-declare -r LOGFILE=/var/log/google.log
-
-if [[ -x /usr/bin/logger ]]; then
- declare -r LOGGER=/usr/bin/logger
-else
- declare -r LOGGER=/bin/logger
-fi
-
-declare -r CURL_RETRY_LIMIT=10
-declare -r CURL_TIMEOUT=10
-
-function log() {
- echo "$@" | ${LOGGER} -t google -p daemon.info
- echo "$@" >> ${LOGFILE}
-}
-
-function download_url_with_logfile() {
- local readonly url=$1
- local readonly dest=$2
- local readonly logfile=$3
-
- if [[ "$url" =~ gs://* ]]; then
- log "Downloading url from ${url} to ${dest} using gsutil"
- gsutil cp "${url}" "${dest}" > "${logfile}" 2>&1 && return 0
- log "Failed to download $url"
- return 1
- fi
-
-  # Several forms of Google Storage URL are supported below.
-  # It is preferred that customers specify their object using
-  # its gs://<bucket>/<object> URL.
-
- bucket="[a-z0-9][-_.a-z0-9]*[a-z0-9]"
-
-  # Accept any non-empty string that doesn't contain a wildcard character.
-  # gsutil interprets some characters as wildcards; these characters in
-  # object names make it difficult or impossible to perform various
-  # wildcard operations using gsutil. For a complete list, see
-  # "gsutil help naming".
- object="[^\*\?][^\*\?]*"
-
- # For all validation tests:
- # alphanumeric ranges should only include ascii characters
- export LC_COLLATE=C
-
- # Check for the Google Storage URLs:
- # http://<bucket>.storage.googleapis.com/<object>/
- # https://<bucket>.storage.googleapis.com/<object>/
- if [[ "$url" =~ http[s]?://${bucket}\.storage\.googleapis\.com/${object} ]]; then
- log "Downloading url from ${url} to ${dest} using gsutil"
- # Create a url that can be interpreted by gsutil
- gsurl=$(echo "$url" | sed "s/^https\?:\/\/\($bucket\)\.storage\.googleapis\.com\/\($object\)$/gs:\/\/\1\/\2/")
- gsutil cp ${gsurl} ${dest} 2> ${logfile} && return 0
- # Check for the other possible Google Storage URLS:
- # http://storage.googleapis.com/<bucket>/<object>/
- # https://storage.googleapis.com/<bucket>/<object>/
- #
- # The following are deprecated but checked
- # http://commondatastorage.googleapis.com/<bucket>/<object>/
- # https://commondatastorage.googleapis.com/<bucket>/<object>/
- elif [[ "$url" =~ http[s]?://(commondata)?storage\.googleapis\.com/${bucket}/${object} ]]; then
- log "Downloading url from ${url} to ${dest} using gsutil"
- # Create a url that can be interpreted by gsutil
- gsurl=$(echo "$url" | sed "s/^https\?:\/\/\(commondata\|\)storage\.googleapis\.com\/\($bucket\)\/\($object\)$/gs:\/\/\2\/\3/")
- gsutil cp "${gsurl}" "${dest}" 2> "${logfile}" && return 0
- else
- log "URL ${url} is not located in Google Storage"
- fi
-
- # Unauthenticated download of the object.
- log "Downloading url from ${url} to ${dest} using curl"
- curl --max-time "${CURL_TIMEOUT}" --retry "${CURL_RETRY_LIMIT}" \
- 2>> "${logfile}" -o "${dest}" -L -- "${url}" && return 0;
-
- log "Failed to download $url"
- return 1
-}
-
-function download_url() {
- local readonly url=$1
- local readonly dest=$2
- local readonly logfile=$(mktemp)
- download_url_with_logfile "${url}" "${dest}" "${logfile}"
- return_code=$?
- # If the script was unable to download then report to the syslog.
- if [[ "${return_code}" != "0" ]]; then
- log "$(<"${logfile}")"
- else
- rm -f "${logfile}"
- fi
- return "${return_code}"
-}
-
-function get_metadata_attribute() {
- local readonly varname=$1
- /usr/share/google/get_metadata_value "attributes/${varname}"
- return $?
-}
-
-function fetch_script() {
- # Try to use the script-url, then the script metadata.
- # Check the script url first.
- script=$1
- script_type=$2
- url_type="${script_type}-script"
- url="${url_type}-url"
-
- local readonly script_url="$(get_metadata_attribute ${url})"
- if [[ -n "${script_url}" ]]; then
- log "${url} metadata flag: ${script_url}"
- download_url "${script_url}" "${script}"
- if [[ $? != 0 ]]; then
- log "Could not download ${script_type} script ${script_url}."
- else
- log "Successfully downloaded ${script_type} script ${script_url}."
- fi
- else
- local readonly metadata_script="$(get_metadata_attribute ${url_type})"
- if [[ -n "${metadata_script}" ]]; then
- echo "${metadata_script}" > "${script}"
- log "${script_type} script found in metadata."
- else
- log $(curl "http://metadata.google.internal/computeMetadata/v1/instance/?recursive=True" -H "Metadata-Flavor: Google")
- log "No ${script_type} script found in metadata."
- fi
- fi
- [[ -e "${script}" ]] && chmod 700 "${script}"
-
- return 0
-}
-
-fetch_script "$1" "$2"
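The URL rewriting above can be summarized as: both `https://<bucket>.storage.googleapis.com/<object>` and `https://storage.googleapis.com/<bucket>/<object>` (plus the deprecated `commondatastorage` host) collapse to `gs://<bucket>/<object>`. A rough Python equivalent of the sed expressions, for illustration only:

    import re

    BUCKET = r'[a-z0-9][-_.a-z0-9]*[a-z0-9]'

    def to_gs_url(url):
      m = re.match(r'https?://(%s)\.storage\.googleapis\.com/(.+)$' % BUCKET, url)
      if not m:
        m = re.match(
            r'https?://(?:commondata)?storage\.googleapis\.com/(%s)/(.+)$' % BUCKET,
            url)
      if m:
        return 'gs://%s/%s' % m.groups()
      return None  # Not a recognized Google Storage URL.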
diff --git a/google-startup-scripts/usr/share/google/first-boot b/google-startup-scripts/usr/share/google/first-boot
deleted file mode 100755
index b346b65..0000000
--- a/google-startup-scripts/usr/share/google/first-boot
+++ /dev/null
@@ -1,94 +0,0 @@
-#! /bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Run initialization code the first time this image boots on a given instance.
-
-declare -r INSTANCE_FILE=${PREFIX}/var/lib/google/vm-instance-id
-declare -r LOCK_FILE=${INSTANCE_FILE}.lock
-
-mkdir -p ${PREFIX}/var/lib/google/per-instance
-
-function log() {
- if [[ -x ${PREFIX}/usr/bin/logger ]]; then
- echo $* | ${PREFIX}/usr/bin/logger -t first-boot -p auth.info
- else
- echo $* >&2
- fi
-}
-
-function get_instance_id() {
- ${PREFIX}/usr/share/google/get_metadata_value id 2>/dev/null
-}
-
-# Checks whether the instance id has changed.
-# Exits with a non-zero code if the instance id can't be determined or
-# matches the id stored on disk.
-function check_stored_instance_id() {
- local readonly instance_id=$1
-
- if [[ "${instance_id}" == "" ]]; then
- # Cannot determine instance id. Either we're not running on a Compute VM,
- # or networking hasn't started up yet, etc.
- exit 1
- fi
-
- if [[ "${instance_id}" != "unknown-instance" &&
- "${instance_id}" == "$(cat ${INSTANCE_FILE} 2>/dev/null)" ]]; then
- # Instance id is same as on disk.
- exit 1
- fi
-}
-
-# Performs host key setup if the instance has changed.
-# Otherwise we exit with a non-zero return code.
-function manage_stored_instance_id() {
- local readonly instance_id=$1
-
- # Create a subshell to manage the lock file. The file lock is released
- # when the subshell exits.
- (
- # Open LOCK_FILE on FD 200 and lock it. This prevents concurrent calls
- # to regenerate host keys that spam console output.
- flock -e 200
-
- # Checks whether the instance has changed.
- # If the instance hasn't changed, exit the script.
- check_stored_instance_id ${instance_id}
-
- # If the instance hasn't changed, we have now exited the subshell.
- # Since the instance changed, we do host key regeneration.
- log "Running first-boot"
-
- # Regenerate host keys for ssh.
- if [[ -x ${PREFIX}/usr/share/google/regenerate-host-keys ]]; then
- ${PREFIX}/usr/share/google/regenerate-host-keys
- fi
-
- # We are booting this instance for the first time.
- echo ${instance_id} > ${INSTANCE_FILE}
- ) 200> ${LOCK_FILE}
-
- return $?
-}
-
-declare -r INSTANCE_ID=$(get_instance_id)
-
-manage_stored_instance_id ${INSTANCE_ID}
-if [[ $? != 0 ]]; then
- # The instance hasn't changed so exit.
- exit 0
-fi
-
-# Make a per-instance data directory.
-mkdir -p ${PREFIX}/var/lib/google/per-instance/${INSTANCE_ID}
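The guard above reads as: take an exclusive lock, compare the current instance ID with the one recorded on disk, and only regenerate host keys when they differ. A condensed Python rendering of that logic (paths as in the script; the PREFIX variable and the `unknown-instance` special case are omitted):

    import fcntl

    INSTANCE_FILE = '/var/lib/google/vm-instance-id'

    def is_first_boot(instance_id):
      with open(INSTANCE_FILE + '.lock', 'w') as lock:
        fcntl.flock(lock, fcntl.LOCK_EX)  # Serialize host key regeneration.
        try:
          stored = open(INSTANCE_FILE).read().strip()
        except IOError:
          stored = ''
        if not instance_id or instance_id == stored:
          return False
        with open(INSTANCE_FILE, 'w') as f:  # Record the new instance ID.
          f.write(instance_id)
        return True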
diff --git a/google-startup-scripts/usr/share/google/get_metadata_value b/google-startup-scripts/usr/share/google/get_metadata_value
deleted file mode 100755
index c4e0eb6..0000000
--- a/google-startup-scripts/usr/share/google/get_metadata_value
+++ /dev/null
@@ -1,73 +0,0 @@
-#! /bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Get a metadata value from the metadata server.
-declare -r VARNAME=$1
-declare -r MDS_PREFIX=http://metadata.google.internal/computeMetadata/v1
-declare -r MDS_TRIES=${MDS_TRIES:-100}
-
-function print_metadata_value() {
- local readonly tmpfile=$(mktemp)
- http_code=$(curl -f "${1}" -H "Metadata-Flavor: Google" -w "%{http_code}" \
- -s -o ${tmpfile} 2>/dev/null)
- local readonly return_code=$?
- # If the command completed successfully, print the metadata value to stdout.
- if [[ ${return_code} == 0 && ${http_code} == 200 ]]; then
- cat ${tmpfile}
- fi
- rm -f ${tmpfile}
- return ${return_code}
-}
-
-function print_metadata_value_if_exists() {
- local return_code=1
- local readonly url=$1
- print_metadata_value ${url}
- return_code=$?
- return ${return_code}
-}
-
-function get_metadata_value() {
- local readonly varname=$1
- # Print the instance metadata value.
- print_metadata_value_if_exists ${MDS_PREFIX}/instance/${varname}
- return_code=$?
- # If the instance doesn't have the value, try the project.
- if [[ ${return_code} != 0 ]]; then
- print_metadata_value_if_exists ${MDS_PREFIX}/project/${varname}
- return_code=$?
- fi
- return ${return_code}
-}
-
-function get_metadata_value_with_retries() {
- local return_code=1 # General error code.
- for ((count=0; count <= ${MDS_TRIES}; count++)); do
- get_metadata_value $VARNAME
- return_code=$?
- case $return_code in
- # No error. We're done.
- 0) exit ${return_code};;
- # Failed to resolve host or connect to host. Retry.
- 6|7) sleep 0.3; continue;;
- # A genuine error. Exit.
- *) exit ${return_code};
- esac
- done
- # Exit with the last return code we got.
- exit ${return_code}
-}
-
-get_metadata_value_with_retries
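The retry policy above is deliberately narrow: only curl exit codes 6 and 7 (failed to resolve or failed to connect) are retried, with a 0.3 second pause, while any HTTP-level failure returns immediately. A rough Python analogue, for illustration:

    import time
    import urllib2

    def get_value(url, tries=100):
      request = urllib2.Request(url)
      request.add_unredirected_header('Metadata-Flavor', 'Google')
      for _ in range(tries):
        try:
          return urllib2.urlopen(request).read()
        except urllib2.HTTPError:  # Got a response; the value is absent.
          return None
        except urllib2.URLError:   # Resolve/connect failure; retry.
          time.sleep(0.3)
      return None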
diff --git a/google-startup-scripts/usr/share/google/onboot b/google-startup-scripts/usr/share/google/onboot
deleted file mode 100755
index 482d384..0000000
--- a/google-startup-scripts/usr/share/google/onboot
+++ /dev/null
@@ -1,162 +0,0 @@
-#!/bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Prep the image for Google Compute services.
-#
-# Do NOT "set -e"
-
-# Exit out early if we've run before.
-declare -r RUNFILE=/var/run/google.onboot
-if [ -f ${RUNFILE} ]; then
- exit 0
-fi
-
-PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin
-
-declare -r LOGFILE=/var/log/google.log
-
-if [ -x /usr/bin/logger ]; then
- declare -r LOGGER=/usr/bin/logger
-else
- declare -r LOGGER=/bin/logger
-fi
-
-declare -r BOTO_SETUP_SCRIPT=/usr/share/google/boto/boot_setup.py
-declare -r GOOGLE_ENVIRONMENT=/var/run/google.environment
-
-function log() {
- echo $* | ${LOGGER} -t google -p daemon.info
- echo $* >> ${LOGFILE}
-}
-
-function set_interrupts() {
- if [[ -x /usr/share/google/set-interrupts ]]; then
- /usr/share/google/set-interrupts
- fi
-}
-
-function virtionet_irq_affinity() {
- if [[ -x /usr/share/google/virtionet-irq-affinity ]]; then
- /usr/share/google/virtionet-irq-affinity
- fi
-}
-
-function first_boot() {
- if [[ -x /usr/share/google/first-boot ]]; then
- /usr/share/google/first-boot
- fi
-}
-
-function get_metadata_value() {
- local readonly varname=$1
- /usr/share/google/get_metadata_value ${varname}
- return $?
-}
-
-function do_environment() {
- echo "INSTANCE_ID=$(get_metadata_value id)" > ${GOOGLE_ENVIRONMENT}
-}
-
-function do_init() {
- log "onboot initializing"
-
- do_environment
-
- # If it exists, run the boto bootstrap script. This will set things
- # up so that gsutil will just work with any provisioned service
- # account.
- if [ -x ${BOTO_SETUP_SCRIPT} ]; then
- log "Running Boto setup script at ${BOTO_SETUP_SCRIPT}"
- ${BOTO_SETUP_SCRIPT} >> ${LOGFILE} 2>&1
- fi
-
- return 0
-}
-
-function print_ssh_key_fingerprints() {
- log "SSH public key fingerprints"
-
- if [ -e /etc/ssh/ssh_host_rsa_key.pub ]; then
- log "RSA public key"
- ssh-keygen -lf /etc/ssh/ssh_host_rsa_key.pub
- else
- log "No RSA public key found."
- fi
-
- if [ -e /etc/ssh/ssh_host_dsa_key.pub ]; then
- log "DSA public key"
- ssh-keygen -lf /etc/ssh/ssh_host_dsa_key.pub
- else
- log "No DSA public key found."
- fi
-
- if [ -e /etc/ssh/ssh_host_ecdsa_key.pub ]; then
- log "ECDSA public key"
- ssh-keygen -lf /etc/ssh/ssh_host_ecdsa_key.pub
- else
- log "No ECDSA public key found."
- fi
-
- return 0
-}
-
-function check_for_connection() {
- local count=0
- local return_code=0
-
- log "Checking for metadata server connection."
- while true; do
- ((count++))
- MDS_TRIES=1 /usr/share/google/get_metadata_value "?recursive=True"
- return_code=$?
- case ${return_code} in
- # No error. Connection is active.
- 0) break;;
- # Failed to resolve host or connect to host. Retry indefinitely.
- 6|7) sleep 1.0
- log "Waiting for metadata server, attempt ${count}"
- # After 7 minutes, add a console message denoting a probable network
- # issue. On systems using dhclient there is an attempt to obtain an IP
- # for 60 seconds followed by a 5 minute wait period. After 7 minutes,
- # this cycle will have run through twice. After this period of time, it
- # is not known when a DHCP lease might be obtained and the network
- # interface fully operational.
- if ((count >= 7*60+1)); then
- log "There is likely a problem with the network."
- fi
- continue;;
- # A genuine error but a connection exists.
- *)
- log "Check for connection non-fatal error getting metadata ${return_code}"
- break;;
- esac
- done
- # Return the last return code we got.
- return ${return_code}
-}
-
-set_interrupts
-virtionet_irq_affinity
-check_for_connection
-first_boot
-do_init
-print_ssh_key_fingerprints
-
-if [ -x /sbin/initctl ]; then
- /sbin/initctl emit --no-wait google-onboot-has-run
-fi
-
-# Indicate that we've run already.
-touch ${RUNFILE}
diff --git a/google-startup-scripts/usr/share/google/regenerate-host-keys b/google-startup-scripts/usr/share/google/regenerate-host-keys
deleted file mode 100755
index fb9d7fd..0000000
--- a/google-startup-scripts/usr/share/google/regenerate-host-keys
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Regenerates the SSH host keys when the VM is restarted with a new IP
-# address. Booting a VM from an image with a known SSH key allows a
-# number of attacks, so this script regenerates the host key whenever
-# the IP address changes. (This applies on firstboot, but also if the
-# VM disk has been used for another image.)
-
-log() {
- logger -t regenerate-host-keys -p auth.info -s "$@"
-}
-
-sshd_cmd() {
- local cmd=$1
- log "${cmd}ing sshd"
- if [[ -x /etc/init.d/ssh || -f /etc/init/ssh.conf ]]; then
- service ssh ${cmd}
- fi
- if [[ -x /etc/init.d/sshd || -f /etc/init/sshd.conf ]]; then
- service sshd ${cmd}
- fi
-}
-
-generate_key() {
- local key_type=$1
- local key_dest=$2
- local tmp_dir=$(mktemp -d /tmp/keystore.XXXXXXXX)
- local tmp_file="/${tmp_dir}/keyfile.$$";
- local log_file=$(mktemp);
- log "Regenerating sshd key ${key_dest}"
- ssh-keygen -N '' -t ${key_type} -f ${tmp_file} > ${log_file} 2>&1
- if [[ $? == 0 ]]; then
- rm -f ${key_dest} ${key_dest}.pub
- cp -f ${tmp_file} ${key_dest}
- cp -f ${tmp_file}.pub ${key_dest}.pub
- else
- log "Could not create sshd key ${key_dest}"
- log "$(cat ${log_file})"
- fi
- rm -rf ${tmp_dir}
- rm -f ${log_file}
-}
-
-regenerate_host_keys() {
- log "Regenerating SSH Host Keys for: $new_ip_address (previously $old_ip_address)."
-  rm -f /etc/ssh/ssh_host_key /etc/ssh/ssh_host_key.pub # SSH1 RSA key.
- for key_file in /etc/ssh/ssh_host_*_key; do
- # Parse out the type of key, matching the * in the for loop command above.
- key_type=$(basename "${key_file}" _key)
- key_type=${key_type#ssh_host_}
-
- generate_key "${key_type}" "${key_file}"
- done
- # Allow sshd to come up if we were suppressing it.
- if [[ $(cat /etc/ssh/sshd_not_to_be_run 2>/dev/null) == "GOOGLE" ]]; then
- rm -f /etc/ssh/sshd_not_to_be_run
- fi
- if [[ -x /bin/systemctl ]]; then
- exit
- else
- # Start sshd if it was not running.
- sshd_cmd start
- # Reload sshd config if it already was running.
- sshd_cmd reload
- fi
-}
-
-regenerate_host_keys
diff --git a/google-startup-scripts/usr/share/google/run-scripts b/google-startup-scripts/usr/share/google/run-scripts
deleted file mode 100755
index 46af979..0000000
--- a/google-startup-scripts/usr/share/google/run-scripts
+++ /dev/null
@@ -1,54 +0,0 @@
-#! /bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Run startup scripts that should happen "Late" at boot.
-# Run shutdown scripts that should happen as soon as the instances
-# begin to power down.
-#
-# Do NOT "set -e"
-declare -r SCRIPT=$1
-declare -r SCRIPT_TYPE=$2
-
-if [ -x /usr/bin/logger ]; then
- declare -r LOGGER=/usr/bin/logger
-else
- declare -r LOGGER=/bin/logger
-fi
-
-LOG_CMD="${LOGGER} -t ${SCRIPT_TYPE}script -p daemon.info"
-
-function log() {
- echo "$@" | ${LOG_CMD}
-}
-
-declare -r GOOGLE_ENVIRONMENT=/var/run/google.environment
-
-function copy_and_run() {
- local source=$1
- local dest=$(mktemp)
- cat "${source}" >> "${dest}"
- chmod u+x "${dest}"
- log "Running ${SCRIPT_TYPE} script ${source}"
- "${dest}" 2>&1 | ${LOG_CMD}
- log "Finished running ${SCRIPT_TYPE} script ${source}"
- rm -f "${dest}"
-}
-
-if [[ -e "${SCRIPT}" ]]; then
- (
- [ -r ${GOOGLE_ENVIRONMENT} ] && source ${GOOGLE_ENVIRONMENT};
- copy_and_run "${SCRIPT}"
- )
-fi
diff --git a/google-startup-scripts/usr/share/google/run-shutdown-scripts b/google-startup-scripts/usr/share/google/run-shutdown-scripts
deleted file mode 100755
index 61377e9..0000000
--- a/google-startup-scripts/usr/share/google/run-shutdown-scripts
+++ /dev/null
@@ -1,31 +0,0 @@
-#! /bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Run shutdown scripts that should happen as soon as the instances
-# begin to power down.
-#
-# Do NOT "set -e"
-PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin
-
-declare -r SHUTDOWN_SCRIPT=/var/run/google.shutdown.script
-
-# NOTE
-# Make sure that the shutdown script completes within 90 seconds, so
-# that the OS has time to complete its shutdown, including flushing
-# buffers to disk.
-#
-# The shutdown script blocks other shutdown operations from proceeding.
-/usr/share/google/fetch_script ${SHUTDOWN_SCRIPT} shutdown
-/usr/share/google/run-scripts ${SHUTDOWN_SCRIPT} shutdown
diff --git a/google-startup-scripts/usr/share/google/run-startup-scripts b/google-startup-scripts/usr/share/google/run-startup-scripts
deleted file mode 100755
index b9e2667..0000000
--- a/google-startup-scripts/usr/share/google/run-startup-scripts
+++ /dev/null
@@ -1,27 +0,0 @@
-#! /bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Run startup scripts that should happen "Late" at boot.
-#
-# Do NOT "set -e"
-PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin
-
-declare -r STARTUP_SCRIPT=/var/run/google.startup.script
-
-# Make sure all udev changes settle before running startup scripts.
-udevadm settle
-
-/usr/share/google/fetch_script ${STARTUP_SCRIPT} startup
-/usr/share/google/run-scripts ${STARTUP_SCRIPT} startup
diff --git a/google-startup-scripts/usr/share/google/safe_format_and_mount b/google-startup-scripts/usr/share/google/safe_format_and_mount
deleted file mode 100755
index 8e68037..0000000
--- a/google-startup-scripts/usr/share/google/safe_format_and_mount
+++ /dev/null
@@ -1,152 +0,0 @@
-#! /bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Mount a disk, formatting it if necessary. If the disk looks like it may
-# have been formatted before, we will not format it.
-#
-# This script uses blkid and file to search for magic "formatted" bytes
-# at the beginning of the disk. Furthermore, it attempts to use fsck to
-# repair the filesystem before formatting it.
-
-FSCK=fsck.ext4
-MOUNT_OPTIONS="discard,defaults"
-MKFS="mkfs.ext4 -E lazy_itable_init=0,lazy_journal_init=0 -F"
-if [ -f /etc/redhat-release ]; then
- if grep -q '6\..' /etc/redhat-release; then
- # lazy_journal_init is not recognized in redhat 6
- MKFS="mkfs.ext4 -E lazy_itable_init=0 -F"
- elif grep -q '7\..' /etc/redhat-release; then
- FSCK=fsck.xfs
- MKFS=mkfs.xfs
- fi
-fi
-
-LOGTAG=safe_format_and_mount
-LOGFACILITY=user
-
-function log() {
- local readonly severity=$1; shift;
- logger -t ${LOGTAG} -p ${LOGFACILITY}.${severity} -s "$@"
-}
-
-function log_command() {
- local readonly log_file=$(mktemp)
- local readonly retcode
- log info "Running: $*"
- $* > ${log_file} 2>&1
- retcode=$?
- # only return the last 1000 lines of the logfile, just in case it's HUGE.
- tail -1000 ${log_file} | logger -t ${LOGTAG} -p ${LOGFACILITY}.info -s
- rm -f ${log_file}
- return ${retcode}
-}
-
-function help() {
- cat >&2 <<EOF
-$0 [-f fsck_cmd] [-m mkfs_cmd] [-o mount_opts] <device> <mountpoint>
-EOF
- exit 0
-}
-
-while getopts ":hf:o:m:" opt; do
- case $opt in
- h) help;;
- f) FSCK=$OPTARG;;
- o) MOUNT_OPTIONS=$OPTARG;;
- m) MKFS=$OPTARG;;
- -) break;;
- \?) log error "Invalid option: -${OPTARG}"; exit 1;;
- :) log "Option -${OPTARG} requires an argument."; exit 1;;
- esac
-done
-
-shift $(($OPTIND - 1))
-readonly DISK=$1
-readonly MOUNTPOINT=$2
-
-[[ -z ${DISK} ]] && help
-[[ -z ${MOUNTPOINT} ]] && help
-
-function disk_looks_unformatted() {
- blkid ${DISK}
- if [[ $? == 0 ]]; then
- return 0
- fi
-
- local readonly file_type=$(file --special-files ${DISK})
- case ${file_type} in
- *filesystem*)
- return 0;;
- esac
-
- return 1
-}
-
-function format_disk() {
- log_command ${MKFS} ${DISK}
-}
-
-function try_repair_disk() {
- log_command ${FSCK} -a ${DISK}
- local readonly fsck_return=$?
- if [[ ${fsck_return} -ge 8 ]]; then
- log error "Fsck could not correct errors on ${DISK}"
- return 1
- fi
- if [[ ${fsck_return} -gt 0 ]]; then
- log warning "Fsck corrected errors on ${DISK}"
- fi
- return 0
-}
-
-function try_mount() {
- local mount_retcode
- try_repair_disk
-
- log_command mount -o ${MOUNT_OPTIONS} ${DISK} ${MOUNTPOINT}
- mount_retcode=$?
- if [[ ${mount_retcode} == 0 ]]; then
- return 0
- fi
-
- # Check to see if it looks like a filesystem before formatting it.
- disk_looks_unformatted ${DISK}
- if [[ $? == 0 ]]; then
- log error "Disk ${DISK} looks formatted but won't mount. Giving up."
- return ${mount_retcode}
- fi
-
- # The disk looks like it's not been formatted before.
- format_disk
- if [[ $? != 0 ]]; then
- log error "Format of ${DISK} failed."
- fi
-
- log_command mount -o ${MOUNT_OPTIONS} ${DISK} ${MOUNTPOINT}
- mount_retcode=$?
- if [[ ${mount_retcode} == 0 ]]; then
- return 0
- fi
- log error "Tried everything we could, but could not mount ${DISK}."
- return ${mount_retcode}
-}
-
-log warn "====================================================================="
-log warn "WARNING: safe_format_and_mount is deprecated."
-log warn "See https://cloud.google.com/compute/docs/disks/persistent-disks"
-log warn "for additional instructions."
-log warn "====================================================================="
-try_mount
-exit $?
diff --git a/google-startup-scripts/usr/share/google/set-interrupts b/google-startup-scripts/usr/share/google/set-interrupts
deleted file mode 100755
index 36ccaec..0000000
--- a/google-startup-scripts/usr/share/google/set-interrupts
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/bin/bash
-
-total_cpus=`nproc`
-
-config_nvme()
-{
- current_cpu=0
- for dev in /sys/bus/pci/drivers/nvme/*
- do
- if [ ! -d $dev ]
- then
- continue
- fi
- for irq_info in $dev/msi_irqs/*
- do
- if [ ! -f $irq_info ]
- then
- continue
- fi
- current_cpu=$((current_cpu % total_cpus))
- cpu_mask=`printf "%x" $((1<<current_cpu))`
-      irq=$(basename $irq_info)
- echo Setting IRQ $irq smp_affinity to $cpu_mask
- echo $cpu_mask > /proc/irq/$irq/smp_affinity
- current_cpu=$((current_cpu+1))
- done
- done
-}
-
-config_scsi()
-{
- irqs=()
- for device in /sys/bus/virtio/drivers/virtio_scsi/virtio*
- do
- ssd=0
- for target_path in $device/host*/target*/*
- do
- if [ ! -f $target_path/model ]
- then
- continue
- fi
- model=$(cat $target_path/model)
- if [[ $model =~ .*EphemeralDisk.* ]]
- then
- ssd=1
- for queue_path in $target_path/block/sd*/queue
- do
- echo noop > $queue_path/scheduler
- echo 0 > $queue_path/add_random
- echo 512 > $queue_path/nr_requests
- echo 0 > $queue_path/rotational
- echo 0 > $queue_path/rq_affinity
- echo 1 > $queue_path/nomerges
- done
- fi
- done
- if [[ $ssd == 1 ]]
- then
- request_queue=$(basename $device)-request
- irq=$(cat /proc/interrupts |grep $request_queue| awk '{print $1}'| sed 's/://')
- irqs+=($irq)
- fi
- done
- irq_count=${#irqs[@]}
- if [ $irq_count != 0 ]
- then
- stride=$((total_cpus / irq_count))
- stride=$((stride < 1 ? 1 : stride))
- current_cpu=0
- for irq in "${irqs[@]}"
- do
- current_cpu=$(($current_cpu % $total_cpus))
- cpu_mask=`printf "%x" $((1<<$current_cpu))`
- echo Setting IRQ $irq smp_affinity to $cpu_mask
- echo $cpu_mask > /proc/irq/$irq/smp_affinity
- current_cpu=$((current_cpu+stride))
- done
- fi
-}
-
-config_nvme
-config_scsi
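Both functions compute the affinity value the same way: a hex mask with a single bit set for the chosen CPU, written to /proc/irq/<irq>/smp_affinity. An illustrative one-liner equivalent of the shell's printf:

    def cpu_mask(cpu):
      # CPU 0 -> '1', CPU 3 -> '8', CPU 4 -> '10', matching
      # printf "%x" $((1<<cpu)) in the scripts above.
      return format(1 << cpu, 'x')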
diff --git a/legacy/gcimagebundle/gcimagebundlelib/__init__.py b/google_compute_engine/__init__.py
index e69de29..e69de29 100644
--- a/legacy/gcimagebundle/gcimagebundlelib/__init__.py
+++ b/google_compute_engine/__init__.py
diff --git a/google_compute_engine/accounts/__init__.py b/google_compute_engine/accounts/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/google_compute_engine/accounts/__init__.py
diff --git a/google_compute_engine/accounts/accounts_daemon.py b/google_compute_engine/accounts/accounts_daemon.py
new file mode 100755
index 0000000..7ef82a0
--- /dev/null
+++ b/google_compute_engine/accounts/accounts_daemon.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Manage user accounts on a Google Compute Engine instances."""
+
+import datetime
+import json
+import logging.handlers
+import optparse
+
+from google_compute_engine import config_manager
+from google_compute_engine import file_utils
+from google_compute_engine import logger
+from google_compute_engine import metadata_watcher
+from google_compute_engine.accounts import accounts_utils
+
+LOCKFILE = '/var/lock/google_accounts.lock'
+
+
+class AccountsDaemon(object):
+ """Manage user accounts based on changes to metadata."""
+
+ invalid_users = set()
+
+ def __init__(self, groups=None, remove=False, debug=False):
+ """Constructor.
+
+ Args:
+ groups: string, a comma separated list of groups.
+ remove: bool, True if deprovisioning a user should be destructive.
+ debug: bool, True if debug output should write to the console.
+ """
+ facility = logging.handlers.SysLogHandler.LOG_DAEMON
+ self.logger = logger.Logger(
+ name='google-accounts', debug=debug, facility=facility)
+ self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
+ self.utils = accounts_utils.AccountsUtils(
+ logger=self.logger, groups=groups, remove=remove)
+ try:
+ with file_utils.LockFile(LOCKFILE):
+ self.logger.info('Starting Google Accounts daemon.')
+ self.watcher.WatchMetadata(self.HandleAccounts, recursive=True)
+ except (IOError, OSError) as e:
+ self.logger.warning(str(e))
+
+ def _HasExpired(self, key):
+ """Check whether an SSH key has expired.
+
+ Uses Google-specific semantics of the OpenSSH public key format's comment
+ field to determine if an SSH key is past its expiration timestamp, and
+ therefore no longer to be trusted. This format is still subject to change.
+ Reliance on it in any way is at your own risk.
+
+ Args:
+ key: string, a single public key entry in OpenSSH public key file format.
+ This will be checked for Google-specific comment semantics, and if
+ present, those will be analysed.
+
+ Returns:
+ bool, True if the key has Google-specific comment semantics and has an
+ expiration timestamp in the past, or False otherwise.
+ """
+ self.logger.debug('Processing key: %s.', key)
+
+ try:
+ schema, json_str = key.split(None, 3)[2:]
+ except (ValueError, AttributeError):
+ self.logger.debug('No schema identifier. Not expiring key.')
+ return False
+
+ if schema != 'google-ssh':
+ self.logger.debug('Invalid schema %s. Not expiring key.', schema)
+ return False
+
+ try:
+ json_obj = json.loads(json_str)
+ except ValueError:
+ self.logger.debug('Invalid JSON %s. Not expiring key.', json_str)
+ return False
+
+ if 'expireOn' not in json_obj:
+ self.logger.debug('No expiration timestamp. Not expiring key.')
+ return False
+
+ expire_str = json_obj['expireOn']
+ format_str = '%Y-%m-%dT%H:%M:%S+0000'
+ try:
+ expire_time = datetime.datetime.strptime(expire_str, format_str)
+ except ValueError:
+ self.logger.warning(
+ 'Expiration timestamp "%s" not in format %s. Not expiring key.',
+ expire_str, format_str)
+ return False
+
+ # Expire the key if and only if we have exceeded the expiration timestamp.
+ return datetime.datetime.utcnow() > expire_time
+
+ def _ParseAccountsData(self, account_data):
+ """Parse the SSH key data into a user map.
+
+ Args:
+ account_data: string, the metadata server SSH key attributes data.
+
+ Returns:
+      dict, a mapping of the form: {'username': ['sshkey1', 'sshkey2', ...]}.
+ """
+ if not account_data:
+ return {}
+ lines = [line for line in account_data.splitlines() if line]
+ user_map = {}
+ for line in lines:
+ split_line = line.split(':', 1)
+ if len(split_line) != 2:
+ self.logger.info('SSH key is not a complete entry: %s.', split_line)
+ continue
+ user, key = split_line
+ if self._HasExpired(key):
+ self.logger.debug('Expired SSH key for user %s: %s.', user, key)
+ continue
+ if user not in user_map:
+ user_map[user] = []
+ user_map[user].append(key)
+    self.logger.debug('User accounts: %s.', user_map)
+ return user_map
+
+ def _GetAccountsData(self, metadata_dict):
+ """Get the user accounts specified in metadata server contents.
+
+ Args:
+ metadata_dict: json, the deserialized contents of the metadata server.
+
+ Returns:
+      dict, a mapping of the form: {'username': ['sshkey1', 'sshkey2', ...]}.
+ """
+ metadata_dict = metadata_dict or {}
+
+ try:
+ instance_data = metadata_dict['instance']['attributes']
+ except KeyError:
+ instance_data = {}
+ self.logger.warning('Instance attributes were not found.')
+
+ try:
+ project_data = metadata_dict['project']['attributes']
+ except KeyError:
+ project_data = {}
+ self.logger.warning('Project attributes were not found.')
+ valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')]
+ block_project = instance_data.get('block-project-ssh-keys', '').lower()
+ if block_project != 'true' and not instance_data.get('sshKeys'):
+ valid_keys.append(project_data.get('ssh-keys'))
+ valid_keys.append(project_data.get('sshKeys'))
+ accounts_data = '\n'.join([key for key in valid_keys if key])
+ return self._ParseAccountsData(accounts_data)
+
+ def _UpdateUsers(self, update_users):
+ """Provision and update Linux user accounts based on account metadata.
+
+ Args:
+ update_users: dict, authorized users mapped to their public SSH keys.
+ """
+ for user, ssh_keys in update_users.items():
+ if not user or user in self.invalid_users:
+ continue
+ if not self.utils.UpdateUser(user, ssh_keys):
+ self.invalid_users.add(user)
+
+ def _RemoveUsers(self, remove_users):
+ """Deprovision Linux user accounts that do not appear in account metadata.
+
+ Args:
+ remove_users: list, the username strings of the Linux accounts to remove.
+ """
+ for username in remove_users:
+ self.utils.RemoveUser(username)
+ self.invalid_users -= set(remove_users)
+
+ def HandleAccounts(self, result):
+ """Called when there are changes to the contents of the metadata server.
+
+ Args:
+ result: json, the deserialized contents of the metadata server.
+ """
+ self.logger.debug('Checking for changes to user accounts.')
+ configured_users = self.utils.GetConfiguredUsers()
+ desired_users = self._GetAccountsData(result)
+ remove_users = sorted(set(configured_users) - set(desired_users.keys()))
+ self._UpdateUsers(desired_users)
+ self._RemoveUsers(remove_users)
+ self.utils.SetConfiguredUsers(desired_users.keys())
+
+
+def main():
+ parser = optparse.OptionParser()
+ parser.add_option('-d', '--debug', action='store_true', dest='debug',
+ help='print debug output to the console.')
+ (options, _) = parser.parse_args()
+ instance_config = config_manager.ConfigManager()
+ if instance_config.GetOptionBool('Daemons', 'accounts_daemon'):
+ AccountsDaemon(
+ groups=instance_config.GetOptionString('Accounts', 'groups'),
+ remove=instance_config.GetOptionBool('Accounts', 'deprovision_remove'),
+ debug=bool(options.debug))
+
+
+if __name__ == '__main__':
+ main()
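A metadata `ssh-keys` entry consumed by `_ParseAccountsData` has the form `<username>:<openssh public key>`, and keys carrying the `google-ssh` schema embed a JSON comment with an `expireOn` timestamp. A hypothetical entry (username, key material, and timestamp are illustrative):

    alice:ssh-rsa AAAAB3NzaC1yc2E... google-ssh {"userName": "alice@example.com", "expireOn": "2016-07-08T15:24:49+0000"}

which `_HasExpired` evaluates roughly as:

    import datetime
    import json

    comment = '{"userName": "alice@example.com", "expireOn": "2016-07-08T15:24:49+0000"}'
    expire_time = datetime.datetime.strptime(
        json.loads(comment)['expireOn'], '%Y-%m-%dT%H:%M:%S+0000')
    expired = datetime.datetime.utcnow() > expire_time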
diff --git a/google_compute_engine/accounts/accounts_utils.py b/google_compute_engine/accounts/accounts_utils.py
new file mode 100644
index 0000000..d6cc96d
--- /dev/null
+++ b/google_compute_engine/accounts/accounts_utils.py
@@ -0,0 +1,318 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for provisioning or deprovisioning a Linux user account."""
+
+import grp
+import os
+import pwd
+import re
+import shutil
+import subprocess
+import tempfile
+
+from google_compute_engine import file_utils
+
+USER_REGEX = re.compile(r'\A[A-Za-z0-9._][A-Za-z0-9._-]*\Z')
+
+
+class AccountsUtils(object):
+ """System user account configuration utilities."""
+
+ google_comment = '# Added by Google'
+
+ def __init__(self, logger, groups=None, remove=False):
+ """Constructor.
+
+ Args:
+ logger: logger object, used to write to SysLog and serial port.
+ groups: string, a comma separated list of groups.
+ remove: bool, True if deprovisioning a user should be destructive.
+ """
+ self.logger = logger
+ self.google_sudoers_group = 'google-sudoers'
+ self.google_sudoers_file = '/etc/sudoers.d/google_sudoers'
+ self.google_users_dir = '/var/lib/google'
+ self.google_users_file = os.path.join(self.google_users_dir, 'google_users')
+
+ self._CreateSudoersGroup()
+ self.groups = groups.split(',') if groups else []
+ self.groups.append(self.google_sudoers_group)
+ self.groups = filter(self._GetGroup, self.groups)
+ self.remove = remove
+
+ def _GetGroup(self, group):
+ """Retrieve a Linux group.
+
+ Args:
+ group: string, the name of the Linux group to retrieve.
+
+ Returns:
+ grp.struct_group, the Linux group or None if it does not exist.
+ """
+ try:
+ return grp.getgrnam(group)
+ except KeyError:
+ return None
+
+ def _CreateSudoersGroup(self):
+ """Create a Linux group for Google added sudo user accounts."""
+ if not self._GetGroup(self.google_sudoers_group):
+ try:
+ subprocess.check_call(['groupadd', self.google_sudoers_group])
+ except subprocess.CalledProcessError as e:
+ self.logger.warning('Could not create the sudoers group. %s.', str(e))
+
+ if not os.path.exists(self.google_sudoers_file):
+ with open(self.google_sudoers_file, 'w') as group:
+ message = '%{0} ALL=(ALL:ALL) NOPASSWD:ALL'.format(
+ self.google_sudoers_group)
+ group.write(message)
+
+ file_utils.SetPermissions(
+ self.google_sudoers_file, mode=0o440, uid=0, gid=0)
+
+ def _GetUser(self, user):
+ """Retrieve a Linux user account.
+
+ Args:
+ user: string, the name of the Linux user account to retrieve.
+
+ Returns:
+ pwd.struct_passwd, the Linux user or None if it does not exist.
+ """
+ try:
+ return pwd.getpwnam(user)
+ except KeyError:
+ return None
+
+ def _AddUser(self, user):
+ """Configure a Linux user account.
+
+ Args:
+ user: string, the name of the Linux user account to create.
+
+ Returns:
+ bool, True if user creation succeeded.
+ """
+ self.logger.info('Creating a new user account for %s.', user)
+
+ # The encrypted password is set to '*' for SSH on Linux systems
+ # without PAM.
+ #
+ # SSH uses '!' as its locked account token:
+ # https://github.com/openssh/openssh-portable/blob/master/configure.ac
+ #
+ # When the token is specified, SSH denies login:
+ # https://github.com/openssh/openssh-portable/blob/master/auth.c
+ #
+ # To solve the issue, make the password '*' which is also recognized
+ # as locked but does not prevent SSH login.
+ command = ['useradd', '-m', '-s', '/bin/bash', '-p', '*', user]
+ try:
+ subprocess.check_call(command)
+ except subprocess.CalledProcessError as e:
+ self.logger.warning('Could not create user %s. %s.', user, str(e))
+ return False
+ else:
+ self.logger.info('Created user account %s.', user)
+ return True
+
+ def _UpdateUserGroups(self, user, groups):
+ """Update group membership for a Linux user.
+
+ Args:
+ user: string, the name of the Linux user account.
+ groups: list, the group names to add the user as a member.
+
+ Returns:
+ bool, True if user update succeeded.
+ """
+ self.logger.debug('Updating user %s with groups %s.', user, groups)
+ command = ['usermod', '-G', ','.join(groups), user]
+ try:
+ subprocess.check_call(command)
+ except subprocess.CalledProcessError as e:
+ self.logger.warning('Could not update user %s. %s.', user, str(e))
+ return False
+ else:
+ self.logger.debug('Updated user account %s.', user)
+ return True
+
+ def _UpdateAuthorizedKeys(self, user, ssh_keys):
+ """Update the authorized keys file for a Linux user with a list of SSH keys.
+
+ Args:
+ user: string, the name of the Linux user account.
+ ssh_keys: list, the SSH key strings associated with the user.
+
+ Raises:
+ IOError, raised when there is an exception updating a file.
+ """
+ pw_entry = self._GetUser(user)
+ if not pw_entry:
+ return
+
+ uid = pw_entry.pw_uid
+ gid = pw_entry.pw_gid
+ home_dir = pw_entry.pw_dir
+ ssh_dir = os.path.join(home_dir, '.ssh')
+ file_utils.SetPermissions(
+ home_dir, mode=0o755, uid=uid, gid=gid, mkdir=True)
+ file_utils.SetPermissions(
+ ssh_dir, mode=0o700, uid=uid, gid=gid, mkdir=True)
+
+ # Not all sshd's support multiple authorized_keys files so we have to
+ # share one with the user. We add each of our entries as follows:
+ # # Added by Google
+ # authorized_key_entry
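+ #
+ # For illustration, a merged file might end up looking like (the keys
+ # shown are hypothetical):
+ # ssh-rsa AAAA... existing-user-key
+ # # Added by Google
+ # ssh-rsa AAAA... google-managed-key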
+ authorized_keys_file = os.path.join(ssh_dir, 'authorized_keys')
+ prefix = self.logger.name + '-'
+ with tempfile.NamedTemporaryFile(
+ mode='w', prefix=prefix, delete=True) as updated_keys:
+ updated_keys_file = updated_keys.name
+ if os.path.exists(authorized_keys_file):
+ lines = open(authorized_keys_file).readlines()
+ else:
+ lines = []
+
+ google_lines = set()
+ for i, line in enumerate(lines):
+ if line.startswith(self.google_comment):
+ google_lines.update([i, i+1])
+
+ # Write user's authorized key entries.
+ for i, line in enumerate(lines):
+ if i not in google_lines and line:
+ line += '\n' if not line.endswith('\n') else ''
+ updated_keys.write(line)
+
+ # Write the Google authorized key entries at the end of the file.
+ # Each entry is preceded by '# Added by Google'.
+ for ssh_key in ssh_keys:
+ ssh_key += '\n' if not ssh_key.endswith('\n') else ''
+ updated_keys.write('%s\n' % self.google_comment)
+ updated_keys.write(ssh_key)
+
+ # Write buffered data to the updated keys file without closing it and
+ # update the Linux user's authorized keys file.
+ updated_keys.flush()
+ shutil.copy(updated_keys_file, authorized_keys_file)
+
+ file_utils.SetPermissions(
+ authorized_keys_file, mode=0o600, uid=uid, gid=gid)
+
+ def _RemoveAuthorizedKeys(self, user):
+ """Remove a Linux user account's authorized keys file to prevent login.
+
+ Args:
+ user: string, the Linux user account to remove access.
+ """
+ pw_entry = self._GetUser(user)
+ if not pw_entry:
+ return
+
+ home_dir = pw_entry.pw_dir
+ authorized_keys_file = os.path.join(home_dir, '.ssh', 'authorized_keys')
+ if os.path.exists(authorized_keys_file):
+ try:
+ os.remove(authorized_keys_file)
+ except OSError as e:
+ message = 'Could not remove authorized keys for user %s. %s.'
+ self.logger.warning(message, user, str(e))
+
+ def GetConfiguredUsers(self):
+ """Retrieve the list of configured Google user accounts.
+
+ Returns:
+ list, the username strings of users configured by Google.
+ """
+ if os.path.exists(self.google_users_file):
+ users = open(self.google_users_file).readlines()
+ else:
+ users = []
+ return [user.strip() for user in users]
+
+ def SetConfiguredUsers(self, users):
+ """Set the list of configured Google user accounts.
+
+ Args:
+ users: list, the username strings of the Linux accounts.
+ """
+ prefix = self.logger.name + '-'
+ with tempfile.NamedTemporaryFile(
+ mode='w', prefix=prefix, delete=True) as updated_users:
+ updated_users_file = updated_users.name
+ for user in users:
+ updated_users.write(user + '\n')
+ updated_users.flush()
+ if not os.path.exists(self.google_users_dir):
+ os.makedirs(self.google_users_dir)
+ shutil.copy(updated_users_file, self.google_users_file)
+
+ file_utils.SetPermissions(self.google_users_file, mode=0o600, uid=0, gid=0)
+
+ def UpdateUser(self, user, ssh_keys):
+ """Update a Linux user with authorized SSH keys.
+
+ Args:
+ user: string, the name of the Linux user account.
+ ssh_keys: list, the SSH key strings associated with the user.
+
+ Returns:
+ bool, True if the user account updated successfully.
+ """
+ if not bool(USER_REGEX.match(user)):
+ self.logger.warning('Invalid user account name %s.', user)
+ return False
+ if not self._GetUser(user) and not self._AddUser(user):
+ return False
+ if not self._UpdateUserGroups(user, self.groups):
+ return False
+
+ # Don't try to manage SSH keys for accounts whose login shell is set to
+ # disable logins. This helps avoid problems caused by operator and root
+ # sharing a home directory in CentOS and RHEL.
+ pw_entry = self._GetUser(user)
+ if pw_entry and pw_entry.pw_shell == '/sbin/nologin':
+ message = 'Not updating user %s. User set /sbin/nologin as login shell.'
+ self.logger.debug(message, user)
+ return True
+
+ try:
+ self._UpdateAuthorizedKeys(user, ssh_keys)
+ except IOError as e:
+ message = 'Could not update the authorized keys file for user %s. %s.'
+ self.logger.warning(message, user, str(e))
+ return False
+ else:
+ return True
+
+ def RemoveUser(self, user):
+ """Remove a Linux user account.
+
+ Args:
+ user: string, the Linux user account to remove.
+ """
+ self.logger.info('Removing user %s.', user)
+ if self.remove:
+ command = ['userdel', '-r', user]
+ try:
+ subprocess.check_call(command)
+ except subprocess.CalledProcessError as e:
+ self.logger.warning('Could not remove user %s. %s.', user, str(e))
+ else:
+ self.logger.info('Removed user account %s.', user)
+ self._RemoveAuthorizedKeys(user)
diff --git a/google_compute_engine/accounts/tests/accounts_daemon_test.py b/google_compute_engine/accounts/tests/accounts_daemon_test.py
new file mode 100644
index 0000000..775ae10
--- /dev/null
+++ b/google_compute_engine/accounts/tests/accounts_daemon_test.py
@@ -0,0 +1,317 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for accounts_daemon.py module."""
+
+import datetime
+
+from google_compute_engine.accounts import accounts_daemon
+from google_compute_engine.test_compat import mock
+from google_compute_engine.test_compat import unittest
+
+
+class AccountsDaemonTest(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_logger = mock.Mock()
+ self.mock_watcher = mock.Mock()
+ self.mock_utils = mock.Mock()
+
+ self.mock_setup = mock.create_autospec(accounts_daemon.AccountsDaemon)
+ self.mock_setup.logger = self.mock_logger
+ self.mock_setup.watcher = self.mock_watcher
+ self.mock_setup.utils = self.mock_utils
+
+ @mock.patch('google_compute_engine.accounts.accounts_daemon.accounts_utils')
+ @mock.patch('google_compute_engine.accounts.accounts_daemon.metadata_watcher')
+ @mock.patch('google_compute_engine.accounts.accounts_daemon.logger')
+ @mock.patch('google_compute_engine.accounts.accounts_daemon.file_utils')
+ def testAccountsDaemon(self, mock_lock, mock_logger, mock_watcher,
+ mock_utils):
+ mock_logger_instance = mock.Mock()
+ mock_logger.Logger.return_value = mock_logger_instance
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_lock, 'lock')
+ mocks.attach_mock(mock_logger, 'logger')
+ mocks.attach_mock(mock_watcher, 'watcher')
+ mocks.attach_mock(mock_utils, 'utils')
+ with mock.patch.object(
+ accounts_daemon.AccountsDaemon, 'HandleAccounts') as mock_handle:
+ accounts_daemon.AccountsDaemon(groups='foo,bar', remove=True, debug=True)
+ expected_calls = [
+ mock.call.logger.Logger(name=mock.ANY, debug=True, facility=mock.ANY),
+ mock.call.watcher.MetadataWatcher(logger=mock_logger_instance),
+ mock.call.utils.AccountsUtils(
+ logger=mock_logger_instance, groups='foo,bar', remove=True),
+ mock.call.lock.LockFile(accounts_daemon.LOCKFILE),
+ mock.call.lock.LockFile().__enter__(),
+ mock.call.logger.Logger().info(mock.ANY),
+ mock.call.watcher.MetadataWatcher().WatchMetadata(
+ mock_handle, recursive=True),
+ mock.call.lock.LockFile().__exit__(None, None, None),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.accounts.accounts_daemon.accounts_utils')
+ @mock.patch('google_compute_engine.accounts.accounts_daemon.metadata_watcher')
+ @mock.patch('google_compute_engine.accounts.accounts_daemon.logger')
+ @mock.patch('google_compute_engine.accounts.accounts_daemon.file_utils')
+ def testAccountsDaemonError(self, mock_lock, mock_logger, mock_watcher,
+ mock_utils):
+ mock_logger_instance = mock.Mock()
+ mock_logger.Logger.return_value = mock_logger_instance
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_lock, 'lock')
+ mocks.attach_mock(mock_logger, 'logger')
+ mocks.attach_mock(mock_watcher, 'watcher')
+ mocks.attach_mock(mock_utils, 'utils')
+ mock_lock.LockFile.side_effect = IOError('Test Error')
+ with mock.patch.object(accounts_daemon.AccountsDaemon, 'HandleAccounts'):
+ accounts_daemon.AccountsDaemon()
+ expected_calls = [
+ mock.call.logger.Logger(
+ name=mock.ANY, debug=False, facility=mock.ANY),
+ mock.call.watcher.MetadataWatcher(logger=mock_logger_instance),
+ mock.call.utils.AccountsUtils(
+ logger=mock_logger_instance, groups=None, remove=False),
+ mock.call.lock.LockFile(accounts_daemon.LOCKFILE),
+ mock.call.logger.Logger().warning('Test Error'),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ def testHasExpired(self):
+
+ def _GetTimestamp(days):
+ """Create a timestamp in the correct format with a days offset.
+
+ Args:
+ days: int, number of days to add to the current date.
+
+ Returns:
+ string, a timestamp with the format '%Y-%m-%dT%H:%M:%S+0000'.
+ """
+ format_str = '%Y-%m-%dT%H:%M:%S+0000'
+ today = datetime.datetime.now()
+ timestamp = today + datetime.timedelta(days=days)
+ return timestamp.strftime(format_str)
+
+ ssh_keys = {
+ None: False,
+ '': False,
+ 'Invalid': False,
+ 'user:ssh-rsa key user@domain.com': False,
+ 'user:ssh-rsa key google {"expireOn":"%s"}' % _GetTimestamp(-1): False,
+ 'user:ssh-rsa key google-ssh': False,
+ 'user:ssh-rsa key google-ssh {invalid:json}': False,
+ 'user:ssh-rsa key google-ssh {"userName":"user"}': False,
+ 'user:ssh-rsa key google-ssh {"expireOn":"invalid"}': False,
+ 'user:xyz key google-ssh {"expireOn":"%s"}' % _GetTimestamp(1): False,
+ 'user:xyz key google-ssh {"expireOn":"%s"}' % _GetTimestamp(-1): True,
+ }
+
+ for key, expired in ssh_keys.items():
+ self.assertEqual(
+ accounts_daemon.AccountsDaemon._HasExpired(self.mock_setup, key),
+ expired)
+
+ def testParseAccountsData(self):
+ user_map = {
+ 'a': ['1', '2'],
+ 'b': ['3', '4', '5'],
+ }
+ accounts_data = 'skip\n'
+ for user, keys in user_map.items():
+ for key in keys:
+ accounts_data += '%s:%s\n' % (user, key)
+ # Make the _HasExpired function treat even numbers as expired SSH keys.
+ self.mock_setup._HasExpired.side_effect = lambda key: int(key) % 2 == 0
+
+ self.assertEqual(
+ accounts_daemon.AccountsDaemon._ParseAccountsData(
+ self.mock_setup, None), {})
+ self.assertEqual(
+ accounts_daemon.AccountsDaemon._ParseAccountsData(
+ self.mock_setup, ''), {})
+ expected_users = {'a': ['1'], 'b': ['3', '5']}
+ self.assertEqual(accounts_daemon.AccountsDaemon._ParseAccountsData(
+ self.mock_setup, accounts_data), expected_users)
+
+ def testGetAccountsData(self):
+
+ def _AssertAccountsData(data, expected):
+ """Test the correct accounts data is returned.
+
+ Args:
+ data: dictionary, the faux metadata server contents.
+ expected: list, the faux SSH keys expected to be set, or a falsy
+ value when no SSH keys are expected.
+ """
+ accounts_daemon.AccountsDaemon._GetAccountsData(self.mock_setup, data)
+ if expected:
+ call_args, _ = self.mock_setup._ParseAccountsData.call_args
+ actual = call_args[0]
+ self.assertEqual(set(actual.split()), set(expected))
+ else:
+ self.mock_setup._ParseAccountsData.assert_called_once_with(expected)
+ self.mock_setup._ParseAccountsData.reset_mock()
+
+ data = None
+ _AssertAccountsData(data, '')
+
+ data = {'test': 'data'}
+ _AssertAccountsData(data, '')
+
+ data = {'instance': {'attributes': {}}}
+ _AssertAccountsData(data, '')
+
+ data = {'instance': {'attributes': {'ssh-keys': '1'}}}
+ _AssertAccountsData(data, ['1'])
+
+ data = {'instance': {'attributes': {'ssh-keys': '1', 'sshKeys': '2'}}}
+ _AssertAccountsData(data, ['1', '2'])
+
+ data = {'project': {'attributes': {'ssh-keys': '1'}}}
+ _AssertAccountsData(data, ['1'])
+
+ data = {'project': {'attributes': {'ssh-keys': '1', 'sshKeys': '2'}}}
+ _AssertAccountsData(data, ['1', '2'])
+
+ data = {
+ 'instance': {
+ 'attributes': {
+ 'ssh-keys': '1',
+ 'sshKeys': '2',
+ },
+ },
+ 'project': {
+ 'attributes': {
+ 'ssh-keys': '3',
+ },
+ },
+ }
+ _AssertAccountsData(data, ['1', '2'])
+
+ data = {
+ 'instance': {
+ 'attributes': {
+ 'ssh-keys': '1',
+ 'block-project-ssh-keys': 'false',
+ },
+ },
+ 'project': {
+ 'attributes': {
+ 'ssh-keys': '2',
+ },
+ },
+ }
+ _AssertAccountsData(data, ['1', '2'])
+
+ data = {
+ 'instance': {
+ 'attributes': {
+ 'ssh-keys': '1',
+ 'block-project-ssh-keys': 'true',
+ },
+ },
+ 'project': {
+ 'attributes': {
+ 'ssh-keys': '2',
+ },
+ },
+ }
+ _AssertAccountsData(data, ['1'])
+
+ data = {
+ 'instance': {
+ 'attributes': {
+ 'ssh-keys': '1',
+ 'block-project-ssh-keys': 'false',
+ },
+ },
+ 'project': {
+ 'attributes': {
+ 'ssh-keys': '2',
+ 'sshKeys': '3',
+ },
+ },
+ }
+ _AssertAccountsData(data, ['1', '2', '3'])
+
+ def testUpdateUsers(self):
+ update_users = {
+ 'a': '1',
+ 'b': '2',
+ 'c': '3',
+ 'invalid': '4',
+ 'valid': '5',
+ }
+ self.mock_setup.invalid_users = set(['invalid'])
+ # Make UpdateUser succeed for fake names longer than one character.
+ self.mock_utils.UpdateUser.side_effect = lambda user, _: len(user) > 1
+ accounts_daemon.AccountsDaemon._UpdateUsers(self.mock_setup, update_users)
+ expected_calls = [
+ mock.call('a', '1'),
+ mock.call('b', '2'),
+ mock.call('c', '3'),
+ mock.call('valid', '5'),
+ ]
+ self.mock_utils.UpdateUser.assert_has_calls(expected_calls, any_order=True)
+ self.assertEqual(
+ self.mock_utils.UpdateUser.call_count, len(expected_calls))
+ self.assertEqual(
+ self.mock_setup.invalid_users, set(['invalid', 'a', 'b', 'c']))
+
+ def testRemoveUsers(self):
+ remove_users = ['a', 'b', 'c', 'valid']
+ self.mock_setup.invalid_users = set(['invalid', 'a', 'b', 'c'])
+ accounts_daemon.AccountsDaemon._RemoveUsers(self.mock_setup, remove_users)
+ expected_calls = [
+ mock.call('a'),
+ mock.call('b'),
+ mock.call('c'),
+ mock.call('valid'),
+ ]
+ self.mock_utils.RemoveUser.assert_has_calls(expected_calls)
+ self.assertEqual(self.mock_setup.invalid_users, set(['invalid']))
+
+ def testHandleAccounts(self):
+ configured = ['c', 'c', 'b', 'b', 'a', 'a']
+ desired = {'d': '1', 'c': '2'}
+ mocks = mock.Mock()
+ mocks.attach_mock(self.mock_utils, 'utils')
+ mocks.attach_mock(self.mock_setup, 'setup')
+ self.mock_utils.GetConfiguredUsers.return_value = configured
+ self.mock_setup._GetAccountsData.return_value = desired
+ result = 'result'
+ expected_add = ['c', 'd']
+ expected_remove = ['a', 'b']
+
+ accounts_daemon.AccountsDaemon.HandleAccounts(self.mock_setup, result)
+ expected_calls = [
+ mock.call.setup.logger.debug(mock.ANY),
+ mock.call.utils.GetConfiguredUsers(),
+ mock.call.setup._GetAccountsData(result),
+ mock.call.setup._UpdateUsers(desired),
+ mock.call.setup._RemoveUsers(mock.ANY),
+ mock.call.utils.SetConfiguredUsers(mock.ANY),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+ call_args, _ = self.mock_utils.SetConfiguredUsers.call_args
+ self.assertEqual(set(call_args[0]), set(expected_add))
+ call_args, _ = self.mock_setup._RemoveUsers.call_args
+ self.assertEqual(set(call_args[0]), set(expected_remove))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/google_compute_engine/accounts/tests/accounts_utils_test.py b/google_compute_engine/accounts/tests/accounts_utils_test.py
new file mode 100644
index 0000000..20ea62d
--- /dev/null
+++ b/google_compute_engine/accounts/tests/accounts_utils_test.py
@@ -0,0 +1,585 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for accounts_utils.py module."""
+
+import subprocess
+
+from google_compute_engine.accounts import accounts_utils
+from google_compute_engine.test_compat import builtin
+from google_compute_engine.test_compat import mock
+from google_compute_engine.test_compat import unittest
+
+
+class AccountsUtilsTest(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_logger = mock.Mock()
+ self.sudoers_group = 'google-sudoers'
+ self.sudoers_file = '/sudoers/file'
+ self.users_dir = '/users'
+ self.users_file = '/users/file'
+
+ self.mock_utils = mock.create_autospec(accounts_utils.AccountsUtils)
+ self.mock_utils.google_comment = accounts_utils.AccountsUtils.google_comment
+ self.mock_utils.google_sudoers_group = self.sudoers_group
+ self.mock_utils.google_sudoers_file = self.sudoers_file
+ self.mock_utils.google_users_dir = self.users_dir
+ self.mock_utils.google_users_file = self.users_file
+ self.mock_utils.logger = self.mock_logger
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.AccountsUtils._GetGroup')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.AccountsUtils._CreateSudoersGroup')
+ def testAccountsUtils(self, mock_create, mock_group):
+ mock_logger = mock.Mock()
+ mock_group.side_effect = lambda group: 'google' in group
+
+ utils = accounts_utils.AccountsUtils(
+ logger=mock_logger, groups='foo,google,bar', remove=True)
+ mock_create.assert_called_once_with()
+ self.assertEqual(utils.logger, mock_logger)
+ self.assertEqual(sorted(utils.groups), ['google', 'google-sudoers'])
+ self.assertTrue(utils.remove)
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.grp')
+ def testGetGroup(self, mock_grp):
+ mock_grp.getgrnam.return_value = 'Test'
+ self.assertEqual(
+ accounts_utils.AccountsUtils._GetGroup(self.mock_utils, 'valid'),
+ 'Test')
+ mock_grp.getgrnam.side_effect = KeyError('Test Error')
+ self.assertEqual(
+ accounts_utils.AccountsUtils._GetGroup(self.mock_utils, 'invalid'),
+ None)
+ expected_calls = [
+ mock.call.getgrnam('valid'),
+ mock.call.getgrnam('invalid'),
+ ]
+ self.assertEqual(mock_grp.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.file_utils.SetPermissions')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists')
+ def testCreateSudoersGroup(self, mock_exists, mock_call, mock_permissions):
+ mock_open = mock.mock_open()
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_exists, 'exists')
+ mocks.attach_mock(mock_call, 'call')
+ mocks.attach_mock(mock_permissions, 'permissions')
+ mocks.attach_mock(self.mock_utils._GetGroup, 'group')
+ mocks.attach_mock(self.mock_logger, 'logger')
+ self.mock_utils._GetGroup.return_value = False
+ mock_exists.return_value = False
+ command = ['groupadd', self.sudoers_group]
+
+ with mock.patch('%s.open' % builtin, mock_open, create=False):
+ accounts_utils.AccountsUtils._CreateSudoersGroup(self.mock_utils)
+ mock_open().write.assert_called_once_with(mock.ANY)
+
+ expected_calls = [
+ mock.call.group(self.sudoers_group),
+ mock.call.call(command),
+ mock.call.exists(self.sudoers_file),
+ mock.call.permissions(self.sudoers_file, mode=0o440, uid=0, gid=0),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.file_utils.SetPermissions')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists')
+ def testCreateSudoersGroupSkip(self, mock_exists, mock_call,
+ mock_permissions):
+ mock_open = mock.mock_open()
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_exists, 'exists')
+ mocks.attach_mock(mock_call, 'call')
+ mocks.attach_mock(mock_permissions, 'permissions')
+ mocks.attach_mock(self.mock_utils._GetGroup, 'group')
+ mocks.attach_mock(self.mock_logger, 'logger')
+ self.mock_utils._GetGroup.return_value = True
+ mock_exists.return_value = True
+
+ with mock.patch('%s.open' % builtin, mock_open, create=False):
+ accounts_utils.AccountsUtils._CreateSudoersGroup(self.mock_utils)
+ mock_open().write.assert_not_called()
+
+ expected_calls = [
+ mock.call.group(self.sudoers_group),
+ mock.call.exists(self.sudoers_file),
+ mock.call.permissions(self.sudoers_file, mode=0o440, uid=0, gid=0),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.file_utils.SetPermissions')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists')
+ def testCreateSudoersGroupError(self, mock_exists, mock_call,
+ mock_permissions):
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_exists, 'exists')
+ mocks.attach_mock(mock_call, 'call')
+ mocks.attach_mock(mock_permissions, 'permissions')
+ mocks.attach_mock(self.mock_utils._GetGroup, 'group')
+ mocks.attach_mock(self.mock_logger, 'logger')
+ self.mock_utils._GetGroup.return_value = False
+ mock_exists.return_value = True
+ mock_call.side_effect = subprocess.CalledProcessError(1, 'Test')
+ command = ['groupadd', self.sudoers_group]
+
+ accounts_utils.AccountsUtils._CreateSudoersGroup(self.mock_utils)
+ expected_calls = [
+ mock.call.group(self.sudoers_group),
+ mock.call.call(command),
+ mock.call.logger.warning(mock.ANY, mock.ANY),
+ mock.call.exists(self.sudoers_file),
+ mock.call.permissions(self.sudoers_file, mode=0o440, uid=0, gid=0),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.pwd')
+ def testGetUser(self, mock_pwd):
+ mock_pwd.getpwnam.return_value = 'Test'
+ self.assertEqual(
+ accounts_utils.AccountsUtils._GetUser(self.mock_utils, 'valid'),
+ 'Test')
+ mock_pwd.getpwnam.side_effect = KeyError('Test Error')
+ self.assertEqual(
+ accounts_utils.AccountsUtils._GetUser(self.mock_utils, 'invalid'),
+ None)
+ expected_calls = [
+ mock.call.getpwnam('valid'),
+ mock.call.getpwnam('invalid'),
+ ]
+ self.assertEqual(mock_pwd.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call')
+ def testAddUser(self, mock_call):
+ user = 'user'
+ command = ['useradd', '-m', '-s', '/bin/bash', '-p', '*', user]
+
+ self.assertTrue(
+ accounts_utils.AccountsUtils._AddUser(self.mock_utils, user))
+ mock_call.assert_called_once_with(command)
+ expected_calls = [mock.call.info(mock.ANY, user)] * 2
+ self.assertEqual(self.mock_logger.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call')
+ def testAddUserError(self, mock_call):
+ user = 'user'
+ command = ['useradd', '-m', '-s', '/bin/bash', '-p', '*', user]
+ mock_call.side_effect = subprocess.CalledProcessError(1, 'Test')
+
+ self.assertFalse(
+ accounts_utils.AccountsUtils._AddUser(self.mock_utils, user))
+ mock_call.assert_called_once_with(command)
+ expected_calls = [
+ mock.call.info(mock.ANY, user),
+ mock.call.warning(mock.ANY, user, mock.ANY),
+ ]
+ self.assertEqual(self.mock_logger.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call')
+ def testUpdateUserGroups(self, mock_call):
+ user = 'user'
+ groups = ['a', 'b', 'c']
+ command = ['usermod', '-G', 'a,b,c', user]
+
+ self.assertTrue(
+ accounts_utils.AccountsUtils._UpdateUserGroups(
+ self.mock_utils, user, groups))
+ mock_call.assert_called_once_with(command)
+ expected_calls = [
+ mock.call.debug(mock.ANY, user, groups),
+ mock.call.debug(mock.ANY, user),
+ ]
+ self.assertEqual(self.mock_logger.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call')
+ def testUpdateUserGroupsError(self, mock_call):
+ user = 'user'
+ groups = ['a', 'b', 'c']
+ command = ['usermod', '-G', 'a,b,c', user]
+ mock_call.side_effect = subprocess.CalledProcessError(1, 'Test')
+
+ self.assertFalse(
+ accounts_utils.AccountsUtils._UpdateUserGroups(
+ self.mock_utils, user, groups))
+ mock_call.assert_called_once_with(command)
+ expected_calls = [
+ mock.call.debug(mock.ANY, user, groups),
+ mock.call.warning(mock.ANY, user, mock.ANY),
+ ]
+ self.assertEqual(self.mock_logger.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.file_utils.SetPermissions')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.shutil.copy')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.tempfile.NamedTemporaryFile')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists')
+ def testUpdateAuthorizedKeys(self, mock_exists, mock_tempfile, mock_copy,
+ mock_permissions):
+ mock_open = mock.mock_open()
+ user = 'user'
+ ssh_keys = ['Google key 1', 'Google key 2']
+ temp_dest = '/tmp/dest'
+ pw_uid = 1
+ pw_gid = 2
+ pw_dir = '/home'
+ ssh_dir = '/home/.ssh'
+ authorized_keys_file = '/home/.ssh/authorized_keys'
+ pw_entry = accounts_utils.pwd.struct_passwd(
+ ('', '', pw_uid, pw_gid, '', pw_dir, ''))
+ self.mock_utils._GetUser.return_value = pw_entry
+ mock_exists.return_value = True
+ mock_tempfile.return_value = mock_tempfile
+ mock_tempfile.__enter__.return_value.name = temp_dest
+ self.mock_logger.name = 'test'
+
+ with mock.patch('%s.open' % builtin, mock_open, create=False):
+ mock_open().readlines.return_value = [
+ 'User key a\n',
+ 'User key b\n',
+ '\n',
+ self.mock_utils.google_comment + '\n',
+ 'Google key a\n',
+ self.mock_utils.google_comment + '\n',
+ 'Google key b\n',
+ 'User key c\n',
+ ]
+ accounts_utils.AccountsUtils._UpdateAuthorizedKeys(
+ self.mock_utils, user, ssh_keys)
+
+ expected_calls = [
+ mock.call(mode='w', prefix='test-', delete=True),
+ mock.call.__enter__(),
+ mock.call.__enter__().write('User key a\n'),
+ mock.call.__enter__().write('User key b\n'),
+ mock.call.__enter__().write('\n'),
+ mock.call.__enter__().write('User key c\n'),
+ mock.call.__enter__().write(self.mock_utils.google_comment + '\n'),
+ mock.call.__enter__().write('Google key 1\n'),
+ mock.call.__enter__().write(self.mock_utils.google_comment + '\n'),
+ mock.call.__enter__().write('Google key 2\n'),
+ mock.call.__enter__().flush(),
+ mock.call.__exit__(None, None, None),
+ ]
+ self.assertEqual(mock_tempfile.mock_calls, expected_calls)
+ mock_copy.assert_called_once_with(temp_dest, authorized_keys_file)
+ expected_calls = [
+ mock.call(pw_dir, mode=0o755, uid=pw_uid, gid=pw_gid, mkdir=True),
+ mock.call(ssh_dir, mode=0o700, uid=pw_uid, gid=pw_gid, mkdir=True),
+ mock.call(authorized_keys_file, mode=0o600, uid=pw_uid, gid=pw_gid),
+ ]
+ self.assertEqual(mock_permissions.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.file_utils.SetPermissions')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.shutil.copy')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.tempfile.NamedTemporaryFile')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists')
+ def testUpdateAuthorizedKeysNoKeys(self, mock_exists, mock_tempfile,
+ mock_copy, mock_permissions):
+ user = 'user'
+ ssh_keys = ['Google key 1']
+ temp_dest = '/tmp/dest'
+ pw_uid = 1
+ pw_gid = 2
+ pw_dir = '/home'
+ ssh_dir = '/home/.ssh'
+ authorized_keys_file = '/home/.ssh/authorized_keys'
+ pw_entry = accounts_utils.pwd.struct_passwd(
+ ('', '', pw_uid, pw_gid, '', pw_dir, ''))
+ self.mock_utils._GetUser.return_value = pw_entry
+ mock_exists.return_value = False
+ mock_tempfile.return_value = mock_tempfile
+ mock_tempfile.__enter__.return_value.name = temp_dest
+ self.mock_logger.name = 'test'
+
+ # The authorized keys file does not exist so write a new one.
+ accounts_utils.AccountsUtils._UpdateAuthorizedKeys(
+ self.mock_utils, user, ssh_keys)
+ expected_calls = [
+ mock.call(mode='w', prefix='test-', delete=True),
+ mock.call.__enter__(),
+ mock.call.__enter__().write(self.mock_utils.google_comment + '\n'),
+ mock.call.__enter__().write('Google key 1\n'),
+ mock.call.__enter__().flush(),
+ mock.call.__exit__(None, None, None),
+ ]
+ self.assertEqual(mock_tempfile.mock_calls, expected_calls)
+ mock_copy.assert_called_once_with(temp_dest, authorized_keys_file)
+ expected_calls = [
+ mock.call(pw_dir, mode=0o755, uid=pw_uid, gid=pw_gid, mkdir=True),
+ mock.call(ssh_dir, mode=0o700, uid=pw_uid, gid=pw_gid, mkdir=True),
+ mock.call(authorized_keys_file, mode=0o600, uid=pw_uid, gid=pw_gid),
+ ]
+ self.assertEqual(mock_permissions.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.file_utils.SetPermissions')
+ def testUpdateAuthorizedKeysNoUser(self, mock_permissions):
+ user = 'user'
+ ssh_keys = ['key']
+ self.mock_utils._GetUser.return_value = None
+
+ # The user does not exist, so do not write authorized keys.
+ accounts_utils.AccountsUtils._UpdateAuthorizedKeys(
+ self.mock_utils, user, ssh_keys)
+ self.mock_utils._GetUser.assert_called_once_with(user)
+ mock_permissions.assert_not_called()
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.os.remove')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists')
+ def testRemoveAuthorizedKeys(self, mock_exists, mock_remove):
+ user = 'user'
+ pw_dir = '/home'
+ authorized_keys_file = '/home/.ssh/authorized_keys'
+ pw_entry = accounts_utils.pwd.struct_passwd(
+ ('', '', '', '', '', pw_dir, ''))
+ self.mock_utils._GetUser.return_value = pw_entry
+ mock_exists.return_value = True
+
+ accounts_utils.AccountsUtils._RemoveAuthorizedKeys(self.mock_utils, user)
+ self.mock_utils._GetUser.assert_called_once_with(user)
+ mock_exists.assert_called_once_with(authorized_keys_file)
+ mock_remove.assert_called_once_with(authorized_keys_file)
+ self.mock_logger.warning.assert_not_called()
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.os.remove')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists')
+ def testRemoveAuthorizedKeysNoKeys(self, mock_exists, mock_remove):
+ user = 'user'
+ pw_dir = '/home'
+ authorized_keys_file = '/home/.ssh/authorized_keys'
+ pw_entry = accounts_utils.pwd.struct_passwd(
+ ('', '', '', '', '', pw_dir, ''))
+ self.mock_utils._GetUser.return_value = pw_entry
+ mock_exists.return_value = False
+
+ accounts_utils.AccountsUtils._RemoveAuthorizedKeys(self.mock_utils, user)
+ self.mock_utils._GetUser.assert_called_once_with(user)
+ mock_exists.assert_called_once_with(authorized_keys_file)
+ mock_remove.assert_not_called()
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists')
+ def testRemoveAuthorizedKeysNoUser(self, mock_exists):
+ user = 'user'
+ self.mock_utils._GetUser.return_value = None
+
+ accounts_utils.AccountsUtils._RemoveAuthorizedKeys(self.mock_utils, user)
+ self.mock_utils._GetUser.assert_called_once_with(user)
+ mock_exists.assert_not_called()
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.os.remove')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists')
+ def testRemoveAuthorizedKeysError(self, mock_exists, mock_remove):
+ user = 'user'
+ pw_dir = '/home'
+ authorized_keys_file = '/home/.ssh/authorized_keys'
+ pw_entry = accounts_utils.pwd.struct_passwd(
+ ('', '', '', '', '', pw_dir, ''))
+ self.mock_utils._GetUser.return_value = pw_entry
+ mock_exists.return_value = True
+ mock_remove.side_effect = OSError('Test Error')
+
+ accounts_utils.AccountsUtils._RemoveAuthorizedKeys(self.mock_utils, user)
+ self.mock_utils._GetUser.assert_called_once_with(user)
+ mock_exists.assert_called_once_with(authorized_keys_file)
+ mock_remove.assert_called_once_with(authorized_keys_file)
+ self.mock_logger.warning.assert_called_once_with(mock.ANY, user, mock.ANY)
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists')
+ def testGetConfiguredUsers(self, mock_exists):
+ mock_open = mock.mock_open()
+ mock_exists.return_value = True
+ with mock.patch('%s.open' % builtin, mock_open, create=False):
+ mock_open().readlines.return_value = ['a\n', 'b\n', 'c\n', '\n']
+ self.assertEqual(
+ accounts_utils.AccountsUtils.GetConfiguredUsers(self.mock_utils),
+ ['a', 'b', 'c', ''])
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists')
+ def testGetConfiguredUsersEmpty(self, mock_exists):
+ mock_exists.return_value = False
+ self.assertEqual(
+ accounts_utils.AccountsUtils.GetConfiguredUsers(self.mock_utils), [])
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.os.makedirs')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.os.path.exists')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.file_utils.SetPermissions')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.shutil.copy')
+ @mock.patch('google_compute_engine.accounts.accounts_utils.tempfile.NamedTemporaryFile')
+ def testSetConfiguredUsers(self, mock_tempfile, mock_copy, mock_permissions,
+ mock_exists, mock_makedirs):
+ temp_dest = '/temp/dest'
+ users = ['a', 'b', 'c']
+ mock_tempfile.return_value = mock_tempfile
+ mock_tempfile.__enter__.return_value.name = temp_dest
+ mock_exists.return_value = False
+ self.mock_logger.name = 'test'
+
+ accounts_utils.AccountsUtils.SetConfiguredUsers(self.mock_utils, users)
+
+ expected_calls = [
+ mock.call(mode='w', prefix='test-', delete=True),
+ mock.call.__enter__(),
+ mock.call.__enter__().write('a\n'),
+ mock.call.__enter__().write('b\n'),
+ mock.call.__enter__().write('c\n'),
+ mock.call.__enter__().flush(),
+ mock.call.__exit__(None, None, None),
+ ]
+ self.assertEqual(mock_tempfile.mock_calls, expected_calls)
+ mock_makedirs.assert_called_once_with(self.users_dir)
+ mock_copy.assert_called_once_with(temp_dest, self.users_file)
+ mock_permissions.assert_called_once_with(
+ self.users_file, mode=0o600, uid=0, gid=0)
+
+ def testUpdateUser(self):
+ valid_users = [
+ 'user',
+ '_',
+ '.',
+ '.abc_',
+ '_abc-',
+ 'ABC',
+ 'A_.-',
+ ]
+ groups = ['a', 'b', 'c']
+ keys = ['Key 1', 'Key 2']
+ pw_entry = accounts_utils.pwd.struct_passwd(tuple(['']*7))
+ self.mock_utils.groups = groups
+ self.mock_utils._GetUser.return_value = pw_entry
+ self.mock_utils._AddUser.return_value = True
+ self.mock_utils._UpdateUserGroups.return_value = True
+ for user in valid_users:
+ self.assertTrue(
+ accounts_utils.AccountsUtils.UpdateUser(self.mock_utils, user, keys))
+ self.mock_utils._UpdateAuthorizedKeys.assert_called_once_with(user, keys)
+ self.mock_utils._UpdateAuthorizedKeys.reset_mock()
+ self.mock_logger.warning.assert_not_called()
+
+ def testUpdateUserInvalidUser(self):
+ self.mock_utils._GetUser = mock.Mock()
+ invalid_users = [
+ '',
+ '!#$%^',
+ '-abc',
+ '#abc',
+ '^abc',
+ 'abc*xyz',
+ 'abc xyz',
+ 'xyz*',
+ 'xyz$',
+ ]
+ for user in invalid_users:
+ self.assertFalse(
+ accounts_utils.AccountsUtils.UpdateUser(self.mock_utils, user, []))
+ self.mock_logger.warning.assert_called_once_with(mock.ANY, user)
+ self.mock_logger.reset_mock()
+ self.mock_utils._GetUser.assert_not_called()
+
+ def testUpdateUserFailedAddUser(self):
+ self.mock_utils._UpdateUserGroups = mock.Mock()
+ user = 'user'
+ self.mock_utils._GetUser.return_value = False
+ self.mock_utils._AddUser.return_value = False
+
+ self.assertFalse(
+ accounts_utils.AccountsUtils.UpdateUser(self.mock_utils, user, []))
+ self.mock_utils._GetUser.assert_called_once_with(user)
+ self.mock_utils._AddUser.assert_called_once_with(user)
+ self.mock_utils._UpdateUserGroups.assert_not_called()
+
+ def testUpdateUserFailedUpdateGroups(self):
+ user = 'user'
+ groups = ['a', 'b', 'c']
+ self.mock_utils.groups = groups
+ self.mock_utils._GetUser.return_value = False
+ self.mock_utils._AddUser.return_value = True
+ self.mock_utils._UpdateUserGroups.return_value = False
+
+ self.assertFalse(
+ accounts_utils.AccountsUtils.UpdateUser(self.mock_utils, user, []))
+ self.mock_utils._GetUser.assert_called_once_with(user)
+ self.mock_utils._AddUser.assert_called_once_with(user)
+ self.mock_utils._UpdateUserGroups.assert_called_once_with(user, groups)
+
+ def testUpdateUserNoLogin(self):
+ self.mock_utils._UpdateAuthorizedKeys = mock.Mock()
+ user = 'user'
+ groups = ['a', 'b', 'c']
+ pw_shell = '/sbin/nologin'
+ pw_entry = accounts_utils.pwd.struct_passwd(
+ ('', '', '', '', '', '', pw_shell))
+ self.mock_utils.groups = groups
+ self.mock_utils._GetUser.return_value = pw_entry
+ self.mock_utils._UpdateUserGroups.return_value = True
+
+ self.assertTrue(
+ accounts_utils.AccountsUtils.UpdateUser(self.mock_utils, user, []))
+ self.mock_utils._UpdateAuthorizedKeys.assert_not_called()
+
+ def testUpdateUserError(self):
+ user = 'user'
+ groups = ['a', 'b', 'c']
+ keys = ['Key 1', 'Key 2']
+ pw_entry = accounts_utils.pwd.struct_passwd(tuple(['']*7))
+ self.mock_utils.groups = groups
+ self.mock_utils._GetUser.return_value = pw_entry
+ self.mock_utils._AddUser.return_value = True
+ self.mock_utils._UpdateAuthorizedKeys.side_effect = IOError('Test Error')
+
+ self.assertFalse(
+ accounts_utils.AccountsUtils.UpdateUser(self.mock_utils, user, keys))
+ self.mock_logger.warning.assert_called_once_with(mock.ANY, user, mock.ANY)
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call')
+ def testRemoveUser(self, mock_call):
+ user = 'user'
+ self.mock_utils.remove = False
+
+ accounts_utils.AccountsUtils.RemoveUser(self.mock_utils, user)
+ self.mock_utils._RemoveAuthorizedKeys.assert_called_once_with(user)
+ mock_call.assert_not_called()
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call')
+ def testRemoveUserForce(self, mock_call):
+ user = 'user'
+ command = ['userdel', '-r', user]
+ self.mock_utils.remove = True
+
+ accounts_utils.AccountsUtils.RemoveUser(self.mock_utils, user)
+ mock_call.assert_called_once_with(command)
+ expected_calls = [mock.call.info(mock.ANY, user)] * 2
+ self.assertEqual(self.mock_logger.mock_calls, expected_calls)
+ self.mock_utils._RemoveAuthorizedKeys.assert_called_once_with(user)
+
+ @mock.patch('google_compute_engine.accounts.accounts_utils.subprocess.check_call')
+ def testRemoveUserError(self, mock_call):
+ user = 'user'
+ command = ['userdel', '-r', user]
+ mock_call.side_effect = subprocess.CalledProcessError(1, 'Test')
+ self.mock_utils.remove = True
+
+ accounts_utils.AccountsUtils.RemoveUser(self.mock_utils, user)
+ mock_call.assert_called_once_with(command)
+ expected_calls = [
+ mock.call.info(mock.ANY, user),
+ mock.call.warning(mock.ANY, user, mock.ANY),
+ ]
+ self.assertEqual(self.mock_logger.mock_calls, expected_calls)
+ self.mock_utils._RemoveAuthorizedKeys.assert_called_once_with(user)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/google_compute_engine/boto/__init__.py b/google_compute_engine/boto/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/google_compute_engine/boto/__init__.py
diff --git a/google_compute_engine/boto/boto_config.py b/google_compute_engine/boto/boto_config.py
new file mode 100644
index 0000000..fe68159
--- /dev/null
+++ b/google_compute_engine/boto/boto_config.py
@@ -0,0 +1,87 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A library used to set up the system boto.cfg file.
+
+If a project ID is not provided, this requests the project ID from the
+metadata server and installs the compute authentication plugin.
+
+Note that the config starts with the contents of /etc/boto.cfg.template,
+overrides settings, and then persists the result to /etc/boto.cfg. This
+is done so that the system boto.cfg can be removed prior to image
+packaging.
+"""
+
+import os
+
+from google_compute_engine import config_manager
+from google_compute_engine import logger
+from google_compute_engine import metadata_watcher
+
+
+class BotoConfig(object):
+ """Creates a boto config file for standalone GSUtil."""
+
+ boto_config = '/etc/boto.cfg'
+ boto_config_template = '/etc/boto.cfg.template'
+ boto_config_script = os.path.abspath(__file__)
+ boto_config_header = (
+ 'This file is automatically created at boot time by the %s script. Do '
+ 'not edit this file directly. If you need to add items to this file, '
+ 'create or edit %s instead and then re-run the script.')
+
+ def __init__(self, project_id=None):
+ """Constructor.
+
+ Args:
+ project_id: string, the project ID to use in the config file.
+ """
+ self.logger = logger.Logger(name='boto-setup')
+ self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
+ self._CreateConfig(project_id)
+
+ def _GetNumericProjectId(self):
+ """Get the numeric project ID for this VM.
+
+ Returns:
+ string, the numeric project ID if one is found.
+ """
+ project_id = 'project/numeric-project-id'
+ return self.watcher.GetMetadata(metadata_key=project_id, recursive=False)
+
+ def _CreateConfig(self, project_id):
+ """Create the boto config to support standalone GSUtil.
+
+ Args:
+ project_id: string, the project ID to use in the config file.
+ """
+ project_id = project_id or self._GetNumericProjectId()
+
+ # Our project doesn't support service accounts.
+ if not project_id:
+ return
+
+ self.boto_config_header %= (
+ self.boto_config_script, self.boto_config_template)
+ config = config_manager.ConfigManager(
+ config_file=self.boto_config_template,
+ config_header=self.boto_config_header)
+ boto_dir = os.path.dirname(self.boto_config_script)
+
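+ # For illustration, the /etc/boto.cfg written below then contains
+ # sections like the following (the plugin path is machine-specific):
+ # [GSUtil]
+ # default_project_id = <numeric project id>
+ # default_api_version = 2
+ # [GoogleCompute]
+ # service_account = default
+ # [Plugin]
+ # plugin_directory = <directory containing this script>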
+ config.SetOption('GSUtil', 'default_project_id', project_id)
+ config.SetOption('GSUtil', 'default_api_version', '2')
+ config.SetOption('GoogleCompute', 'service_account', 'default')
+ config.SetOption('Plugin', 'plugin_directory', boto_dir)
+ config.WriteConfig(config_file=self.boto_config)
diff --git a/google_compute_engine/boto/compute_auth.py b/google_compute_engine/boto/compute_auth.py
new file mode 100644
index 0000000..6956619
--- /dev/null
+++ b/google_compute_engine/boto/compute_auth.py
@@ -0,0 +1,62 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Authentication module for using Google Compute service accounts."""
+
+from boto import auth_handler
+from google_compute_engine import logger
+from google_compute_engine import metadata_watcher
+
+GS_SCOPES = set([
+ 'https://www.googleapis.com/auth/devstorage.read_only',
+ 'https://www.googleapis.com/auth/devstorage.read_write',
+ 'https://www.googleapis.com/auth/devstorage.full_control',
+])
+
+
+class ComputeAuth(auth_handler.AuthHandler):
+ """Google Compute service account auth handler.
+
+ The boto library reads the system config file (/etc/boto.cfg) and looks
+ at a config value called 'plugin_directory'. It then loads the Python
+ files and finds classes derived from boto.auth_handler.AuthHandler.
+ """
+
+ capability = ['google-oauth2', 's3']
+
+ def __init__(self, path, config, provider):
+ self.logger = logger.Logger(name='compute-auth')
+ self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
+ self.service_account = config.get('GoogleCompute', 'service_account', '')
+ self.scopes = None
+ if provider.name == 'google' and self.service_account:
+ self.scopes = self._GetGsScopes()
+ if not self.scopes:
+ raise auth_handler.NotReadyToAuthenticate()
+
+ def _GetGsScopes(self):
+ """Return all Google Storage scopes available on this VM."""
+ scopes_key = 'instance/service-accounts/%s/scopes' % self.service_account
+ scopes = self.watcher.GetMetadata(metadata_key=scopes_key, recursive=False)
+ return list(GS_SCOPES.intersection(set(scopes))) if scopes else None
+
+ def _GetAccessToken(self):
+ """Return an oauth2 access token for Google Storage."""
+ token_key = 'instance/service-accounts/%s/token' % self.service_account
+ token = self.watcher.GetMetadata(metadata_key=token_key, recursive=False)
+ return token['access_token'] if token else None
+
+ def add_auth(self, http_request):
+ http_request.headers['Authorization'] = 'OAuth %s' % self._GetAccessToken()
diff --git a/google_compute_engine/boto/tests/boto_config_test.py b/google_compute_engine/boto/tests/boto_config_test.py
new file mode 100644
index 0000000..1ee7e52
--- /dev/null
+++ b/google_compute_engine/boto/tests/boto_config_test.py
@@ -0,0 +1,94 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for boto_config.py module."""
+
+from google_compute_engine.boto import boto_config
+from google_compute_engine.test_compat import mock
+from google_compute_engine.test_compat import unittest
+
+
+class BotoConfigTest(unittest.TestCase):
+
+ def setUp(self):
+ self.project_id = 'project'
+ boto_config.BotoConfig.boto_config = 'config'
+ boto_config.BotoConfig.boto_config_template = 'template'
+ boto_config.BotoConfig.boto_config_script = '/tmp/test.py'
+ boto_config.BotoConfig.boto_config_header = '%s %s'
+
+ @mock.patch('google_compute_engine.boto.boto_config.metadata_watcher')
+ @mock.patch('google_compute_engine.boto.boto_config.logger')
+ @mock.patch('google_compute_engine.boto.boto_config.config_manager')
+ def testCreateConfig(self, mock_config, mock_logger, mock_watcher):
+ mock_config_instance = mock.Mock()
+ mock_config.ConfigManager.return_value = mock_config_instance
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_config.ConfigManager, 'config')
+ mocks.attach_mock(mock_config_instance.SetOption, 'set')
+ mocks.attach_mock(mock_config_instance.WriteConfig, 'write')
+ mocks.attach_mock(mock_logger, 'logger')
+ mocks.attach_mock(mock_watcher, 'watcher')
+ mock_logger_instance = mock.Mock()
+ mock_logger.Logger.return_value = mock_logger_instance
+
+ boto_config.BotoConfig(self.project_id)
+ expected_calls = [
+ mock.call.logger.Logger(name=mock.ANY),
+ mock.call.watcher.MetadataWatcher(logger=mock_logger_instance),
+ mock.call.config(
+ config_file='template', config_header='/tmp/test.py template'),
+ mock.call.set('GSUtil', 'default_project_id', self.project_id),
+ mock.call.set('GSUtil', 'default_api_version', '2'),
+ mock.call.set('GoogleCompute', 'service_account', 'default'),
+ mock.call.set('Plugin', 'plugin_directory', '/tmp'),
+ mock.call.write(config_file='config'),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.boto.boto_config.metadata_watcher')
+ @mock.patch('google_compute_engine.boto.boto_config.config_manager')
+ def testCreateConfigProjectId(self, mock_config, mock_watcher):
+ mock_config_instance = mock.Mock()
+ mock_config.ConfigManager.return_value = mock_config_instance
+ mock_watcher_instance = mock.Mock()
+ mock_watcher.MetadataWatcher.return_value = mock_watcher_instance
+ mock_watcher_instance.GetMetadata.return_value = self.project_id
+
+ boto_config.BotoConfig()
+ mock_watcher_instance.GetMetadata.assert_called_once_with(
+ metadata_key='project/numeric-project-id', recursive=False)
+ expected_calls = [
+ mock.call('GSUtil', 'default_project_id', self.project_id),
+ ]
+ mock_config_instance.SetOption.assert_has_calls(expected_calls)
+
+ @mock.patch('google_compute_engine.boto.boto_config.metadata_watcher')
+ @mock.patch('google_compute_engine.boto.boto_config.config_manager')
+ def testCreateConfigExit(self, mock_config, mock_watcher):
+ mock_config_instance = mock.Mock()
+ mock_config.ConfigManager.return_value = mock_config_instance
+ mock_watcher_instance = mock.Mock()
+ mock_watcher.MetadataWatcher.return_value = mock_watcher_instance
+ mock_watcher_instance.GetMetadata.return_value = None
+
+ boto_config.BotoConfig()
+ mock_watcher_instance.GetMetadata.assert_called_once_with(
+ metadata_key='project/numeric-project-id', recursive=False)
+ mock_config_instance.SetOption.assert_not_called()
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/google_compute_engine/boto/tests/compute_auth_test.py b/google_compute_engine/boto/tests/compute_auth_test.py
new file mode 100644
index 0000000..1e5b54a
--- /dev/null
+++ b/google_compute_engine/boto/tests/compute_auth_test.py
@@ -0,0 +1,100 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for compute_auth.py module."""
+
+from google_compute_engine.boto import compute_auth
+from google_compute_engine.test_compat import mock
+from google_compute_engine.test_compat import unittest
+
+
+class ComputeAuthTest(unittest.TestCase):
+
+ def setUp(self):
+ self.service_account = 'service_account'
+ self.mock_config = mock.Mock()
+ self.mock_config.get.return_value = self.service_account
+ self.mock_provider = mock.Mock()
+ self.mock_provider.name = 'google'
+
+ @mock.patch('google_compute_engine.boto.compute_auth.metadata_watcher')
+ @mock.patch('google_compute_engine.boto.compute_auth.logger')
+ def testCreateConfig(self, mock_logger, mock_watcher):
+ scopes = list(compute_auth.GS_SCOPES)[1:2]
+ mock_watcher.GetMetadata.return_value = scopes
+ mock_watcher.MetadataWatcher.return_value = mock_watcher
+ scopes_key = 'instance/service-accounts/%s/scopes' % self.service_account
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_watcher, 'watcher')
+ mocks.attach_mock(mock_logger, 'logger')
+ mocks.attach_mock(self.mock_config, 'config')
+ mock_logger_instance = mock.Mock()
+ mock_logger.Logger.return_value = mock_logger_instance
+
+ mock_compute_auth = compute_auth.ComputeAuth(
+ None, self.mock_config, self.mock_provider)
+ expected_calls = [
+ mock.call.logger.Logger(name=mock.ANY),
+ mock.call.watcher.MetadataWatcher(logger=mock_logger_instance),
+ mock.call.config.get('GoogleCompute', 'service_account', ''),
+ mock.call.watcher.GetMetadata(metadata_key=scopes_key, recursive=False),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+ self.assertEqual(mock_compute_auth.scopes, scopes)
+
+ def testCreateConfigException(self):
+ self.mock_config.get.return_value = None
+
+ with self.assertRaises(compute_auth.auth_handler.NotReadyToAuthenticate):
+ compute_auth.ComputeAuth(None, self.mock_config, self.mock_provider)
+
+ @mock.patch('google_compute_engine.boto.compute_auth.metadata_watcher')
+ def testGetAccessToken(self, mock_watcher):
+ mock_watcher.MetadataWatcher.return_value = mock_watcher
+ mock_watcher.GetMetadata.side_effect = [
+ list(compute_auth.GS_SCOPES), # The Google Storage scopes.
+ {'access_token': 'token'}, # The access token.
+ {}, # The access token second query.
+ ]
+ mock_compute_auth = compute_auth.ComputeAuth(
+ None, self.mock_config, self.mock_provider)
+ self.assertEqual(mock_compute_auth._GetAccessToken(), 'token')
+ self.assertEqual(mock_compute_auth._GetAccessToken(), None)
+
+ token_key = 'instance/service-accounts/%s/token' % self.service_account
+ expected_calls = [
+ mock.ANY,
+ mock.call(metadata_key=token_key, recursive=False),
+ mock.call(metadata_key=token_key, recursive=False),
+ ]
+ self.assertEqual(mock_watcher.GetMetadata.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.boto.compute_auth.metadata_watcher')
+ def testAddAuth(self, mock_watcher):
+ mock_request = mock.Mock()
+ mock_request.headers = {}
+ mock_watcher.MetadataWatcher.return_value = mock_watcher
+ mock_watcher.GetMetadata.side_effect = [
+ list(compute_auth.GS_SCOPES), # The Google Storage scopes.
+ {'access_token': 'token'}, # The access token.
+ ]
+ mock_compute_auth = compute_auth.ComputeAuth(
+ None, self.mock_config, self.mock_provider)
+ mock_compute_auth.add_auth(mock_request)
+ self.assertEqual(mock_request.headers['Authorization'], 'OAuth token')
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/google_compute_engine/clock_skew/__init__.py b/google_compute_engine/clock_skew/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/google_compute_engine/clock_skew/__init__.py
diff --git a/google_compute_engine/clock_skew/clock_skew_daemon.py b/google_compute_engine/clock_skew/clock_skew_daemon.py
new file mode 100755
index 0000000..d228795
--- /dev/null
+++ b/google_compute_engine/clock_skew/clock_skew_daemon.py
@@ -0,0 +1,81 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Manage clock skew after migration on a Google Compute Engine instance."""
+
+import logging.handlers
+import optparse
+import subprocess
+
+from google_compute_engine import config_manager
+from google_compute_engine import file_utils
+from google_compute_engine import logger
+from google_compute_engine import metadata_watcher
+
+LOCKFILE = '/var/lock/google_clock_skew.lock'
+
+
+class ClockSkewDaemon(object):
+ """Responds to drift-token changes."""
+
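+ # The drift token value changes after events such as live migration,
+ # signalling that the guest's system clock may have drifted.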
+ drift_token = 'instance/virtual-clock/drift-token'
+
+ def __init__(self, debug=False):
+ """Constructor.
+
+ Args:
+ debug: bool, True if debug output should write to the console.
+ """
+ facility = logging.handlers.SysLogHandler.LOG_DAEMON
+ self.logger = logger.Logger(
+ name='google-clock-skew', debug=debug, facility=facility)
+ self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
+ try:
+ with file_utils.LockFile(LOCKFILE):
+ self.logger.info('Starting Google Clock Skew daemon.')
+ self.watcher.WatchMetadata(
+ self.HandleClockSync, metadata_key=self.drift_token,
+ recursive=False)
+ except (IOError, OSError) as e:
+ self.logger.warning(str(e))
+
+ def HandleClockSync(self, response):
+ """Called when clock drift token changes.
+
+ Args:
+ response: string, the metadata response with the new drift token value.
+ """
+ self.logger.info('Clock drift token has changed: %s.', response)
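+ # hwclock --hctosys sets the system clock from the hardware (RTC) clock.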
+ command = ['/sbin/hwclock', '--hctosys']
+ try:
+ subprocess.check_call(command)
+ except subprocess.CalledProcessError:
+ self.logger.warning('Failed to sync system time with hardware clock.')
+ else:
+ self.logger.info('Synced system time with hardware clock.')
+
+
+def main():
+ parser = optparse.OptionParser()
+ parser.add_option('-d', '--debug', action='store_true', dest='debug',
+ help='print debug output to the console.')
+ (options, _) = parser.parse_args()
+ instance_config = config_manager.ConfigManager()
+ if instance_config.GetOptionBool('Daemons', 'clock_skew_daemon'):
+ ClockSkewDaemon(debug=bool(options.debug))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/google_compute_engine/clock_skew/tests/clock_skew_daemon_test.py b/google_compute_engine/clock_skew/tests/clock_skew_daemon_test.py
new file mode 100644
index 0000000..ad18d54
--- /dev/null
+++ b/google_compute_engine/clock_skew/tests/clock_skew_daemon_test.py
@@ -0,0 +1,107 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for clock_skew_daemon.py module."""
+
+import subprocess
+
+from google_compute_engine.clock_skew import clock_skew_daemon
+from google_compute_engine.test_compat import mock
+from google_compute_engine.test_compat import unittest
+
+
+class ClockSkewDaemonTest(unittest.TestCase):
+
+ @mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.metadata_watcher')
+ @mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.logger.Logger')
+ @mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.file_utils.LockFile')
+ def testClockSkewDaemon(self, mock_lock, mock_logger, mock_watcher):
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_lock, 'lock')
+ mocks.attach_mock(mock_logger, 'logger')
+ mocks.attach_mock(mock_watcher, 'watcher')
+ metadata_key = clock_skew_daemon.ClockSkewDaemon.drift_token
+ mock_logger.return_value = mock_logger
+ mock_watcher.MetadataWatcher.return_value = mock_watcher
+ with mock.patch.object(
+ clock_skew_daemon.ClockSkewDaemon, 'HandleClockSync') as mock_handle:
+ clock_skew_daemon.ClockSkewDaemon()
+ expected_calls = [
+ mock.call.logger(name=mock.ANY, debug=False, facility=mock.ANY),
+ mock.call.watcher.MetadataWatcher(logger=mock_logger),
+ mock.call.lock(clock_skew_daemon.LOCKFILE),
+ mock.call.lock().__enter__(),
+ mock.call.logger.info(mock.ANY),
+ mock.call.watcher.WatchMetadata(
+ mock_handle, metadata_key=metadata_key, recursive=False),
+ mock.call.lock().__exit__(None, None, None),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.metadata_watcher')
+ @mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.logger.Logger')
+ @mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.file_utils.LockFile')
+ def testClockSkewDaemonError(self, mock_lock, mock_logger, mock_watcher):
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_lock, 'lock')
+ mocks.attach_mock(mock_logger, 'logger')
+ mocks.attach_mock(mock_watcher, 'watcher')
+ mock_lock.side_effect = IOError('Test Error')
+ mock_logger.return_value = mock_logger
+ with mock.patch.object(
+ clock_skew_daemon.ClockSkewDaemon, 'HandleClockSync'):
+ clock_skew_daemon.ClockSkewDaemon(debug=True)
+ expected_calls = [
+ mock.call.logger(name=mock.ANY, debug=True, facility=mock.ANY),
+ mock.call.watcher.MetadataWatcher(logger=mock_logger),
+ mock.call.lock(clock_skew_daemon.LOCKFILE),
+ mock.call.logger.warning('Test Error'),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.subprocess.check_call')
+ def testHandleClockSync(self, mock_call):
+ command = ['/sbin/hwclock', '--hctosys']
+ mock_sync = mock.create_autospec(clock_skew_daemon.ClockSkewDaemon)
+ mock_logger = mock.Mock()
+ mock_sync.logger = mock_logger
+
+ clock_skew_daemon.ClockSkewDaemon.HandleClockSync(mock_sync, 'Response')
+ mock_call.assert_called_once_with(command)
+ expected_calls = [
+ mock.call.info(mock.ANY, 'Response'),
+ mock.call.info(mock.ANY),
+ ]
+ self.assertEqual(mock_logger.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.clock_skew.clock_skew_daemon.subprocess.check_call')
+ def testHandleClockSyncError(self, mock_call):
+ command = ['/sbin/hwclock', '--hctosys']
+ mock_sync = mock.create_autospec(clock_skew_daemon.ClockSkewDaemon)
+ mock_logger = mock.Mock()
+ mock_sync.logger = mock_logger
+ mock_call.side_effect = subprocess.CalledProcessError(1, 'Test')
+
+ clock_skew_daemon.ClockSkewDaemon.HandleClockSync(mock_sync, 'Response')
+ mock_call.assert_called_once_with(command)
+ expected_calls = [
+ mock.call.info(mock.ANY, 'Response'),
+ mock.call.warning(mock.ANY),
+ ]
+ self.assertEqual(mock_logger.mock_calls, expected_calls)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/google_compute_engine/compat.py b/google_compute_engine/compat.py
new file mode 100644
index 0000000..824724a
--- /dev/null
+++ b/google_compute_engine/compat.py
@@ -0,0 +1,35 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A module for resolving compatibility issues between Python 2 and Python 3."""
+
+import sys
+
+if sys.version_info >= (3,):
+ # Python 3 imports.
+ import configparser as parser
+ import http.client as httpclient
+ import urllib.error as urlerror
+ import urllib.parse as urlparse
+ import urllib.request as urlrequest
+ import urllib.request as urlretrieve
+else:
+ # Python 2 imports.
+ import ConfigParser as parser
+ import httplib as httpclient
+ import urllib as urlparse
+ import urllib as urlretrieve
+ import urllib2 as urlrequest
+ import urllib2 as urlerror
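
The aliases above let callers write one code path for both interpreters. A hedged sketch of typical usage; the URL is a placeholder:

    from google_compute_engine.compat import urlerror, urlrequest

    def fetch(url='http://example.com/'):
        # urlrequest resolves to urllib2 on Python 2 and urllib.request on
        # Python 3; urlerror.URLError exists under both names.
        try:
            return urlrequest.urlopen(url, timeout=10).read()
        except urlerror.URLError:
            return None
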
diff --git a/google_compute_engine/config_manager.py b/google_compute_engine/config_manager.py
new file mode 100644
index 0000000..358a97f
--- /dev/null
+++ b/google_compute_engine/config_manager.py
@@ -0,0 +1,109 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A library for retrieving and modifying configuration settings."""
+
+import os
+import textwrap
+
+from google_compute_engine import file_utils
+from google_compute_engine.compat import parser
+
+CONFIG = '/etc/default/instance_configs.cfg'
+
+
+class ConfigManager(object):
+ """Process the configuration defaults."""
+
+ def __init__(self, config_file=None, config_header=None):
+ """Constructor.
+
+ Args:
+ config_file: string, the location of the config file.
+ config_header: string, the message to write at the top of the config.
+ """
+ self.config_file = config_file or CONFIG
+ self.config_header = config_header
+ self.config = parser.SafeConfigParser()
+ self.config.read(self.config_file)
+
+ def _AddHeader(self, fp):
+ """Create a file header in the config.
+
+ Args:
+      fp: file object, a file handle used to write the header.
+ """
+ text = textwrap.wrap(
+ textwrap.dedent(self.config_header), break_on_hyphens=False)
+ fp.write('\n'.join(['# ' + line for line in text]))
+ fp.write('\n\n')
+
+ def GetOptionString(self, section, option):
+ """Get the value of an option in the config file.
+
+ Args:
+ section: string, the section of the config file to check.
+ option: string, the option to retrieve the value of.
+
+ Returns:
+ string, the value of the option or None if the option doesn't exist.
+ """
+ if self.config.has_option(section, option):
+ return self.config.get(section, option)
+ else:
+ return None
+
+ def GetOptionBool(self, section, option):
+ """Get the value of an option in the config file.
+
+ Args:
+ section: string, the section of the config file to check.
+ option: string, the option to retrieve the value of.
+
+ Returns:
+ bool, True if the option is enabled.
+ """
+ return (self.config.has_option(section, option) and
+ self.config.getboolean(section, option))
+
+ def SetOption(self, section, option, value, overwrite=True):
+ """Set the value of an option in the config file.
+
+ Args:
+ section: string, the section of the config file to check.
+ option: string, the option to set the value of.
+      value: string, the value to set the option to.
+ overwrite: bool, True to overwrite an existing value in the config file.
+ """
+ if not overwrite and self.config.has_option(section, option):
+ return
+ if not self.config.has_section(section):
+ self.config.add_section(section)
+ self.config.set(section, option, str(value))
+
+ def WriteConfig(self, config_file=None):
+ """Write the config values to a given file.
+
+ Args:
+ config_file: string, the file location of the config file to write.
+ """
+ config_file = config_file or self.config_file
+ config_name = os.path.splitext(os.path.basename(config_file))[0]
+ config_lock = '/var/lock/google_%s.lock' % config_name
+ with file_utils.LockFile(config_lock):
+ with open(config_file, 'w') as config_fp:
+ if self.config_header:
+ self._AddHeader(config_fp)
+ self.config.write(config_fp)
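
A short sketch of the ConfigManager API above, run against a scratch file rather than /etc/default/instance_configs.cfg; note that WriteConfig takes a flock under /var/lock, so it needs suitable permissions:

    from google_compute_engine import config_manager

    config = config_manager.ConfigManager(config_file='/tmp/example.cfg')
    config.SetOption('Daemons', 'clock_skew_daemon', 'true', overwrite=False)
    print(config.GetOptionBool('Daemons', 'clock_skew_daemon'))   # True
    print(config.GetOptionString('Daemons', 'missing_option'))    # None
    config.WriteConfig()  # locks /var/lock/google_example.lock while writing
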
diff --git a/google_compute_engine/file_utils.py b/google_compute_engine/file_utils.py
new file mode 100644
index 0000000..ba6ebb2
--- /dev/null
+++ b/google_compute_engine/file_utils.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A library providing file utilities for setting permissions and locking."""
+
+import contextlib
+import errno
+import fcntl
+import os
+import subprocess
+
+
+def _SetSELinuxContext(path):
+ """Set the appropriate SELinux context, if SELinux tools are installed.
+
+ Calls /sbin/restorecon on the provided path to set the SELinux context as
+ specified by policy. This call does not operate recursively.
+
+ Only some OS configurations use SELinux. It is therefore acceptable for
+ restorecon to be missing, in which case we do nothing.
+
+ Args:
+ path: string, the path on which to fix the SELinux context.
+ """
+ restorecon = '/sbin/restorecon'
+ if os.path.isfile(restorecon) and os.access(restorecon, os.X_OK):
+ subprocess.call([restorecon, path])
+
+
+def SetPermissions(path, mode=None, uid=None, gid=None, mkdir=False):
+ """Set the permissions and ownership of a path.
+
+ Args:
+    path: string, the path whose permissions and ownership are set.
+ mode: octal string, the permissions to set on the path.
+ uid: int, the owner ID to be set for the path.
+ gid: int, the group ID to be set for the path.
+ mkdir: bool, True if the directory needs to be created.
+ """
+ if mkdir and not os.path.exists(path):
+ os.mkdir(path, mode or 0o777)
+ elif mode:
+ os.chmod(path, mode)
+ if uid and gid:
+ os.chown(path, uid, gid)
+ _SetSELinuxContext(path)
+
+
+def Lock(fd, path, blocking):
+ """Lock the provided file descriptor.
+
+ Args:
+ fd: int, the file descriptor of the file to lock.
+ path: string, the name of the file to lock.
+    blocking: bool, whether to block waiting for the lock instead of failing.
+
+ Raises:
+ IOError, raised from flock while attempting to lock a file.
+ """
+ operation = fcntl.LOCK_EX if blocking else fcntl.LOCK_EX | fcntl.LOCK_NB
+ try:
+ fcntl.flock(fd, operation)
+ except IOError as e:
+ if e.errno == errno.EWOULDBLOCK:
+ raise IOError('Exception locking %s. File already locked.' % path)
+ else:
+ raise IOError('Exception locking %s. %s.' % (path, str(e)))
+
+
+def Unlock(fd, path):
+ """Release the lock on the file.
+
+ Args:
+ fd: int, the file descriptor of the file to unlock.
+    path: string, the name of the file to unlock.
+
+ Raises:
+ IOError, raised from flock while attempting to release a file lock.
+ """
+ try:
+ fcntl.flock(fd, fcntl.LOCK_UN | fcntl.LOCK_NB)
+ except IOError as e:
+ if e.errno == errno.EWOULDBLOCK:
+ raise IOError('Exception unlocking %s. Locked by another process.' % path)
+ else:
+ raise IOError('Exception unlocking %s. %s.' % (path, str(e)))
+
+
+@contextlib.contextmanager
+def LockFile(path, blocking=False):
+ """Interface to flock-based file locking to prevent concurrent executions.
+
+ Args:
+ path: string, the name of the file to lock.
+    blocking: bool, whether to block waiting for the lock instead of failing.
+
+ Yields:
+ None, yields when a lock on the file is obtained.
+
+ Raises:
+ IOError, raised from flock locking operations on a file.
+ OSError, raised from file operations.
+ """
+ fd = os.open(path, os.O_CREAT)
+ try:
+ Lock(fd, path, blocking)
+ yield
+ finally:
+ try:
+ Unlock(fd, path)
+ finally:
+ os.close(fd)
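
A sketch of the locking interface above; with the default blocking=False, a second process entering the same section before the first exits gets an IOError instead of waiting:

    from google_compute_engine import file_utils

    try:
        with file_utils.LockFile('/tmp/example.lock'):
            pass  # only one process at a time executes this block
    except (IOError, OSError) as e:
        print('Could not acquire lock: %s' % e)
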
diff --git a/google_compute_engine/instance_setup/__init__.py b/google_compute_engine/instance_setup/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/google_compute_engine/instance_setup/__init__.py
diff --git a/google_compute_engine/instance_setup/instance_config.py b/google_compute_engine/instance_setup/instance_config.py
new file mode 100644
index 0000000..2d39283
--- /dev/null
+++ b/google_compute_engine/instance_setup/instance_config.py
@@ -0,0 +1,96 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A library used to set up the instance defaults file.
+
+Note that this starts with whatever is in
+/etc/default/instance_configs.cfg.template and then persists it into
+/etc/default/instance_configs.cfg. This is done so that the system
+instance_configs.cfg can be removed prior to image packaging.
+"""
+
+import os
+
+from google_compute_engine import config_manager
+from google_compute_engine.compat import parser
+
+
+class InstanceConfig(config_manager.ConfigManager):
+ """Creates a defaults config file for instance configuration."""
+
+ instance_config = '/etc/default/instance_configs.cfg'
+ instance_config_template = '/etc/default/instance_configs.cfg.template'
+ instance_config_script = os.path.abspath(__file__)
+ instance_config_header = (
+ 'This file is automatically created at boot time by the %s script. Do '
+ 'not edit this file directly. If you need to add items to this file, '
+ 'create or edit %s instead and then re-run the script.')
+ instance_config_options = {
+ 'Accounts': {
+ 'deprovision_remove': 'false',
+ 'groups': 'adm,dip,lxd,plugdev,video',
+ },
+ 'Daemons': {
+ 'accounts_daemon': 'true',
+ 'clock_skew_daemon': 'true',
+ 'ip_forwarding_daemon': 'true',
+ },
+ 'Instance': {
+ 'instance_id': '0',
+ },
+ 'InstanceSetup': {
+ 'optimize_local_ssd': 'true',
+ 'network_enabled': 'true',
+ 'set_boto_config': 'true',
+ 'set_host_keys': 'true',
+ 'set_multiqueue': 'true',
+ },
+ 'IpForwarding': {
+ 'ethernet_proto_id': '66',
+ },
+ 'MetadataScripts': {
+ 'startup': 'true',
+ 'shutdown': 'true',
+ },
+ }
+
+ def __init__(self):
+ """Constructor.
+
+ Inherit from the ConfigManager class. Read the template for instance
+ defaults and write new sections and options. This prevents package
+    updates from overriding user-set defaults.
+ """
+ self.instance_config_header %= (
+ self.instance_config_script, self.instance_config_template)
+ super(InstanceConfig, self).__init__(
+ config_file=self.instance_config_template,
+ config_header=self.instance_config_header)
+
+ if os.path.exists(self.instance_config):
+ config = parser.SafeConfigParser()
+ config.read(self.instance_config)
+ defaults = dict((s, dict(config.items(s))) for s in config.sections())
+ else:
+ defaults = self.instance_config_options
+
+ for section, options in sorted(defaults.items()):
+ for option, value in sorted(options.items()):
+ super(InstanceConfig, self).SetOption(
+ section, option, value, overwrite=False)
+
+ def WriteConfig(self):
+ """Write the config values to the instance defaults file."""
+ super(InstanceConfig, self).WriteConfig(config_file=self.instance_config)
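
The merge behavior of the constructor above hinges on SetOption's overwrite flag: values already present in the persisted config win over template defaults. A minimal sketch against a scratch file:

    from google_compute_engine import config_manager

    config = config_manager.ConfigManager(config_file='/tmp/template.cfg')
    config.SetOption('Daemons', 'clock_skew_daemon', 'false')  # existing value
    config.SetOption('Daemons', 'clock_skew_daemon', 'true', overwrite=False)
    assert config.GetOptionString('Daemons', 'clock_skew_daemon') == 'false'
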
diff --git a/google_compute_engine/instance_setup/instance_setup.py b/google_compute_engine/instance_setup/instance_setup.py
new file mode 100755
index 0000000..7f615b0
--- /dev/null
+++ b/google_compute_engine/instance_setup/instance_setup.py
@@ -0,0 +1,186 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Run initialization code the first time the instance boots."""
+
+import logging.handlers
+import optparse
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+
+from google_compute_engine import file_utils
+from google_compute_engine import logger
+from google_compute_engine import metadata_watcher
+
+from google_compute_engine.boto import boto_config
+from google_compute_engine.instance_setup import instance_config
+
+
+class InstanceSetup(object):
+ """Initialize the instance the first time it boots."""
+
+ def __init__(self, debug=False):
+ """Constructor.
+
+ Args:
+ debug: bool, True if debug output should write to the console.
+ """
+ facility = logging.handlers.SysLogHandler.LOG_DAEMON
+ self.logger = logger.Logger(
+ name='instance-setup', debug=debug, facility=facility)
+ self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
+ self.metadata_dict = None
+ self.instance_config = instance_config.InstanceConfig()
+
+ if self.instance_config.GetOptionBool(
+ 'InstanceSetup', 'optimize_local_ssd'):
+ self._RunScript('optimize_local_ssd')
+ if self.instance_config.GetOptionBool('InstanceSetup', 'set_multiqueue'):
+ self._RunScript('set_multiqueue')
+ if self.instance_config.GetOptionBool('InstanceSetup', 'network_enabled'):
+ while not self.metadata_dict:
+ self.metadata_dict = self.watcher.GetMetadata()
+ if self.instance_config.GetOptionBool('InstanceSetup', 'set_host_keys'):
+ self._SetSshHostKeys()
+ if self.instance_config.GetOptionBool('InstanceSetup', 'set_boto_config'):
+ self._SetupBotoConfig()
+ try:
+ self.instance_config.WriteConfig()
+ except (IOError, OSError) as e:
+ self.logger.warning(str(e))
+
+ def _RunScript(self, script):
+ """Run a script and log the streamed script output.
+
+ Args:
+ script: string, the file location of an executable script.
+ """
+ process = subprocess.Popen(
+ script, shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
+ while True:
+ for line in iter(process.stdout.readline, b''):
+ self.logger.info(line.decode('utf-8').rstrip('\n'))
+ if process.poll() is not None:
+ break
+
+ def _GetInstanceId(self):
+ """Get the instance ID for this VM.
+
+ Returns:
+ string, the instance ID for the VM.
+ """
+ try:
+ return str(self.metadata_dict['instance']['id'])
+ except KeyError:
+ self.logger.warning('Instance ID was not found in metadata.')
+ return None
+
+ def _GenerateSshKey(self, key_type, key_dest):
+ """Generate a new SSH key.
+
+ Args:
+ key_type: string, the type of the SSH key.
+ key_dest: string, a file location to store the SSH key.
+ """
+    # Create a temporary file to reserve a unique path for the new key pair.
+ with tempfile.NamedTemporaryFile(prefix=key_type, delete=True) as temp:
+ temp_key = temp.name
+
+ command = ['ssh-keygen', '-t', key_type, '-f', temp_key, '-N', '', '-q']
+ try:
+ self.logger.info('Generating SSH key %s.', key_dest)
+ subprocess.check_call(command)
+ except subprocess.CalledProcessError:
+ self.logger.warning('Could not create SSH key %s.', key_dest)
+ return
+
+ shutil.move(temp_key, key_dest)
+ shutil.move('%s.pub' % temp_key, '%s.pub' % key_dest)
+
+ file_utils.SetPermissions(key_dest, mode=0o600)
+ file_utils.SetPermissions('%s.pub' % key_dest, mode=0o644)
+
+ def _StartSshd(self):
+ """Initialize the SSH daemon."""
+ # Exit as early as possible.
+ # Instance setup systemd scripts block sshd from starting.
+ if os.path.exists('/bin/systemctl'):
+ return
+ elif (os.path.exists('/etc/init.d/ssh') or
+ os.path.exists('/etc/init/ssh.conf')):
+ subprocess.call(['service', 'ssh', 'start'])
+ subprocess.call(['service', 'ssh', 'reload'])
+ elif (os.path.exists('/etc/init.d/sshd') or
+ os.path.exists('/etc/init/sshd.conf')):
+ subprocess.call(['service', 'sshd', 'start'])
+ subprocess.call(['service', 'sshd', 'reload'])
+
+ def _SetSshHostKeys(self):
+ """Regenerates SSH host keys when the VM is restarted with a new IP address.
+
+ Booting a VM from an image with a known SSH key allows a number of attacks.
+ This function will regenerating the host key whenever the IP address
+ changes. This applies the first time the instance is booted, and each time
+ the disk is used to boot a new instance.
+ """
+ section = 'Instance'
+ instance_id = self._GetInstanceId()
+ if instance_id != self.instance_config.GetOptionString(
+ section, 'instance_id'):
+ self.logger.info('Generating SSH host keys for instance %s.', instance_id)
+ file_regex = re.compile(r'ssh_host_(?P<type>[a-z0-9]*)_key\Z')
+ key_dir = '/etc/ssh'
+ key_files = [f for f in os.listdir(key_dir) if file_regex.match(f)]
+ for key_file in key_files:
+ key_type = file_regex.match(key_file).group('type')
+ key_dest = os.path.join(key_dir, key_file)
+ self._GenerateSshKey(key_type, key_dest)
+ self._StartSshd()
+ self.instance_config.SetOption(section, 'instance_id', str(instance_id))
+
+ def _GetNumericProjectId(self):
+ """Get the numeric project ID.
+
+ Returns:
+ string, the numeric project ID.
+ """
+ try:
+ return str(self.metadata_dict['project']['numericProjectId'])
+ except KeyError:
+ self.logger.warning('Numeric project ID was not found in metadata.')
+ return None
+
+ def _SetupBotoConfig(self):
+ """Set the boto config so GSUtil works with provisioned service accounts."""
+ project_id = self._GetNumericProjectId()
+ try:
+ boto_config.BotoConfig(project_id)
+ except (IOError, OSError) as e:
+ self.logger.warning(str(e))
+
+
+def main():
+ parser = optparse.OptionParser()
+ parser.add_option('-d', '--debug', action='store_true', dest='debug',
+ help='print debug output to the console.')
+ (options, _) = parser.parse_args()
+ InstanceSetup(debug=bool(options.debug))
+
+
+if __name__ == '__main__':
+ main()
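
The host key regeneration above hinges on a single regex to find private key files and recover their types. A standalone sketch with illustrative file names (not read from a real /etc/ssh):

    import re

    file_regex = re.compile(r'ssh_host_(?P<type>[a-z0-9]*)_key\Z')
    listing = ['ssh_config', 'ssh_host_rsa_key', 'ssh_host_rsa_key.pub',
               'ssh_host_ed25519_key']
    for name in listing:
        match = file_regex.match(name)
        if match:  # '.pub' files fail the \Z anchor and are skipped
            print(match.group('type'), '/etc/ssh/%s' % name)
    # rsa /etc/ssh/ssh_host_rsa_key
    # ed25519 /etc/ssh/ssh_host_ed25519_key
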
diff --git a/google_compute_engine/instance_setup/tests/instance_config_test.py b/google_compute_engine/instance_setup/tests/instance_config_test.py
new file mode 100644
index 0000000..6e98f19
--- /dev/null
+++ b/google_compute_engine/instance_setup/tests/instance_config_test.py
@@ -0,0 +1,106 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for instance_config.py module."""
+
+from google_compute_engine.instance_setup import instance_config
+from google_compute_engine.test_compat import mock
+from google_compute_engine.test_compat import unittest
+
+
+class InstanceConfigTest(unittest.TestCase):
+
+ def setUp(self):
+ instance_config.InstanceConfig.instance_config = 'config'
+ instance_config.InstanceConfig.instance_config_template = 'template'
+ instance_config.InstanceConfig.instance_config_script = '/tmp/test.py'
+ instance_config.InstanceConfig.instance_config_header = '%s %s'
+ instance_config.InstanceConfig.instance_config_options = {
+ 'third': {
+ 'e': '3',
+ 'c': '1',
+ 'd': '2',
+ },
+ 'first': {
+ 'a': 'false',
+ },
+ 'second': {
+ 'b': 'true',
+ },
+ }
+
+ @mock.patch('google_compute_engine.instance_setup.instance_config.os.path.exists')
+ @mock.patch('google_compute_engine.instance_setup.instance_config.config_manager.ConfigManager.SetOption')
+ @mock.patch('google_compute_engine.instance_setup.instance_config.config_manager.ConfigManager.__init__')
+ def testInstanceConfig(self, mock_init, mock_set, mock_exists):
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_init, 'init')
+ mocks.attach_mock(mock_set, 'set')
+ mocks.attach_mock(mock_exists, 'exists')
+ mock_exists.return_value = False
+
+ instance_config.InstanceConfig()
+ expected_calls = [
+ mock.call.init(
+ config_file='template', config_header='/tmp/test.py template'),
+ mock.call.exists('config'),
+ mock.call.set('first', 'a', 'false', overwrite=False),
+ mock.call.set('second', 'b', 'true', overwrite=False),
+ mock.call.set('third', 'c', '1', overwrite=False),
+ mock.call.set('third', 'd', '2', overwrite=False),
+ mock.call.set('third', 'e', '3', overwrite=False),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.instance_setup.instance_config.os.path.exists')
+ @mock.patch('google_compute_engine.instance_setup.instance_config.parser')
+ @mock.patch('google_compute_engine.instance_setup.instance_config.config_manager.ConfigManager.SetOption')
+ @mock.patch('google_compute_engine.instance_setup.instance_config.config_manager.ConfigManager.__init__')
+ def testInstanceConfigExists(self, mock_init, mock_set, mock_parser, mock_exists):
+ mock_config = mock.create_autospec(instance_config.parser.SafeConfigParser)
+ mock_config.read = mock.Mock()
+ mock_config.sections = mock.Mock()
+ mock_config.sections.return_value = ['a', 'b']
+ mock_config.items = lambda key: {'key: %s' % key: 'value: %s' % key}
+ mock_parser.SafeConfigParser.return_value = mock_config
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_init, 'init')
+ mocks.attach_mock(mock_set, 'set')
+ mocks.attach_mock(mock_parser, 'parser')
+ mocks.attach_mock(mock_exists, 'exists')
+ mock_exists.return_value = True
+
+ instance_config.InstanceConfig()
+ expected_calls = [
+ mock.call.init(
+ config_file='template', config_header='/tmp/test.py template'),
+ mock.call.exists('config'),
+ mock.call.parser.SafeConfigParser(),
+ mock.call.parser.SafeConfigParser().read('config'),
+ mock.call.parser.SafeConfigParser().sections(),
+ mock.call.set('a', 'key: a', 'value: a', overwrite=False),
+ mock.call.set('b', 'key: b', 'value: b', overwrite=False),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.instance_setup.instance_config.config_manager.ConfigManager.WriteConfig')
+ def testWriteConfig(self, mock_write):
+ mock_config = instance_config.InstanceConfig()
+ instance_config.InstanceConfig.WriteConfig(mock_config)
+ mock_write.assert_called_once_with(config_file='config')
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/google_compute_engine/instance_setup/tests/instance_setup_test.py b/google_compute_engine/instance_setup/tests/instance_setup_test.py
new file mode 100644
index 0000000..c4469da
--- /dev/null
+++ b/google_compute_engine/instance_setup/tests/instance_setup_test.py
@@ -0,0 +1,321 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for instance_setup.py module."""
+
+import subprocess
+
+from google_compute_engine.instance_setup import instance_setup
+from google_compute_engine.test_compat import mock
+from google_compute_engine.test_compat import unittest
+
+
+class InstanceSetupTest(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_instance_config = mock.Mock()
+ self.mock_logger = mock.Mock()
+ self.mock_setup = mock.create_autospec(instance_setup.InstanceSetup)
+ self.mock_setup.instance_config = self.mock_instance_config
+ self.mock_setup.logger = self.mock_logger
+
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.instance_config')
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.metadata_watcher')
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.logger')
+ def testInstanceSetup(self, mock_logger, mock_watcher, mock_config):
+ mock_setup = mock.create_autospec(instance_setup.InstanceSetup)
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_logger, 'logger')
+ mocks.attach_mock(mock_watcher, 'watcher')
+ mocks.attach_mock(mock_config, 'config')
+ mocks.attach_mock(mock_setup, 'setup')
+ mock_logger_instance = mock.Mock()
+ mock_logger.Logger.return_value = mock_logger_instance
+ mock_watcher_instance = mock.Mock()
+ mock_watcher_instance.GetMetadata.side_effect = [{}, {'hello': 'world'}]
+ mock_watcher.MetadataWatcher.return_value = mock_watcher_instance
+ mock_config_instance = mock.Mock()
+ mock_config_instance.GetOptionBool.return_value = True
+ mock_config.InstanceConfig.return_value = mock_config_instance
+
+ instance_setup.InstanceSetup.__init__(mock_setup)
+ expected_calls = [
+ # Setup and reading the configuration file.
+ mock.call.logger.Logger(
+ name=mock.ANY, debug=False, facility=mock.ANY),
+ mock.call.watcher.MetadataWatcher(logger=mock_logger_instance),
+ mock.call.config.InstanceConfig(),
+ # Setup for local SSD.
+ mock.call.config.InstanceConfig().GetOptionBool(
+ 'InstanceSetup', 'optimize_local_ssd'),
+ mock.call.setup._RunScript('optimize_local_ssd'),
+ # Setup for multiqueue virtio driver.
+ mock.call.config.InstanceConfig().GetOptionBool(
+ 'InstanceSetup', 'set_multiqueue'),
+ mock.call.setup._RunScript('set_multiqueue'),
+ # Check network access for reaching the metadata server.
+ mock.call.config.InstanceConfig().GetOptionBool(
+ 'InstanceSetup', 'network_enabled'),
+ # Retry metadata requests until network is available.
+ mock.call.watcher.MetadataWatcher().GetMetadata(),
+ mock.call.watcher.MetadataWatcher().GetMetadata(),
+ # Setup for SSH host keys if necessary.
+ mock.call.config.InstanceConfig().GetOptionBool(
+ 'InstanceSetup', 'set_host_keys'),
+ mock.call.setup._SetSshHostKeys(),
+ # Setup for the boto config if necessary.
+ mock.call.config.InstanceConfig().GetOptionBool(
+ 'InstanceSetup', 'set_boto_config'),
+ mock.call.setup._SetupBotoConfig(),
+ # Write the updated config file.
+ mock.call.config.InstanceConfig().WriteConfig(),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+ self.assertEqual(mock_setup.metadata_dict, {'hello': 'world'})
+
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.instance_config')
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.metadata_watcher')
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.logger')
+ def testInstanceSetupException(self, mock_logger, mock_watcher, mock_config):
+ mock_setup = mock.create_autospec(instance_setup.InstanceSetup)
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_logger, 'logger')
+ mocks.attach_mock(mock_watcher, 'watcher')
+ mocks.attach_mock(mock_config, 'config')
+ mocks.attach_mock(mock_setup, 'setup')
+ mock_logger_instance = mock.Mock()
+ mock_logger.Logger.return_value = mock_logger_instance
+ mock_config_instance = mock.Mock()
+ mock_config_instance.GetOptionBool.return_value = False
+ mock_config_instance.WriteConfig.side_effect = IOError('Test Error')
+ mock_config.InstanceConfig.return_value = mock_config_instance
+
+ instance_setup.InstanceSetup.__init__(mock_setup)
+ expected_calls = [
+ mock.call.logger.Logger(
+ name=mock.ANY, debug=False, facility=mock.ANY),
+ mock.call.watcher.MetadataWatcher(logger=mock_logger_instance),
+ mock.call.config.InstanceConfig(),
+ mock.call.config.InstanceConfig().GetOptionBool(
+ 'InstanceSetup', 'optimize_local_ssd'),
+ mock.call.config.InstanceConfig().GetOptionBool(
+ 'InstanceSetup', 'set_multiqueue'),
+ mock.call.config.InstanceConfig().GetOptionBool(
+ 'InstanceSetup', 'network_enabled'),
+ mock.call.config.InstanceConfig().WriteConfig(),
+ mock.call.logger.Logger().warning('Test Error'),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+ self.assertIsNone(mock_setup.metadata_dict)
+
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess')
+ def testRunScript(self, mock_subprocess):
+ mock_readline = mock.Mock()
+ mock_readline.side_effect = [bytes(b'a\n'), bytes(b'b\n'), bytes(b'')]
+ mock_stdout = mock.Mock()
+ mock_stdout.readline = mock_readline
+ mock_process = mock.Mock()
+ mock_process.poll.return_value = 0
+ mock_process.stdout = mock_stdout
+ mock_subprocess.Popen.return_value = mock_process
+ script = '/tmp/script.py'
+
+ instance_setup.InstanceSetup._RunScript(self.mock_setup, script)
+ expected_calls = [mock.call('a'), mock.call('b')]
+ self.assertEqual(self.mock_logger.info.mock_calls, expected_calls)
+ mock_subprocess.Popen.assert_called_once_with(
+ script, shell=True, stderr=mock_subprocess.STDOUT,
+ stdout=mock_subprocess.PIPE)
+ mock_process.poll.assert_called_once_with()
+
+ def testGetInstanceId(self):
+ self.mock_setup.metadata_dict = {'instance': {'attributes': {}, 'id': 123}}
+ self.assertEqual(
+ instance_setup.InstanceSetup._GetInstanceId(self.mock_setup), '123')
+ self.mock_logger.warning.assert_not_called()
+
+ def testGetInstanceIdNotFound(self):
+ self.mock_setup.metadata_dict = {'instance': {'attributes': {}}}
+ self.assertIsNone(
+ instance_setup.InstanceSetup._GetInstanceId(self.mock_setup))
+ self.assertEqual(self.mock_logger.warning.call_count, 1)
+
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.file_utils.SetPermissions')
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.shutil.move')
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.check_call')
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.tempfile.NamedTemporaryFile')
+ def testGenerateSshKey(self, mock_tempfile, mock_call, mock_move,
+ mock_permissions):
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_tempfile, 'tempfile')
+ mocks.attach_mock(mock_call, 'call')
+ mocks.attach_mock(mock_move, 'move')
+ mocks.attach_mock(mock_permissions, 'permissions')
+ mocks.attach_mock(self.mock_logger, 'logger')
+ key_type = 'key-type'
+ key_dest = '/key/dest'
+ temp_dest = '/tmp/dest'
+ mock_tempfile.return_value = mock_tempfile
+ mock_tempfile.__enter__.return_value.name = temp_dest
+
+ instance_setup.InstanceSetup._GenerateSshKey(
+ self.mock_setup, key_type, key_dest)
+ expected_calls = [
+ mock.call.tempfile(prefix=key_type, delete=True),
+ mock.call.tempfile.__enter__(),
+ mock.call.tempfile.__exit__(None, None, None),
+ mock.call.logger.info(mock.ANY, key_dest),
+ mock.call.call(
+ ['ssh-keygen', '-t', key_type, '-f', temp_dest, '-N', '', '-q']),
+ mock.call.move(temp_dest, key_dest),
+ mock.call.move('%s.pub' % temp_dest, '%s.pub' % key_dest),
+ mock.call.permissions(key_dest, mode=0o600),
+ mock.call.permissions('%s.pub' % key_dest, mode=0o644),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.check_call')
+ def testGenerateSshKeyProcessError(self, mock_call):
+ key_type = 'key-type'
+ key_dest = '/key/dest'
+ mock_call.side_effect = subprocess.CalledProcessError(1, 'Test')
+
+ instance_setup.InstanceSetup._GenerateSshKey(
+ self.mock_setup, key_type, key_dest)
+ self.mock_logger.info.assert_called_once_with(mock.ANY, key_dest)
+ self.mock_logger.warning.assert_called_once_with(mock.ANY, key_dest)
+
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.call')
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.os.path.exists')
+ def testStartSshdSysVinit(self, mock_exists, mock_call):
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_exists, 'exists')
+ mocks.attach_mock(mock_call, 'call')
+ mock_exists.side_effect = [False, False, True]
+
+ instance_setup.InstanceSetup._StartSshd(self.mock_setup)
+ expected_calls = [
+ mock.call.exists('/bin/systemctl'),
+ mock.call.exists('/etc/init.d/ssh'),
+ mock.call.exists('/etc/init/ssh.conf'),
+ mock.call.call(['service', 'ssh', 'start']),
+ mock.call.call(['service', 'ssh', 'reload']),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.call')
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.os.path.exists')
+ def testStartSshdUpstart(self, mock_exists, mock_call):
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_exists, 'exists')
+ mocks.attach_mock(mock_call, 'call')
+ mock_exists.side_effect = [False, False, False, False, True]
+
+ instance_setup.InstanceSetup._StartSshd(self.mock_setup)
+ expected_calls = [
+ mock.call.exists('/bin/systemctl'),
+ mock.call.exists('/etc/init.d/ssh'),
+ mock.call.exists('/etc/init/ssh.conf'),
+ mock.call.exists('/etc/init.d/sshd'),
+ mock.call.exists('/etc/init/sshd.conf'),
+ mock.call.call(['service', 'sshd', 'start']),
+ mock.call.call(['service', 'sshd', 'reload']),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.subprocess.call')
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.os.path.exists')
+ def testStartSshdSystemd(self, mock_exists, mock_call):
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_exists, 'exists')
+ mocks.attach_mock(mock_call, 'call')
+ mock_exists.return_value = True
+
+ instance_setup.InstanceSetup._StartSshd(self.mock_setup)
+ expected_calls = [mock.call.exists('/bin/systemctl')]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ def testSetSshHostKeys(self):
+ self.mock_instance_config.GetOptionString.return_value = '123'
+ mock_instance_id = mock.Mock()
+ mock_instance_id.return_value = '123'
+ self.mock_setup._GetInstanceId = mock_instance_id
+
+ instance_setup.InstanceSetup._SetSshHostKeys(self.mock_setup)
+ self.mock_instance_config.SetOption.assert_not_called()
+
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.os.listdir')
+ def testSetSshHostKeysFirstBoot(self, mock_listdir):
+ self.mock_instance_config.GetOptionString.return_value = None
+ mock_instance_id = mock.Mock()
+ mock_instance_id.return_value = '123'
+ self.mock_setup._GetInstanceId = mock_instance_id
+ mock_generate_key = mock.Mock()
+ self.mock_setup._GenerateSshKey = mock_generate_key
+ mock_listdir.return_value = [
+ 'ssh_config',
+ 'ssh_host_rsa_key',
+ 'ssh_host_dsa_key.pub',
+ 'ssh_host_ed25519_key',
+ 'ssh_host_ed25519_key.pub',
+ 'ssh_host_rsa_key',
+ 'ssh_host_rsa_key.pub',
+ ]
+
+ instance_setup.InstanceSetup._SetSshHostKeys(self.mock_setup)
+ expected_calls = [
+ mock.call('rsa', '/etc/ssh/ssh_host_rsa_key'),
+ mock.call('ed25519', '/etc/ssh/ssh_host_ed25519_key'),
+ mock.call('rsa', '/etc/ssh/ssh_host_rsa_key'),
+ ]
+ self.assertEqual(mock_generate_key.mock_calls, expected_calls)
+ self.mock_instance_config.SetOption.assert_called_once_with(
+ 'Instance', 'instance_id', '123')
+
+ def testGetNumericProjectId(self):
+ self.mock_setup.metadata_dict = {
+ 'project': {
+ 'attributes': {},
+ 'numericProjectId': 123,
+ }
+ }
+ self.assertEqual(
+ instance_setup.InstanceSetup._GetNumericProjectId(self.mock_setup),
+ '123')
+ self.mock_logger.warning.assert_not_called()
+
+ def testGetNumericProjectIdNotFound(self):
+ self.mock_setup.metadata_dict = {'project': {'attributes': {}}}
+ self.assertIsNone(
+ instance_setup.InstanceSetup._GetNumericProjectId(self.mock_setup))
+ self.assertEqual(self.mock_logger.warning.call_count, 1)
+
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.boto_config.BotoConfig')
+ def testSetupBotoConfig(self, mock_boto):
+ mock_project_id = mock.Mock()
+ mock_project_id.return_value = '123'
+ self.mock_setup._GetNumericProjectId = mock_project_id
+ instance_setup.InstanceSetup._SetupBotoConfig(self.mock_setup)
+ mock_boto.assert_called_once_with('123')
+
+ @mock.patch('google_compute_engine.instance_setup.instance_setup.boto_config.BotoConfig')
+ def testSetupBotoConfigLocked(self, mock_boto):
+ mock_boto.side_effect = IOError('Test Error')
+ instance_setup.InstanceSetup._SetupBotoConfig(self.mock_setup)
+ self.mock_logger.warning.assert_called_once_with('Test Error')
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/google_compute_engine/ip_forwarding/__init__.py b/google_compute_engine/ip_forwarding/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/google_compute_engine/ip_forwarding/__init__.py
diff --git a/google_compute_engine/ip_forwarding/ip_forwarding_daemon.py b/google_compute_engine/ip_forwarding/ip_forwarding_daemon.py
new file mode 100755
index 0000000..e1abe4a
--- /dev/null
+++ b/google_compute_engine/ip_forwarding/ip_forwarding_daemon.py
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Manage IP forwarding on a Google Compute Engine instance.
+
+Fetch the list of public endpoint IPs from the metadata server, compare it
+with the IPs configured on eth0, and add or remove addresses from eth0 to
+make them match. Only addresses with our protocol identifier are removed.
+
+Command used to add IPs:
+ ip route add to local $IP/32 dev eth0 proto 66
+Command used to fetch list of configured IPs:
+ ip route ls table local type local dev eth0 scope host proto 66
+"""
+
+import logging.handlers
+import optparse
+
+from google_compute_engine import config_manager
+from google_compute_engine import file_utils
+from google_compute_engine import logger
+from google_compute_engine import metadata_watcher
+
+from google_compute_engine.ip_forwarding import ip_forwarding_utils
+
+LOCKFILE = '/var/lock/google_ip_forwarding.lock'
+
+
+class IpForwardingDaemon(object):
+ """Manage IP forwarding based on changes to forwarded IPs metadata."""
+
+ forwarded_ips = 'instance/network-interfaces/0/forwarded-ips'
+
+ def __init__(self, proto_id=None, debug=False):
+ """Constructor.
+
+ Args:
+ proto_id: string, the routing protocol identifier for Google IP changes.
+ debug: bool, True if debug output should write to the console.
+ """
+ facility = logging.handlers.SysLogHandler.LOG_DAEMON
+ self.logger = logger.Logger(
+ name='google-ip-forwarding', debug=debug, facility=facility)
+ self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
+ self.utils = ip_forwarding_utils.IpForwardingUtils(
+ logger=self.logger, proto_id=proto_id)
+ try:
+ with file_utils.LockFile(LOCKFILE):
+ self.logger.info('Starting Google IP Forwarding daemon.')
+ self.watcher.WatchMetadata(
+ self.HandleForwardedIps, metadata_key=self.forwarded_ips,
+ recursive=True)
+ except (IOError, OSError) as e:
+ self.logger.warning(str(e))
+
+ def _LogForwardedIpChanges(self, configured, desired, to_add, to_remove):
+ """Log the planned IP address changes.
+
+ Args:
+ configured: list, the IP address strings already configured.
+ desired: list, the IP address strings that will be configured.
+ to_add: list, the forwarded IP address strings to configure.
+ to_remove: list, the forwarded IP address strings to delete.
+ """
+ if not to_add and not to_remove:
+ return
+ self.logger.info(
+ 'Changing forwarded IPs from %s to %s by adding %s and removing %s.',
+ configured or None, desired or None, to_add or None, to_remove or None)
+
+ def _AddForwardedIps(self, forwarded_ips):
+ """Configure the forwarded IP address on the network interface.
+
+ Args:
+ forwarded_ips: list, the forwarded IP address strings to configure.
+ """
+ for address in forwarded_ips:
+ self.utils.AddForwardedIp(address)
+
+ def _RemoveForwardedIps(self, forwarded_ips):
+ """Remove the forwarded IP addresses from the network interface.
+
+ Args:
+ forwarded_ips: list, the forwarded IP address strings to delete.
+ """
+ for address in forwarded_ips:
+ self.utils.RemoveForwardedIp(address)
+
+ def HandleForwardedIps(self, result):
+ """Called when forwarded IPs metadata changes.
+
+ Args:
+ result: string, the metadata response with the new forwarded IP addresses.
+ """
+ desired = self.utils.ParseForwardedIps(result)
+ configured = self.utils.GetForwardedIps()
+ to_add = sorted(set(desired) - set(configured))
+ to_remove = sorted(set(configured) - set(desired))
+ self._LogForwardedIpChanges(configured, desired, to_add, to_remove)
+ self._AddForwardedIps(to_add)
+ self._RemoveForwardedIps(to_remove)
+
+
+def main():
+ parser = optparse.OptionParser()
+ parser.add_option('-d', '--debug', action='store_true', dest='debug',
+ help='print debug output to the console.')
+ (options, _) = parser.parse_args()
+ instance_config = config_manager.ConfigManager()
+ if instance_config.GetOptionBool('Daemons', 'ip_forwarding_daemon'):
+ IpForwardingDaemon(
+ proto_id=instance_config.GetOptionString(
+ 'IpForwarding', 'ethernet_proto_id'),
+ debug=bool(options.debug))
+
+
+if __name__ == '__main__':
+ main()
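
The reconciliation in HandleForwardedIps reduces to two set differences. A minimal sketch with illustrative addresses:

    configured = ['10.0.0.1', '10.0.0.2']     # currently on the interface
    desired = ['10.0.0.2', '10.0.0.3']        # from the metadata server
    to_add = sorted(set(desired) - set(configured))     # ['10.0.0.3']
    to_remove = sorted(set(configured) - set(desired))  # ['10.0.0.1']
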
diff --git a/google_compute_engine/ip_forwarding/ip_forwarding_utils.py b/google_compute_engine/ip_forwarding/ip_forwarding_utils.py
new file mode 100644
index 0000000..dee62f1
--- /dev/null
+++ b/google_compute_engine/ip_forwarding/ip_forwarding_utils.py
@@ -0,0 +1,129 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for configuring IP address forwarding."""
+
+import re
+import subprocess
+
+IP_REGEX = re.compile(r'\A(\d{1,3}\.){3}\d{1,3}\Z')
+
+
+class IpForwardingUtils(object):
+ """System IP address configuration utilities."""
+
+ def __init__(self, logger, proto_id=None):
+ """Constructor.
+
+ Args:
+ logger: logger object, used to write to SysLog and serial port.
+ proto_id: string, the routing protocol identifier for Google IP changes.
+ """
+ self.logger = logger
+ self.options = {
+ 'dev': self._GetDefaultInterface(),
+ 'proto': proto_id or '66',
+ 'scope': 'host',
+ }
+
+ def _RunIpRoute(self, args=None, options=None):
+ """Run a command with IP route and return the response.
+
+ Args:
+ args: list, the string ip route command args to execute.
+ options: dict, the string parameters to append to the ip route command.
+
+ Returns:
+ string, the standard output from the ip route command execution.
+ """
+ args = args or []
+ options = options or {}
+ command = ['ip', 'route']
+ command.extend(args)
+ for item in options.items():
+ command.extend(item)
+ try:
+ process = subprocess.Popen(
+ command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = process.communicate()
+ except OSError as e:
+ self.logger.warning('Exception running %s. %s.', command, str(e))
+ else:
+ if process.returncode:
+ message = 'Non-zero exit status running %s. %s.'
+ self.logger.warning(message, command, stderr.strip())
+ else:
+ return stdout
+ return ''
+
+ def _GetDefaultInterface(self):
+ """Get the name of the default network interface.
+
+ Returns:
+ string, the name of the default network interface.
+ """
+ result = self._RunIpRoute(args=['list'])
+ for route in result.decode('utf-8').split('\n'):
+ fields = route.split()
+ if fields and fields[0] == 'default' and 'dev' in fields:
+ index = fields.index('dev') + 1
+ return fields[index] if index < len(fields) else 'eth0'
+ return 'eth0'
+
+ def ParseForwardedIps(self, forwarded_ips):
+ """Parse and validate forwarded IP addresses.
+
+ Args:
+ forwarded_ips: list, the IP address strings to parse.
+
+ Returns:
+ list, the valid IP address strings.
+ """
+ addresses = []
+ forwarded_ips = forwarded_ips or []
+ for ip in forwarded_ips:
+ if ip and IP_REGEX.match(ip):
+ addresses.append(ip)
+ else:
+ self.logger.warning('Could not parse IP address: "%s".', ip)
+ return addresses
+
+ def GetForwardedIps(self):
+ """Retrieve the list of configured forwarded IP addresses.
+
+ Returns:
+ list, the IP address strings.
+ """
+ args = ['ls', 'table', 'local', 'type', 'local']
+ result = self._RunIpRoute(args=args, options=self.options)
+ return self.ParseForwardedIps(result.split())
+
+ def AddForwardedIp(self, address):
+ """Configure a new IP address on the network interface.
+
+ Args:
+ address: string, the IP address to configure.
+ """
+ args = ['add', 'to', 'local', '%s/32' % address]
+ self._RunIpRoute(args=args, options=self.options)
+
+ def RemoveForwardedIp(self, address):
+ """Delete an IP address on the network interface.
+
+ Args:
+      address: string, the IP address to delete.
+ """
+ args = ['delete', 'to', 'local', '%s/32' % address]
+ self._RunIpRoute(args=args, options=self.options)
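
For reference, the command line _RunIpRoute assembles from its args and options, shown with the constructor's default protocol and scope and an illustrative device and address (the order of the option pairs follows dict iteration, so it may vary on older interpreters):

    args = ['add', 'to', 'local', '192.0.2.1/32']
    options = {'dev': 'eth0', 'proto': '66', 'scope': 'host'}
    command = ['ip', 'route'] + list(args)
    for item in options.items():
        command.extend(item)  # appends each (key, value) as two argv entries
    # e.g. ['ip', 'route', 'add', 'to', 'local', '192.0.2.1/32',
    #       'dev', 'eth0', 'proto', '66', 'scope', 'host']
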
diff --git a/google_compute_engine/ip_forwarding/tests/ip_forwarding_daemon_test.py b/google_compute_engine/ip_forwarding/tests/ip_forwarding_daemon_test.py
new file mode 100644
index 0000000..769eebf
--- /dev/null
+++ b/google_compute_engine/ip_forwarding/tests/ip_forwarding_daemon_test.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for ip_forwarding_daemon.py module."""
+
+from google_compute_engine.ip_forwarding import ip_forwarding_daemon
+from google_compute_engine.test_compat import mock
+from google_compute_engine.test_compat import unittest
+
+
+class IpForwardingDaemonTest(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_logger = mock.Mock()
+ self.mock_watcher = mock.Mock()
+ self.mock_utils = mock.Mock()
+
+ self.mock_setup = mock.create_autospec(
+ ip_forwarding_daemon.IpForwardingDaemon)
+ self.mock_setup.logger = self.mock_logger
+ self.mock_setup.watcher = self.mock_watcher
+ self.mock_setup.utils = self.mock_utils
+
+ @mock.patch('google_compute_engine.ip_forwarding.ip_forwarding_daemon.ip_forwarding_utils')
+ @mock.patch('google_compute_engine.ip_forwarding.ip_forwarding_daemon.metadata_watcher')
+ @mock.patch('google_compute_engine.ip_forwarding.ip_forwarding_daemon.logger')
+ @mock.patch('google_compute_engine.ip_forwarding.ip_forwarding_daemon.file_utils')
+ def testIpForwardingDaemon(self, mock_lock, mock_logger, mock_watcher,
+ mock_utils):
+ mock_logger_instance = mock.Mock()
+ mock_logger.Logger.return_value = mock_logger_instance
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_lock, 'lock')
+ mocks.attach_mock(mock_logger, 'logger')
+ mocks.attach_mock(mock_utils, 'utils')
+ mocks.attach_mock(mock_watcher, 'watcher')
+ metadata_key = ip_forwarding_daemon.IpForwardingDaemon.forwarded_ips
+ with mock.patch.object(
+ ip_forwarding_daemon.IpForwardingDaemon,
+ 'HandleForwardedIps') as mock_handle:
+ ip_forwarding_daemon.IpForwardingDaemon(proto_id='66', debug=True)
+ expected_calls = [
+ mock.call.logger.Logger(name=mock.ANY, debug=True, facility=mock.ANY),
+ mock.call.watcher.MetadataWatcher(logger=mock_logger_instance),
+ mock.call.utils.IpForwardingUtils(
+ logger=mock_logger_instance, proto_id='66'),
+ mock.call.lock.LockFile(ip_forwarding_daemon.LOCKFILE),
+ mock.call.lock.LockFile().__enter__(),
+ mock.call.logger.Logger().info(mock.ANY),
+ mock.call.watcher.MetadataWatcher().WatchMetadata(
+ mock_handle, metadata_key=metadata_key, recursive=True),
+ mock.call.lock.LockFile().__exit__(None, None, None),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.ip_forwarding.ip_forwarding_daemon.ip_forwarding_utils')
+ @mock.patch('google_compute_engine.ip_forwarding.ip_forwarding_daemon.metadata_watcher')
+ @mock.patch('google_compute_engine.ip_forwarding.ip_forwarding_daemon.logger')
+ @mock.patch('google_compute_engine.ip_forwarding.ip_forwarding_daemon.file_utils')
+ def testIpForwardingDaemonError(self, mock_lock, mock_logger, mock_watcher,
+ mock_utils):
+ mock_logger_instance = mock.Mock()
+ mock_logger.Logger.return_value = mock_logger_instance
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_lock, 'lock')
+ mocks.attach_mock(mock_logger, 'logger')
+ mocks.attach_mock(mock_utils, 'utils')
+ mocks.attach_mock(mock_watcher, 'watcher')
+ mock_lock.LockFile.side_effect = IOError('Test Error')
+ with mock.patch.object(
+ ip_forwarding_daemon.IpForwardingDaemon, 'HandleForwardedIps'):
+ ip_forwarding_daemon.IpForwardingDaemon()
+ expected_calls = [
+ mock.call.logger.Logger(
+ name=mock.ANY, debug=False, facility=mock.ANY),
+ mock.call.watcher.MetadataWatcher(logger=mock_logger_instance),
+ mock.call.utils.IpForwardingUtils(
+ logger=mock_logger_instance, proto_id=None),
+ mock.call.lock.LockFile(ip_forwarding_daemon.LOCKFILE),
+ mock.call.logger.Logger().warning('Test Error'),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ def testLogForwardedIpChanges(self):
+ ip_forwarding_daemon.IpForwardingDaemon._LogForwardedIpChanges(
+ self.mock_setup, [], [], [], [])
+ ip_forwarding_daemon.IpForwardingDaemon._LogForwardedIpChanges(
+ self.mock_setup, ['a'], ['a'], [], [])
+ ip_forwarding_daemon.IpForwardingDaemon._LogForwardedIpChanges(
+ self.mock_setup, ['a'], [], [], ['a'])
+ ip_forwarding_daemon.IpForwardingDaemon._LogForwardedIpChanges(
+ self.mock_setup, ['a', 'b'], ['b'], [], ['a'])
+ ip_forwarding_daemon.IpForwardingDaemon._LogForwardedIpChanges(
+ self.mock_setup, ['a'], ['b'], ['b'], ['a'])
+ expected_calls = [
+ mock.call.info(mock.ANY, ['a'], None, None, ['a']),
+ mock.call.info(mock.ANY, ['a', 'b'], ['b'], None, ['a']),
+ mock.call.info(mock.ANY, ['a'], ['b'], ['b'], ['a']),
+ ]
+ self.assertEqual(self.mock_logger.mock_calls, expected_calls)
+
+ def testAddForwardedIp(self):
+ ip_forwarding_daemon.IpForwardingDaemon._AddForwardedIps(
+ self.mock_setup, [])
+ self.assertEqual(self.mock_utils.mock_calls, [])
+
+ ip_forwarding_daemon.IpForwardingDaemon._AddForwardedIps(
+ self.mock_setup, ['a', 'b', 'c'])
+ expected_calls = [
+ mock.call.AddForwardedIp('a'),
+ mock.call.AddForwardedIp('b'),
+ mock.call.AddForwardedIp('c'),
+ ]
+ self.assertEqual(self.mock_utils.mock_calls, expected_calls)
+
+ def testRemoveForwardedIp(self):
+ ip_forwarding_daemon.IpForwardingDaemon._RemoveForwardedIps(
+ self.mock_setup, [])
+ self.assertEqual(self.mock_utils.mock_calls, [])
+
+ ip_forwarding_daemon.IpForwardingDaemon._RemoveForwardedIps(
+ self.mock_setup, ['a', 'b', 'c'])
+ expected_calls = [
+ mock.call.RemoveForwardedIp('a'),
+ mock.call.RemoveForwardedIp('b'),
+ mock.call.RemoveForwardedIp('c'),
+ ]
+ self.assertEqual(self.mock_utils.mock_calls, expected_calls)
+
+ def testHandleForwardedIps(self):
+ configured = ['c', 'c', 'b', 'b', 'a', 'a']
+ desired = ['d', 'd', 'c']
+ mocks = mock.Mock()
+ mocks.attach_mock(self.mock_utils, 'utils')
+ mocks.attach_mock(self.mock_setup, 'setup')
+ self.mock_utils.ParseForwardedIps.return_value = desired
+ self.mock_utils.GetForwardedIps.return_value = configured
+ result = 'result'
+ expected_add = ['d']
+ expected_remove = ['a', 'b']
+
+ ip_forwarding_daemon.IpForwardingDaemon.HandleForwardedIps(
+ self.mock_setup, result)
+ expected_calls = [
+ mock.call.utils.ParseForwardedIps(result),
+ mock.call.utils.GetForwardedIps(),
+ mock.call.setup._LogForwardedIpChanges(
+ configured, desired, expected_add, expected_remove),
+ mock.call.setup._AddForwardedIps(expected_add),
+ mock.call.setup._RemoveForwardedIps(expected_remove),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/google_compute_engine/ip_forwarding/tests/ip_forwarding_utils_test.py b/google_compute_engine/ip_forwarding/tests/ip_forwarding_utils_test.py
new file mode 100644
index 0000000..0d32939
--- /dev/null
+++ b/google_compute_engine/ip_forwarding/tests/ip_forwarding_utils_test.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for ip_forwarding_utils.py module."""
+
+from google_compute_engine.ip_forwarding import ip_forwarding_utils
+from google_compute_engine.test_compat import mock
+from google_compute_engine.test_compat import unittest
+
+
+class IpForwardingUtilsTest(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_logger = mock.Mock()
+ self.options = {'hello': 'world'}
+ self.mock_utils = ip_forwarding_utils.IpForwardingUtils(self.mock_logger)
+ self.mock_utils.options = self.options
+
+ @mock.patch('google_compute_engine.ip_forwarding.ip_forwarding_utils.subprocess')
+ def testRunIpRoute(self, mock_subprocess):
+ mock_process = mock.Mock()
+ mock_process.returncode = 0
+ mock_process.communicate.return_value = ('out', '')
+ mock_subprocess.Popen.return_value = mock_process
+ args = ['foo', 'bar']
+ options = {'one': 'two'}
+
+ self.assertEqual(
+ self.mock_utils._RunIpRoute(args=args, options=options), 'out')
+ command = ['ip', 'route', 'foo', 'bar', 'one', 'two']
+ mock_subprocess.Popen.assert_called_once_with(
+ command, stdout=mock_subprocess.PIPE, stderr=mock_subprocess.PIPE)
+ mock_process.communicate.assert_called_once_with()
+ self.mock_logger.warning.assert_not_called()
+
+ @mock.patch('google_compute_engine.ip_forwarding.ip_forwarding_utils.subprocess')
+ def testRunIpRouteReturnCode(self, mock_subprocess):
+ mock_process = mock.Mock()
+ mock_process.returncode = 1
+ mock_process.communicate.return_value = ('out', 'error\n')
+ mock_subprocess.Popen.return_value = mock_process
+
+ self.assertEqual(
+ self.mock_utils._RunIpRoute(args=['foo', 'bar'], options=self.options),
+ '')
+ command = ['ip', 'route', 'foo', 'bar', 'hello', 'world']
+ self.mock_logger.warning.assert_called_once_with(mock.ANY, command, 'error')
+
+ @mock.patch('google_compute_engine.ip_forwarding.ip_forwarding_utils.subprocess')
+ def testRunIpRouteException(self, mock_subprocess):
+ mock_subprocess.Popen.side_effect = OSError('Test Error')
+
+ self.assertEqual(
+ self.mock_utils._RunIpRoute(args=['foo', 'bar'], options=self.options),
+ '')
+ command = ['ip', 'route', 'foo', 'bar', 'hello', 'world']
+ self.mock_logger.warning.assert_called_once_with(
+ mock.ANY, command, 'Test Error')
+
+ def testGetDefaultInterface(self):
+ mock_run = mock.Mock()
+ mock_run.side_effect = [
+ bytes(b''),
+ bytes(b'invalid route\n'),
+ bytes(b'default invalid interface\n'),
+ bytes(b'default dev\n'),
+ bytes(b'\n\n\ndefault dev interface\n\n\n'),
+ bytes(b'default via ip dev interface\nip default eth0\n'),
+ bytes(b'ip default eth0\ndefault via ip dev interface\n'),
+ ]
+ self.mock_utils._RunIpRoute = mock_run
+
+ # Invalid routes default to 'eth0'.
+ self.assertEqual(self.mock_utils._GetDefaultInterface(), 'eth0')
+ self.assertEqual(self.mock_utils._GetDefaultInterface(), 'eth0')
+ self.assertEqual(self.mock_utils._GetDefaultInterface(), 'eth0')
+ self.assertEqual(self.mock_utils._GetDefaultInterface(), 'eth0')
+
+ # Valid routes where the expected response is 'interface'.
+ self.assertEqual(self.mock_utils._GetDefaultInterface(), 'interface')
+ self.assertEqual(self.mock_utils._GetDefaultInterface(), 'interface')
+ self.assertEqual(self.mock_utils._GetDefaultInterface(), 'interface')
+
+ def testParseForwardedIps(self):
+ self.assertEqual(self.mock_utils.ParseForwardedIps(None), [])
+ self.assertEqual(self.mock_utils.ParseForwardedIps([]), [])
+ self.assertEqual(self.mock_utils.ParseForwardedIps([None]), [])
+ self.assertEqual(self.mock_utils.ParseForwardedIps(['invalid']), [])
+ self.assertEqual(self.mock_utils.ParseForwardedIps(['1a1a1a1']), [])
+ self.assertEqual(self.mock_utils.ParseForwardedIps(['1.1.1.1.1']), [])
+ self.assertEqual(self.mock_utils.ParseForwardedIps(['1111.1.1.1']), [])
+ self.assertEqual(self.mock_utils.ParseForwardedIps(['1.1.1.1111']), [])
+ expected_calls = [
+ mock.call.warning(mock.ANY, None),
+ mock.call.warning(mock.ANY, 'invalid'),
+ mock.call.warning(mock.ANY, '1a1a1a1'),
+ mock.call.warning(mock.ANY, '1.1.1.1.1'),
+ mock.call.warning(mock.ANY, '1111.1.1.1'),
+ mock.call.warning(mock.ANY, '1.1.1.1111'),
+ ]
+ self.assertEqual(self.mock_logger.mock_calls, expected_calls)
+
+ def testParseForwardedIpsComplex(self):
+ forwarded_ips = {
+ '{{}}\n\"hello\"\n!@#$%^&*()\n\n': False,
+ '1111.1.1.1': False,
+ '1.1.1.1': True,
+ 'hello': False,
+ '123.123.123.123': True,
+ '1.1.1.': False,
+ '1.1.1.a': False,
+ None: False,
+ '1.0.0.0': True,
+ }
+ input_ips = forwarded_ips.keys()
+ valid_ips = [ip for ip, valid in forwarded_ips.items() if valid]
+ invalid_ips = [ip for ip, valid in forwarded_ips.items() if not valid]
+
+ self.assertEqual(self.mock_utils.ParseForwardedIps(input_ips), valid_ips)
+ expected_calls = [mock.call.warning(mock.ANY, ip) for ip in invalid_ips]
+ self.assertEqual(self.mock_logger.mock_calls, expected_calls)
+
+ def testGetForwardedIps(self):
+ mock_run = mock.Mock()
+ mock_run.return_value = ''
+ mock_parse = mock.Mock()
+ mock_parse.return_value = ['Test']
+ self.mock_utils._RunIpRoute = mock_run
+ self.mock_utils.ParseForwardedIps = mock_parse
+
+ self.assertEqual(self.mock_utils.GetForwardedIps(), ['Test'])
+ mock_run.assert_called_once_with(
+ args=['ls', 'table', 'local', 'type', 'local'], options=self.options)
+ mock_parse.assert_called_once_with([])
+
+ def testGetForwardedIpsSplit(self):
+ mock_run = mock.Mock()
+ mock_run.return_value = 'a\nb\n'
+ mock_parse = mock.Mock()
+ self.mock_utils._RunIpRoute = mock_run
+ self.mock_utils.ParseForwardedIps = mock_parse
+
+ self.mock_utils.GetForwardedIps()
+ mock_parse.assert_called_once_with(['a', 'b'])
+
+ def testAddForwardedIp(self):
+ mock_run = mock.Mock()
+ self.mock_utils._RunIpRoute = mock_run
+
+ self.mock_utils.AddForwardedIp('1.1.1.1')
+ mock_run.assert_called_once_with(
+ args=['add', 'to', 'local', '1.1.1.1/32'], options=self.options)
+
+ def testRemoveForwardedIp(self):
+ mock_run = mock.Mock()
+ self.mock_utils._RunIpRoute = mock_run
+
+ self.mock_utils.RemoveForwardedIp('1.1.1.1')
+ mock_run.assert_called_once_with(
+ args=['delete', 'to', 'local', '1.1.1.1/32'], options=self.options)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/google_compute_engine/logger.py b/google_compute_engine/logger.py
new file mode 100644
index 0000000..a8e49a3
--- /dev/null
+++ b/google_compute_engine/logger.py
@@ -0,0 +1,54 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A library for logging text to SysLog and the serial console."""
+
+import logging
+import logging.handlers
+
+
+def Logger(name, debug=False, facility=None):
+ """Get a logging object with handlers for sending logs to SysLog.
+
+ Args:
+ name: string, the name of the logger which will be added to log entries.
+ debug: bool, True if debug output should write to the console.
+ facility: int, an encoding of the SysLog handler's facility and priority.
+
+ Returns:
+ logging object, an object for logging entries.
+ """
+ logger = logging.getLogger(name)
+ logger.handlers = []
+ logger.propagate = False
+ logger.setLevel(logging.DEBUG)
+ formatter = logging.Formatter(name + ': %(levelname)s %(message)s')
+
+ if debug:
+ # Create a handler for console logging.
+ console_handler = logging.StreamHandler()
+ console_handler.setLevel(logging.DEBUG)
+ console_handler.setFormatter(formatter)
+ logger.addHandler(console_handler)
+
+ if facility:
+ # Create a handler for sending logs to SysLog.
+ syslog_handler = logging.handlers.SysLogHandler(
+ address='/dev/log', facility=facility)
+ syslog_handler.setLevel(logging.INFO)
+ syslog_handler.setFormatter(formatter)
+ logger.addHandler(syslog_handler)
+
+ return logger
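+
+# A minimal usage sketch of the function above; the logger name is
+# illustrative, and the facility mirrors what callers in this package pass:
+#
+#   import logging.handlers
+#   from google_compute_engine import logger
+#
+#   log = logger.Logger(
+#       name='google-example', debug=True,
+#       facility=logging.handlers.SysLogHandler.LOG_DAEMON)
+#   log.info('Logging to the console and SysLog.')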
diff --git a/google_compute_engine/metadata_scripts/__init__.py b/google_compute_engine/metadata_scripts/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/google_compute_engine/metadata_scripts/__init__.py
diff --git a/google_compute_engine/metadata_scripts/script_executor.py b/google_compute_engine/metadata_scripts/script_executor.py
new file mode 100644
index 0000000..1293833
--- /dev/null
+++ b/google_compute_engine/metadata_scripts/script_executor.py
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Execute user provided metadata scripts."""
+
+import os
+import stat
+import subprocess
+
+
+class ScriptExecutor(object):
+ """A class for executing user provided metadata scripts."""
+
+ def __init__(self, logger, script_type):
+ """Constructor.
+
+ Args:
+ logger: logger object, used to write to SysLog and serial port.
+ script_type: string, the type of the script we are running.
+ """
+ self.logger = logger
+ self.script_type = script_type
+
+ def _MakeExecutable(self, metadata_script):
+ """Add executable permissions to a file.
+
+ Args:
+ metadata_script: string, the path to the executable file.
+ """
+ mode = os.stat(metadata_script).st_mode
+ os.chmod(metadata_script, mode | stat.S_IEXEC)
+
+ def _RunScript(self, metadata_key, metadata_script):
+ """Run a script and log the streamed script output.
+
+ Args:
+ metadata_key: string, the key specifying the metadata script.
+ metadata_script: string, the file location of an executable script.
+ """
+ process = subprocess.Popen(
+ metadata_script, shell=True, stderr=subprocess.STDOUT,
+ stdout=subprocess.PIPE)
+ while True:
+ for line in iter(process.stdout.readline, b''):
+ message = line.decode('utf-8').rstrip('\n')
+ if message:
+ self.logger.info('%s: %s', metadata_key, message)
+ if process.poll() is not None:
+ break
+ self.logger.info('%s: Return code %s.', metadata_key, process.returncode)
+
+ def RunScripts(self, script_dict):
+ """Run the metadata scripts; execute a URL script first if one is provided.
+
+ Args:
+ script_dict: a dictionary mapping metadata keys to script files.
+ """
+ metadata_types = ['%s-script-url', '%s-script']
+ metadata_keys = [key % self.script_type for key in metadata_types]
+ metadata_keys = [key for key in metadata_keys if script_dict.get(key)]
+ if not metadata_keys:
+ self.logger.info('No %s scripts found in metadata.', self.script_type)
+ for metadata_key in metadata_keys:
+ metadata_script = script_dict.get(metadata_key)
+ self._MakeExecutable(metadata_script)
+ self._RunScript(metadata_key, metadata_script)
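+
+# An illustrative sketch of the expected input (paths are hypothetical):
+# RunScripts consumes a dict produced by the script retriever, and for
+# script_type 'startup' the URL script runs before the inline script:
+#
+#   executor = ScriptExecutor(log, 'startup')
+#   executor.RunScripts({
+#       'startup-script-url': '/tmp/startup-abc123/tmpXYZ',
+#       'startup-script': '/tmp/startup-abc123/tmpABC',
+#   })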
diff --git a/google_compute_engine/metadata_scripts/script_manager.py b/google_compute_engine/metadata_scripts/script_manager.py
new file mode 100755
index 0000000..511cb6b
--- /dev/null
+++ b/google_compute_engine/metadata_scripts/script_manager.py
@@ -0,0 +1,97 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Manage the retrieval and excution of metadata scripts."""
+
+import contextlib
+import logging.handlers
+import optparse
+import shutil
+import tempfile
+
+from google_compute_engine import config_manager
+from google_compute_engine import logger
+
+from google_compute_engine.metadata_scripts import script_executor
+from google_compute_engine.metadata_scripts import script_retriever
+
+
+@contextlib.contextmanager
+def _CreateTempDir(prefix):
+ """Context manager for creating a temporary directory.
+
+ Args:
+ prefix: string, the prefix for the temporary directory.
+
+ Yields:
+ string, the temporary directory created.
+ """
+ temp_dir = tempfile.mkdtemp(prefix=prefix + '-')
+ try:
+ yield temp_dir
+ finally:
+ shutil.rmtree(temp_dir)
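+
+# A minimal sketch of the context manager above (the generated directory name
+# is an assumption; tempfile chooses the actual suffix):
+#
+#   with _CreateTempDir('startup') as temp_dir:
+#       pass  # temp_dir resembles '/tmp/startup-a1b2c3'; removed on exit.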
+
+
+class ScriptManager(object):
+ """A class for retrieving and executing metadata scripts."""
+
+ def __init__(self, script_type, debug=False):
+ """Constructor.
+
+ Args:
+ script_type: string, the metadata script type to run.
+ debug: bool, True if debug output should write to the console.
+ """
+ self.script_type = script_type
+ name = '%s-script' % self.script_type
+ facility = logging.handlers.SysLogHandler.LOG_DAEMON
+ self.logger = logger.Logger(name=name, debug=debug, facility=facility)
+ self.retriever = script_retriever.ScriptRetriever(self.logger, script_type)
+ self.executor = script_executor.ScriptExecutor(self.logger, script_type)
+ self._RunScripts()
+
+ def _RunScripts(self):
+ with _CreateTempDir(self.script_type) as dest_dir:
+ try:
+ self.logger.info('Starting %s scripts.', self.script_type)
+ script_dict = self.retriever.GetScripts(dest_dir)
+ self.executor.RunScripts(script_dict)
+ finally:
+ self.logger.info('Finished running %s scripts.', self.script_type)
+
+
+def main():
+ script_types = ('startup', 'shutdown')
+ parser = optparse.OptionParser()
+ parser.add_option('-d', '--debug', action='store_true', dest='debug',
+ help='print debug output to the console.')
+ parser.add_option('--script-type', dest='script_type',
+ help='metadata script type.')
+ (options, _) = parser.parse_args()
+ if options.script_type and options.script_type.lower() in script_types:
+ script_type = options.script_type.lower()
+ else:
+ valid_args = ', '.join(script_types)
+ message = 'No valid argument specified. Options: [%s].' % valid_args
+ raise ValueError(message)
+
+ instance_config = config_manager.ConfigManager()
+ if instance_config.GetOptionBool('MetadataScripts', script_type):
+ ScriptManager(script_type, debug=bool(options.debug))
+
+
+if __name__ == '__main__':
+ main()
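+
+# Example invocation (the module path is illustrative; packaging may install
+# an entry point instead):
+#
+#   $ python script_manager.py --script-type startup --debug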
diff --git a/google_compute_engine/metadata_scripts/script_retriever.py b/google_compute_engine/metadata_scripts/script_retriever.py
new file mode 100644
index 0000000..a5c77a5
--- /dev/null
+++ b/google_compute_engine/metadata_scripts/script_retriever.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Retrieve and store user provided metadata scripts."""
+
+import re
+import socket
+import subprocess
+import tempfile
+
+from google_compute_engine import metadata_watcher
+from google_compute_engine.compat import httpclient
+from google_compute_engine.compat import urlerror
+from google_compute_engine.compat import urlretrieve
+
+
+class ScriptRetriever(object):
+ """A class for retrieving and storing user provided metadata scripts."""
+
+ def __init__(self, logger, script_type):
+ """Constructor.
+
+ Args:
+ logger: logger object, used to write to SysLog and serial port.
+ script_type: string, the metadata script type to run.
+ """
+ self.logger = logger
+ self.script_type = script_type
+ self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
+
+ def _DownloadGsUrl(self, url, dest_dir):
+ """Download a Google Storage URL using gsutil.
+
+ Args:
+ url: string, the URL to download.
+ dest_dir: string, the path to a directory for storing metadata scripts.
+
+ Returns:
+ string, the path to the file storing the metadata script.
+ """
+ dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False)
+ dest_file.close()
+ dest = dest_file.name
+
+ self.logger.info('Downloading URL %s to %s using gsutil.', url, dest)
+ try:
+ subprocess.check_call(['gsutil', 'cp', url, dest])
+ return dest
+ except subprocess.CalledProcessError as e:
+ self.logger.warning(
+ 'Could not download %s using gsutil. %s.', url, str(e))
+ except Exception as e:
+ self.logger.warning(
+ 'Exception downloading %s using gsutil. %s.', url, str(e))
+ return None
+
+ def _DownloadUrl(self, url, dest_dir):
+ """Download a script from a given URL.
+
+ Args:
+ url: string, the URL to download.
+ dest_dir: string, the path to a directory for storing metadata scripts.
+
+ Returns:
+ string, the path to the file storing the metadata script.
+ """
+ dest_file = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False)
+ dest_file.close()
+ dest = dest_file.name
+
+ self.logger.info('Downloading URL %s to %s.', url, dest)
+ try:
+ urlretrieve.urlretrieve(url, dest)
+ return dest
+ except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:
+ self.logger.warning('Could not download %s. %s.', url, str(e))
+ except Exception as e:
+ self.logger.warning('Exception downloading %s. %s.', url, str(e))
+ return None
+
+ def _DownloadScript(self, url, dest_dir):
+ """Download the contents of the URL to the destination.
+
+ Args:
+ url: string, the URL to download.
+ dest_dir: string, the path to a directory for storing metadata scripts.
+
+ Returns:
+ string, the path to the file storing the metadata script.
+ """
+ # Check for the preferred Google Storage URL format:
+ # gs://<bucket>/<object>
+ if url.startswith(r'gs://'):
+ return self._DownloadGsUrl(url, dest_dir)
+
+ header = r'http[s]?://'
+ domain = r'storage\.googleapis\.com'
+
+ # Several other Google Storage URL formats are supported below, but it is
+ # preferred that customers specify their object using its
+ # gs://<bucket>/<object> URL.
+ bucket = r'(?P<bucket>[a-z0-9][-_.a-z0-9]*[a-z0-9])'
+
+ # Accept any non-empty string that doesn't contain a wildcard character.
+ # gsutil interprets some characters as wildcards. These characters in
+ # object names make it difficult or impossible to perform various
+ # wildcard operations using gsutil. For a complete list, run
+ # "gsutil help naming".
+ obj = r'(?P<obj>[^\*\?]+)'
+
+ # Check for the Google Storage URLs:
+ # http://<bucket>.storage.googleapis.com/<object>
+ # https://<bucket>.storage.googleapis.com/<object>
+ gs_regex = re.compile(r'\A%s%s\.%s/%s\Z' % (header, bucket, domain, obj))
+ match = gs_regex.match(url)
+ if match:
+ gs_url = r'gs://%s/%s' % (match.group('bucket'), match.group('obj'))
+ return self._DownloadGsUrl(gs_url, dest_dir)
+
+ # Check for the other possible Google Storage URLs:
+ # http://storage.googleapis.com/<bucket>/<object>
+ # https://storage.googleapis.com/<bucket>/<object>
+ #
+ # The following are deprecated but checked:
+ # http://commondatastorage.googleapis.com/<bucket>/<object>
+ # https://commondatastorage.googleapis.com/<bucket>/<object>
+ gs_regex = re.compile(
+ r'\A%s(commondata)?%s/%s/%s\Z' % (header, domain, bucket, obj))
+ match = gs_regex.match(url)
+ if match:
+ gs_url = r'gs://%s/%s' % (match.group('bucket'), match.group('obj'))
+ return self._DownloadGsUrl(gs_url, dest_dir)
+
+ # Unauthenticated download of the object.
+ return self._DownloadUrl(url, dest_dir)
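+
+ # Illustrative examples of the dispatch above (bucket and object names
+ # are hypothetical):
+ #
+ # 'gs://my-bucket/run.sh' -> downloaded with gsutil
+ # 'https://storage.googleapis.com/my-bucket/run.sh'
+ # -> rewritten to 'gs://my-bucket/run.sh'
+ # 'http://my-bucket.storage.googleapis.com/run.sh'
+ # -> rewritten to 'gs://my-bucket/run.sh'
+ # 'https://example.com/run.sh' -> unauthenticated download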
+
+ def _GetAttributeScripts(self, attribute_data, dest_dir):
+ """Retrieve the scripts from attribute metadata.
+
+ Args:
+ attribute_data: dict, the contents of the attributes metadata.
+ dest_dir: string, the path to a directory for storing metadata scripts.
+
+ Returns:
+ dict, a dictionary mapping metadata keys to files storing scripts.
+ """
+ script_dict = {}
+ attribute_data = attribute_data or {}
+ metadata_key = '%s-script' % self.script_type
+ metadata_value = attribute_data.get(metadata_key)
+ if metadata_value:
+ self.logger.info('Found %s in metadata.', metadata_key)
+ with tempfile.NamedTemporaryFile(
+ mode='w', dir=dest_dir, delete=False) as dest:
+ dest.write(metadata_value.lstrip())
+ script_dict[metadata_key] = dest.name
+
+ metadata_key = '%s-script-url' % self.script_type
+ metadata_value = attribute_data.get(metadata_key)
+ if metadata_value:
+ self.logger.info('Found %s in metadata.', metadata_key)
+ script_dict[metadata_key] = self._DownloadScript(metadata_value, dest_dir)
+
+ return script_dict
+
+ def GetScripts(self, dest_dir):
+ """Retrieve the scripts to execute.
+
+ Args:
+ dest_dir: string, the path to a directory for storing metadata scripts.
+
+ Returns:
+ dict, a dictionary mapping metadata keys to files storing scripts.
+ """
+ metadata_dict = self.watcher.GetMetadata() or {}
+
+ try:
+ instance_data = metadata_dict['instance']['attributes']
+ except KeyError:
+ instance_data = None
+ self.logger.warning('Instance attributes were not found.')
+
+ try:
+ project_data = metadata_dict['project']['attributes']
+ except KeyError:
+ project_data = None
+ self.logger.warning('Project attributes were not found.')
+
+ return (self._GetAttributeScripts(instance_data, dest_dir) or
+ self._GetAttributeScripts(project_data, dest_dir))
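+
+# Note: instance-level attributes take precedence; project-level attributes
+# are only consulted when the instance yields no scripts (the 'or' above).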
diff --git a/google_compute_engine/metadata_scripts/tests/script_executor_test.py b/google_compute_engine/metadata_scripts/tests/script_executor_test.py
new file mode 100644
index 0000000..ffe8b99
--- /dev/null
+++ b/google_compute_engine/metadata_scripts/tests/script_executor_test.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for script_executor.py module."""
+
+import stat
+
+from google_compute_engine.metadata_scripts import script_executor
+from google_compute_engine.test_compat import mock
+from google_compute_engine.test_compat import unittest
+
+
+class ScriptExecutorTest(unittest.TestCase):
+
+ def setUp(self):
+ self.script_type = 'test'
+ self.metadata_script = '/tmp/script'
+ self.mock_logger = mock.Mock()
+ self.executor = script_executor.ScriptExecutor(
+ self.mock_logger, self.script_type)
+
+ @mock.patch('google_compute_engine.metadata_scripts.script_executor.os')
+ def testMakeExecutable(self, mock_os):
+ st_mode = 1
+ chmod_mode = st_mode + stat.S_IEXEC
+ mock_os_stat = mock.Mock()
+ mock_os_stat.st_mode = st_mode
+ mock_os.stat.return_value = mock_os_stat
+ self.executor._MakeExecutable(self.metadata_script)
+ mock_os.chmod.assert_called_once_with(self.metadata_script, chmod_mode)
+
+ @mock.patch('google_compute_engine.metadata_scripts.script_executor.subprocess')
+ def testRunScript(self, mock_subprocess):
+ mock_readline = mock.Mock()
+ mock_readline.side_effect = [bytes(b'a\n'), bytes(b'b\n'), bytes(b'')]
+ mock_stdout = mock.Mock()
+ mock_stdout.readline = mock_readline
+ mock_process = mock.Mock()
+ mock_process.poll.return_value = 0
+ mock_process.stdout = mock_stdout
+ mock_process.returncode = 1
+ mock_subprocess.Popen.return_value = mock_process
+ metadata_key = '%s-script' % self.script_type
+
+ self.executor._RunScript(metadata_key, self.metadata_script)
+ expected_calls = [
+ mock.call('%s: %s', metadata_key, 'a'),
+ mock.call('%s: %s', metadata_key, 'b'),
+ mock.call('%s: Return code %s.', metadata_key, 1),
+ ]
+ self.assertEqual(self.mock_logger.info.mock_calls, expected_calls)
+ mock_subprocess.Popen.assert_called_once_with(
+ self.metadata_script, shell=True, stderr=mock_subprocess.STDOUT,
+ stdout=mock_subprocess.PIPE)
+ mock_process.poll.assert_called_once_with()
+
+ def testRunScripts(self):
+ self.executor._MakeExecutable = mock.Mock()
+ self.executor._RunScript = mock.Mock()
+ mocks = mock.Mock()
+ mocks.attach_mock(self.executor._MakeExecutable, 'make_executable')
+ mocks.attach_mock(self.executor._RunScript, 'run_script')
+ mocks.attach_mock(self.mock_logger, 'logger')
+ script_dict = {
+ '%s-script' % self.script_type: 'a',
+ '%s-script-key' % self.script_type: 'b',
+ '%s-script-url' % self.script_type: 'c',
+ }
+
+ self.executor.RunScripts(script_dict)
+ expected_calls = [
+ mock.call.make_executable('c'),
+ mock.call.run_script('%s-script-url' % self.script_type, 'c'),
+ mock.call.make_executable('a'),
+ mock.call.run_script('%s-script' % self.script_type, 'a'),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ def testRunScriptsEmpty(self):
+ self.executor._MakeExecutable = mock.Mock()
+ self.executor._RunScript = mock.Mock()
+ mocks = mock.Mock()
+ mocks.attach_mock(self.executor._MakeExecutable, 'make_executable')
+ mocks.attach_mock(self.executor._RunScript, 'run_script')
+ mocks.attach_mock(self.mock_logger, 'logger')
+ script_dict = {
+ '%s-invalid' % self.script_type: 'script',
+ }
+
+ self.executor.RunScripts(script_dict)
+ expected_calls = [
+ mock.call.logger.info(mock.ANY, self.script_type),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/google_compute_engine/metadata_scripts/tests/script_manager_test.py b/google_compute_engine/metadata_scripts/tests/script_manager_test.py
new file mode 100644
index 0000000..acc6ae1
--- /dev/null
+++ b/google_compute_engine/metadata_scripts/tests/script_manager_test.py
@@ -0,0 +1,67 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for script_manager.py module."""
+
+from google_compute_engine.metadata_scripts import script_manager
+from google_compute_engine.test_compat import mock
+from google_compute_engine.test_compat import unittest
+
+
+class ScriptManagerTest(unittest.TestCase):
+
+ @mock.patch('google_compute_engine.metadata_scripts.script_manager.script_retriever')
+ @mock.patch('google_compute_engine.metadata_scripts.script_manager.logger')
+ @mock.patch('google_compute_engine.metadata_scripts.script_manager.script_executor')
+ @mock.patch('google_compute_engine.metadata_scripts.script_manager.shutil.rmtree')
+ @mock.patch('google_compute_engine.metadata_scripts.script_manager.tempfile.mkdtemp')
+ def testRunScripts(self, mock_mkdir, mock_rmtree, mock_executor, mock_logger,
+ mock_retriever):
+ mock_logger_instance = mock.Mock()
+ mock_logger.Logger.return_value = mock_logger_instance
+ mock_retriever_instance = mock.Mock()
+ mock_retriever.ScriptRetriever.return_value = mock_retriever_instance
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_mkdir, 'mkdir')
+ mocks.attach_mock(mock_rmtree, 'rmtree')
+ mocks.attach_mock(mock_executor, 'executor')
+ mocks.attach_mock(mock_logger, 'logger')
+ mocks.attach_mock(mock_retriever, 'retriever')
+ script_type = 'test'
+ script_name = '%s-script' % script_type
+ script_prefix = '%s-' % script_type
+ test_dir = 'test-dir'
+ test_dict = {'test': 'dict'}
+ mock_mkdir.return_value = test_dir
+ mock_retriever_instance.GetScripts.return_value = test_dict
+
+ script_manager.ScriptManager(script_type)
+ expected_calls = [
+ mock.call.logger.Logger(
+ name=script_name, debug=False, facility=mock.ANY),
+ mock.call.retriever.ScriptRetriever(mock_logger_instance, script_type),
+ mock.call.executor.ScriptExecutor(mock_logger_instance, script_type),
+ mock.call.mkdir(prefix=script_prefix),
+ mock.call.logger.Logger().info(mock.ANY, script_type),
+ mock.call.retriever.ScriptRetriever().GetScripts(test_dir),
+ mock.call.executor.ScriptExecutor().RunScripts(test_dict),
+ mock.call.logger.Logger().info(mock.ANY, script_type),
+ mock.call.rmtree(test_dir),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/google_compute_engine/metadata_scripts/tests/script_retriever_test.py b/google_compute_engine/metadata_scripts/tests/script_retriever_test.py
new file mode 100644
index 0000000..9fa1e79
--- /dev/null
+++ b/google_compute_engine/metadata_scripts/tests/script_retriever_test.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for script_retriever.py module."""
+
+import subprocess
+
+from google_compute_engine.metadata_scripts import script_retriever
+from google_compute_engine.test_compat import mock
+from google_compute_engine.test_compat import unittest
+
+
+class ScriptRetrieverTest(unittest.TestCase):
+
+ def setUp(self):
+ self.script_type = 'test'
+ self.dest_dir = '/tmp'
+ self.dest = '/tmp/file'
+ self.mock_logger = mock.Mock()
+ self.mock_watcher = mock.Mock()
+ self.retriever = script_retriever.ScriptRetriever(
+ self.mock_logger, self.script_type)
+
+ @mock.patch('google_compute_engine.metadata_scripts.script_retriever.subprocess.check_call')
+ @mock.patch('google_compute_engine.metadata_scripts.script_retriever.tempfile.NamedTemporaryFile')
+ def testDownloadGsUrl(self, mock_tempfile, mock_call):
+ gs_url = 'gs://fake/url'
+ mock_tempfile.return_value = mock_tempfile
+ mock_tempfile.name = self.dest
+ self.assertEqual(
+ self.retriever._DownloadGsUrl(gs_url, self.dest_dir), self.dest)
+ mock_tempfile.assert_called_once_with(dir=self.dest_dir, delete=False)
+ mock_tempfile.close.assert_called_once_with()
+ self.mock_logger.info.assert_called_once_with(mock.ANY, gs_url, self.dest)
+ mock_call.assert_called_once_with(['gsutil', 'cp', gs_url, self.dest])
+ self.mock_logger.warning.assert_not_called()
+
+ @mock.patch('google_compute_engine.metadata_scripts.script_retriever.subprocess.check_call')
+ @mock.patch('google_compute_engine.metadata_scripts.script_retriever.tempfile.NamedTemporaryFile')
+ def testDownloadGsUrlProcessError(self, mock_tempfile, mock_call):
+ gs_url = 'gs://fake/url'
+ mock_tempfile.return_value = mock_tempfile
+ mock_tempfile.name = self.dest
+ mock_call.side_effect = subprocess.CalledProcessError(1, 'Test')
+ self.assertIsNone(self.retriever._DownloadGsUrl(gs_url, self.dest_dir))
+ self.assertEqual(self.mock_logger.warning.call_count, 1)
+
+ @mock.patch('google_compute_engine.metadata_scripts.script_retriever.subprocess.check_call')
+ @mock.patch('google_compute_engine.metadata_scripts.script_retriever.tempfile.NamedTemporaryFile')
+ def testDownloadGsUrlException(self, mock_tempfile, mock_call):
+ gs_url = 'gs://fake/url'
+ mock_tempfile.return_value = mock_tempfile
+ mock_tempfile.name = self.dest
+ mock_call.side_effect = Exception('Error.')
+ self.assertIsNone(self.retriever._DownloadGsUrl(gs_url, self.dest_dir))
+ self.assertEqual(self.mock_logger.warning.call_count, 1)
+
+ @mock.patch('google_compute_engine.metadata_scripts.script_retriever.tempfile.NamedTemporaryFile')
+ @mock.patch('google_compute_engine.metadata_scripts.script_retriever.urlretrieve.urlretrieve')
+ def testDownloadUrl(self, mock_retrieve, mock_tempfile):
+ url = 'http://www.google.com/fake/url'
+ mock_tempfile.return_value = mock_tempfile
+ mock_tempfile.name = self.dest
+ self.assertEqual(self.retriever._DownloadUrl(url, self.dest_dir), self.dest)
+ mock_tempfile.assert_called_once_with(dir=self.dest_dir, delete=False)
+ mock_tempfile.close.assert_called_once_with()
+ self.mock_logger.info.assert_called_once_with(mock.ANY, url, self.dest)
+ mock_retrieve.assert_called_once_with(url, self.dest)
+ self.mock_logger.warning.assert_not_called()
+
+ @mock.patch('google_compute_engine.metadata_scripts.script_retriever.tempfile.NamedTemporaryFile')
+ @mock.patch('google_compute_engine.metadata_scripts.script_retriever.urlretrieve.urlretrieve')
+ def testDownloadUrlProcessError(self, mock_retrieve, mock_tempfile):
+ url = 'http://www.google.com/fake/url'
+ mock_tempfile.return_value = mock_tempfile
+ mock_tempfile.name = self.dest
+ mock_retrieve.side_effect = script_retriever.socket.timeout()
+ self.assertIsNone(self.retriever._DownloadUrl(url, self.dest_dir))
+ self.assertEqual(self.mock_logger.warning.call_count, 1)
+
+ @mock.patch('google_compute_engine.metadata_scripts.script_retriever.tempfile.NamedTemporaryFile')
+ @mock.patch('google_compute_engine.metadata_scripts.script_retriever.urlretrieve.urlretrieve')
+ def testDownloadUrlException(self, mock_retrieve, mock_tempfile):
+ url = 'http://www.google.com/fake/url'
+ mock_tempfile.return_value = mock_tempfile
+ mock_tempfile.name = self.dest
+ mock_retrieve.side_effect = Exception('Error.')
+ self.assertIsNone(self.retriever._DownloadUrl(url, self.dest_dir))
+ self.assertEqual(self.mock_logger.warning.call_count, 1)
+
+ def _CreateUrls(self, bucket, obj, gs_match=True):
+ """Creates a URL for each of the supported Google Storage URL formats.
+
+ Args:
+ bucket: string, the Google Storage bucket name.
+ obj: string, the object name in the bucket.
+ gs_match: bool, True if the bucket and object names are valid.
+
+ Returns:
+ (list, dict):
+ list, the URLs to download.
+ dict, Google Storage URLs mapped to their expected 'gs://' format.
+ """
+ gs_url = 'gs://%s/%s' % (bucket, obj)
+ gs_urls = {gs_url: gs_url}
+ url_formats = [
+ 'http://%s.storage.googleapis.com/%s',
+ 'https://%s.storage.googleapis.com/%s',
+ 'http://storage.googleapis.com/%s/%s',
+ 'https://storage.googleapis.com/%s/%s',
+ 'http://commondatastorage.googleapis.com/%s/%s',
+ 'https://commondatastorage.googleapis.com/%s/%s',
+ ]
+ url_formats = [url % (bucket, obj) for url in url_formats]
+ if gs_match:
+ gs_urls.update(dict((url, gs_url) for url in url_formats))
+ return ([], gs_urls)
+ else:
+ return (url_formats, gs_urls)
+
+ def testDownloadScript(self):
+ mock_download_gs = mock.Mock()
+ self.retriever._DownloadGsUrl = mock_download_gs
+ mock_download = mock.Mock()
+ self.retriever._DownloadUrl = mock_download
+ download_urls = []
+ download_gs_urls = {}
+
+ component_urls = [
+ ('@#$%^', '\n\n\n\n', False),
+ ('///////', '///////', False),
+ ('Abc', 'xyz', False),
+ (' abc', 'xyz', False),
+ ('abc', 'xyz?', False),
+ ('abc', 'xyz*', False),
+ ('', 'xyz', False),
+ ('a', 'xyz', False),
+ ('abc', '', False),
+ ('hello', 'world', True),
+ ('hello', 'world!', True),
+ ('hello', 'world !', True),
+ ('hello', 'w o r l d ', True),
+ ('hello', 'w\no\nr\nl\nd ', True),
+ ('123_hello', '1!@#$%^', True),
+ ('123456', 'hello.world', True),
+ ]
+
+ for bucket, obj, gs_match in component_urls:
+ urls, gs_urls = self._CreateUrls(bucket, obj, gs_match=gs_match)
+ download_urls.extend(urls)
+ download_gs_urls.update(gs_urls)
+
+ for url in download_urls:
+ mock_download.reset_mock()
+ self.retriever._DownloadScript(url, self.dest_dir)
+ mock_download.assert_called_once_with(url, self.dest_dir)
+
+ for url, gs_url in download_gs_urls.items():
+ mock_download_gs.reset_mock()
+ self.retriever._DownloadScript(url, self.dest_dir)
+ mock_download_gs.assert_called_once_with(gs_url, self.dest_dir)
+
+ @mock.patch('google_compute_engine.metadata_scripts.script_retriever.tempfile.NamedTemporaryFile')
+ def testGetAttributeScripts(self, mock_tempfile):
+ script = 'echo Hello World.\n'
+ script_dest = '/tmp/script'
+ script_url = 'gs://fake/url'
+ script_url_dest = '/tmp/script_url'
+ attribute_data = {
+ '%s-script' % self.script_type: '\n%s' % script,
+ '%s-script-url' % self.script_type: script_url,
+ }
+ expected_data = {
+ '%s-script' % self.script_type: script_dest,
+ '%s-script-url' % self.script_type: script_url_dest,
+ }
+ # Mock saving a script to a file.
+ mock_dest = mock.Mock()
+ mock_dest.name = script_dest
+ mock_tempfile.__enter__.return_value = mock_dest
+ mock_tempfile.return_value = mock_tempfile
+ # Mock downloading a script from a URL.
+ mock_download = mock.Mock()
+ mock_download.return_value = script_url_dest
+ self.retriever._DownloadScript = mock_download
+
+ self.assertEqual(
+ self.retriever._GetAttributeScripts(attribute_data, self.dest_dir),
+ expected_data)
+ self.assertEqual(self.mock_logger.info.call_count, 2)
+ mock_dest.write.assert_called_once_with(script)
+ mock_download.assert_called_once_with(script_url, self.dest_dir)
+
+ def testGetAttributeScriptsNone(self):
+ attribute_data = {}
+ expected_data = {}
+ self.assertEqual(
+ self.retriever._GetAttributeScripts(attribute_data, self.dest_dir),
+ expected_data)
+ self.mock_logger.info.assert_not_called()
+
+ @mock.patch('google_compute_engine.metadata_scripts.script_retriever.tempfile.NamedTemporaryFile')
+ def testGetScripts(self, mock_tempfile):
+ script_dest = '/tmp/script'
+ script_url_dest = '/tmp/script_url'
+ metadata = {
+ 'instance': {
+ 'attributes': {
+ '%s-script' % self.script_type: 'a',
+ '%s-script-url' % self.script_type: 'b',
+ },
+ },
+ 'project': {
+ 'attributes': {
+ '%s-script' % self.script_type: 'c',
+ '%s-script-url' % self.script_type: 'd',
+ },
+ },
+ }
+ expected_data = {
+ '%s-script' % self.script_type: script_dest,
+ '%s-script-url' % self.script_type: script_url_dest,
+ }
+ self.mock_watcher.GetMetadata.return_value = metadata
+ self.retriever.watcher = self.mock_watcher
+ # Mock saving a script to a file.
+ mock_dest = mock.Mock()
+ mock_dest.name = script_dest
+ mock_tempfile.__enter__.return_value = mock_dest
+ mock_tempfile.return_value = mock_tempfile
+ # Mock downloading a script from a URL.
+ mock_download = mock.Mock()
+ mock_download.return_value = script_url_dest
+ self.retriever._DownloadScript = mock_download
+
+ self.assertEqual(self.retriever.GetScripts(self.dest_dir), expected_data)
+ self.assertEqual(self.mock_logger.info.call_count, 2)
+ mock_dest.write.assert_called_once_with('a')
+ mock_download.assert_called_once_with('b', self.dest_dir)
+
+ def testGetScriptsNone(self):
+ metadata = {
+ 'instance': {
+ 'attributes': None,
+ },
+ 'project': {
+ 'attributes': None,
+ },
+ }
+ expected_data = {}
+ self.mock_watcher.GetMetadata.return_value = metadata
+ self.retriever.watcher = self.mock_watcher
+ self.assertEqual(self.retriever.GetScripts(self.dest_dir), expected_data)
+ self.mock_logger.info.assert_not_called()
+
+ def testGetScriptsNoMetadata(self):
+ metadata = None
+ expected_data = {}
+ self.mock_watcher.GetMetadata.return_value = metadata
+ self.retriever.watcher = self.mock_watcher
+ self.assertEqual(self.retriever.GetScripts(self.dest_dir), expected_data)
+ self.mock_logger.info.assert_not_called()
+ self.assertEqual(self.mock_logger.warning.call_count, 2)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/google_compute_engine/metadata_watcher.py b/google_compute_engine/metadata_watcher.py
new file mode 100644
index 0000000..b8db9b9
--- /dev/null
+++ b/google_compute_engine/metadata_watcher.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A library for watching changes in the metadata server."""
+
+import functools
+import json
+import os
+import socket
+import time
+
+from google_compute_engine.compat import httpclient
+from google_compute_engine.compat import urlerror
+from google_compute_engine.compat import urlparse
+from google_compute_engine.compat import urlrequest
+
+METADATA_SERVER = 'http://metadata.google.internal/computeMetadata/v1'
+
+
+class StatusException(urlerror.HTTPError):
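+ """An exception wrapping a non-OK HTTP response from the metadata server."""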
+
+ def __init__(self, response):
+ url = response.geturl()
+ code = response.getcode()
+ message = httpclient.responses.get(code)
+ headers = response.headers
+ super(StatusException, self).__init__(url, code, message, headers, response)
+
+
+def RetryOnUnavailable(func):
+ """Function decorator to retry on a service unavailable exception."""
+
+ @functools.wraps(func)
+ def Wrapper(*args, **kwargs):
+ while True:
+ try:
+ response = func(*args, **kwargs)
+ except urlerror.HTTPError as e:
+ if e.getcode() == httpclient.SERVICE_UNAVAILABLE:
+ time.sleep(5)
+ else:
+ raise
+ else:
+ if response.getcode() == httpclient.OK:
+ return response
+ else:
+ raise StatusException(response)
+ return Wrapper
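+
+# A minimal sketch of the decorator above applied to a hypothetical fetcher:
+#
+#   @RetryOnUnavailable
+#   def _Fetch(url):
+#       return urlrequest.urlopen(url)
+#
+# A 503 response sleeps five seconds and retries; any other HTTP error
+# re-raises, and a response with a non-OK status raises StatusException.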
+
+
+class MetadataWatcher(object):
+ """Watches for changes in metadata."""
+
+ def __init__(self, logger, timeout=60):
+ """Constructor.
+
+ Args:
+ logger: logger object, used to write to SysLog and serial port.
+ timeout: int, timeout in seconds for metadata requests.
+ """
+ self.etag = 0
+ self.logger = logger
+ self.timeout = timeout
+
+ @RetryOnUnavailable
+ def _GetMetadataRequest(self, metadata_url, params=None):
+ """Performs a GET request with the metadata headers.
+
+ Args:
+ metadata_url: string, the URL to perform a GET request on.
+ params: dictionary, the query parameters in the GET request.
+
+ Returns:
+ HTTP response from the GET request.
+
+ Raises:
+ urlerror.HTTPError: raises when the GET request fails.
+ """
+ headers = {'Metadata-Flavor': 'Google'}
+ params = urlparse.urlencode(params or {})
+ url = '%s?%s' % (metadata_url, params)
+ request = urlrequest.Request(url, headers=headers)
+ return urlrequest.urlopen(request, timeout=self.timeout*1.1)
+
+ def _UpdateEtag(self, response):
+ """Update the etag from an API response.
+
+ Args:
+ response: HTTP response with a header field.
+
+ Returns:
+ bool, True if the etag in the response header was updated.
+ """
+ etag = response.headers.get('etag', self.etag)
+ etag_updated = self.etag != etag
+ self.etag = etag
+ return etag_updated
+
+ def _GetMetadataUpdate(self, metadata_key='', recursive=True, wait=True):
+ """Request the contents of metadata server and deserialize the response.
+
+ Args:
+ metadata_key: string, the metadata key to watch for changes.
+ recursive: bool, True if we should recursively watch for metadata changes.
+ wait: bool, True if we should wait for a metadata change.
+
+ Returns:
+ json, the deserialized contents of the metadata server.
+ """
+ metadata_key = os.path.join(metadata_key, '') if recursive else metadata_key
+ metadata_url = os.path.join(METADATA_SERVER, metadata_key)
+ params = {
+ 'alt': 'json',
+ 'last_etag': self.etag,
+ 'recursive': recursive,
+ 'timeout_sec': self.timeout,
+ 'wait_for_change': wait,
+ }
+ while True:
+ response = self._GetMetadataRequest(metadata_url, params=params)
+ etag_updated = self._UpdateEtag(response)
+ if wait and not etag_updated:
+ # Retry until the etag is updated.
+ continue
+ else:
+ # Waiting for change is not required or the etag is updated.
+ break
+ return json.loads(response.read().decode('utf-8'))
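+
+ # An illustrative request issued by the method above (the etag value is
+ # hypothetical):
+ #
+ # GET http://metadata.google.internal/computeMetadata/v1/?alt=json
+ # &last_etag=abc123&recursive=True&timeout_sec=60&wait_for_change=True
+ # Metadata-Flavor: Google
+ #
+ # The server holds the connection open until the content changes or
+ # timeout_sec elapses; when wait is True, the loop retries until the
+ # etag is actually updated.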
+
+ def WatchMetadata(self, handler, metadata_key='', recursive=True):
+ """Watch for changes to the contents of the metadata server.
+
+ Args:
+ handler: callable, a function to call with the updated metadata contents.
+ metadata_key: string, the metadata key to watch for changes.
+ recursive: bool, True if we should recursively watch for metadata changes.
+ """
+ while True:
+ try:
+ response = self._GetMetadataUpdate(
+ metadata_key=metadata_key, recursive=recursive, wait=True)
+ except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:
+ self.logger.error('GET request error watching metadata. %s.', str(e))
+ # There is no new response to pass to the handler; retry the request.
+ continue
+ try:
+ handler(response)
+ except Exception as e:
+ self.logger.error('Exception calling the response handler. %s.', str(e))
+
+ def GetMetadata(self, metadata_key='', recursive=True):
+ """Retrieve the contents of metadata server for a metadata key.
+
+ Args:
+ metadata_key: string, the metadata key to watch for changes.
+ recursive: bool, True if we should recursively watch for metadata changes.
+
+ Returns:
+ json, the deserialized contents of the metadata server, or None on error.
+ """
+ try:
+ return self._GetMetadataUpdate(
+ metadata_key=metadata_key, recursive=recursive, wait=False)
+ except (httpclient.HTTPException, socket.error, urlerror.URLError) as e:
+ self.logger.error('GET request error retrieving metadata. %s.', str(e))
+ return None
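+
+# A minimal usage sketch (the handler and metadata key are illustrative):
+#
+#   watcher = MetadataWatcher(logger=log)
+#
+#   def _OnChange(contents):
+#       log.info('Metadata changed: %s', contents)
+#
+#   watcher.WatchMetadata(_OnChange, metadata_key='instance', recursive=True)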
diff --git a/google_compute_engine/test_compat.py b/google_compute_engine/test_compat.py
new file mode 100644
index 0000000..232be4c
--- /dev/null
+++ b/google_compute_engine/test_compat.py
@@ -0,0 +1,39 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A module for resolving compatibility issues between Python 2 and Python 3."""
+
+import sys
+
+from google_compute_engine.compat import httpclient
+from google_compute_engine.compat import parser
+from google_compute_engine.compat import urlerror
+from google_compute_engine.compat import urlparse
+from google_compute_engine.compat import urlrequest
+from google_compute_engine.compat import urlretrieve
+
+# Use unittest.mock from the standard library in Python 3.3 and later.
+if sys.version_info >= (3, 3):
+ import unittest.mock as mock
+else:
+ import mock
+
+# Import the unittest2 module to backport testing features to Python 2.6.
+if sys.version_info >= (2, 7):
+ import unittest
+else:
+ import unittest2 as unittest
+
+builtin = 'builtins' if sys.version_info >= (3,) else '__builtin__'
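+
+# The 'builtin' constant above lets tests patch built-in functions under both
+# Python 2 and 3, e.g.:
+#
+#   with mock.patch('%s.open' % builtin, mock.mock_open()):
+#       ...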
diff --git a/google_compute_engine/tests/config_manager_test.py b/google_compute_engine/tests/config_manager_test.py
new file mode 100644
index 0000000..1fccbf9
--- /dev/null
+++ b/google_compute_engine/tests/config_manager_test.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for config_manager.py module."""
+
+from google_compute_engine import config_manager
+from google_compute_engine.test_compat import builtin
+from google_compute_engine.test_compat import mock
+from google_compute_engine.test_compat import unittest
+
+
+class ConfigManagerTest(unittest.TestCase):
+
+ option = 'option'
+ section = 'section'
+ value = 'value'
+
+ def HasOption(self, _, option):
+ """Validate the option exists in the config file.
+
+ Args:
+ option: string, the config option to check.
+
+ Returns:
+ bool, True if 'test' is not in the option name.
+ """
+ return 'test' not in option
+
+ def HasSection(self, section):
+ """Validate the section exists in the config file.
+
+ Args:
+ section: string, the config section to check.
+
+ Returns:
+ bool, True if 'test' is not in the section name.
+ """
+ return 'test' not in section
+
+ def setUp(self):
+ self.mock_config = mock.Mock()
+ self.mock_config.has_option.side_effect = self.HasOption
+ self.mock_config.has_section.side_effect = self.HasSection
+ config_manager.parser.SafeConfigParser = mock.Mock()
+ config_manager.parser.SafeConfigParser.return_value = self.mock_config
+
+ self.config_file = 'test.cfg'
+ self.config_header = 'Config file header.'
+
+ self.mock_config_manager = config_manager.ConfigManager(
+ config_file=self.config_file, config_header=self.config_header)
+
+ def testAddHeader(self):
+ mock_fp = mock.Mock()
+ self.mock_config_manager._AddHeader(mock_fp)
+ expected_calls = [
+ mock.call('# %s' % self.config_header),
+ mock.call('\n\n'),
+ ]
+ self.assertEqual(mock_fp.write.mock_calls, expected_calls)
+
+ def testGetOptionString(self):
+ self.mock_config_manager.GetOptionString(self.section, self.option)
+ expected_calls = [
+ mock.call.read(self.config_file),
+ mock.call.has_option(self.section, self.option),
+ mock.call.get(self.section, self.option),
+ ]
+ self.assertEqual(self.mock_config.mock_calls, expected_calls)
+
+ def testGetOptionStringNoOption(self):
+ option = 'test-option'
+ self.assertIsNone(
+ self.mock_config_manager.GetOptionString(self.section, option))
+ expected_calls = [
+ mock.call.read(self.config_file),
+ mock.call.has_option(self.section, option),
+ ]
+ self.assertEqual(self.mock_config.mock_calls, expected_calls)
+
+ def testGetOptionBool(self):
+ self.mock_config_manager.GetOptionBool(self.section, self.option)
+ expected_calls = [
+ mock.call.read(self.config_file),
+ mock.call.has_option(self.section, self.option),
+ mock.call.getboolean(self.section, self.option),
+ ]
+ self.assertEqual(self.mock_config.mock_calls, expected_calls)
+
+ def testGetOptionBoolNoOption(self):
+ option = 'test-option'
+ self.assertFalse(
+ self.mock_config_manager.GetOptionBool(self.section, option))
+ expected_calls = [
+ mock.call.read(self.config_file),
+ mock.call.has_option(self.section, option),
+ ]
+ self.assertEqual(self.mock_config.mock_calls, expected_calls)
+
+ def testSetOption(self):
+ self.mock_config_manager.SetOption(self.section, self.option, self.value)
+ expected_calls = [
+ mock.call.read(self.config_file),
+ mock.call.has_section(self.section),
+ mock.call.set(self.section, self.option, self.value),
+ ]
+ self.assertEqual(self.mock_config.mock_calls, expected_calls)
+
+ def testSetOptionNoOverwrite(self):
+ self.mock_config_manager.SetOption(
+ self.section, self.option, self.value, overwrite=False)
+ expected_calls = [
+ mock.call.read(self.config_file),
+ mock.call.has_option(self.section, self.option),
+ ]
+ self.assertEqual(self.mock_config.mock_calls, expected_calls)
+
+ def testSetOptionNewSection(self):
+ section = 'test-section'
+ self.mock_config_manager.SetOption(section, self.option, self.value)
+ expected_calls = [
+ mock.call.read(self.config_file),
+ mock.call.has_section(section),
+ mock.call.add_section(section),
+ mock.call.set(section, self.option, self.value),
+ ]
+ self.assertEqual(self.mock_config.mock_calls, expected_calls)
+
+ def testWriteConfig(self):
+ mock_open = mock.mock_open()
+ with mock.patch('%s.open' % builtin, mock_open, create=False):
+ self.mock_config_manager.WriteConfig()
+ expected_calls = [
+ mock.call('# %s' % self.config_header),
+ mock.call('\n\n'),
+ ]
+ self.assertEqual(mock_open().write.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.config_manager.file_utils')
+ def testWriteConfigNoHeader(self, mock_lock):
+ self.mock_config_manager = config_manager.ConfigManager(
+ config_file='/tmp/file.cfg')
+ mock_open = mock.mock_open()
+ with mock.patch('%s.open' % builtin, mock_open, create=False):
+ self.mock_config_manager.WriteConfig()
+ mock_open().write.assert_not_called()
+ mock_lock.LockFile.assert_called_once_with('/var/lock/google_file.lock')
+
+ @mock.patch('google_compute_engine.config_manager.file_utils')
+ def testWriteConfigLocked(self, mock_lock):
+ ioerror = IOError('Test Error')
+ mock_lock.LockFile.side_effect = ioerror
+ mock_open = mock.mock_open()
+ with mock.patch('%s.open' % builtin, mock_open, create=False):
+ with self.assertRaises(IOError) as error:
+ self.mock_config_manager.WriteConfig()
+ self.assertEqual(error.exception, ioerror)
+ mock_open().write.assert_not_called()
+ mock_lock.LockFile.assert_called_once_with('/var/lock/google_test.lock')
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/google_compute_engine/tests/file_utils_test.py b/google_compute_engine/tests/file_utils_test.py
new file mode 100644
index 0000000..21d9784
--- /dev/null
+++ b/google_compute_engine/tests/file_utils_test.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for file_utils_test.py module."""
+
+from google_compute_engine import file_utils
+from google_compute_engine.test_compat import mock
+from google_compute_engine.test_compat import unittest
+
+
+class FileUtilsTest(unittest.TestCase):
+
+ def setUp(self):
+ self.fd = 1
+ self.path = '/tmp/path'
+
+ @mock.patch('google_compute_engine.file_utils.subprocess.call')
+ @mock.patch('google_compute_engine.file_utils.os.access')
+ @mock.patch('google_compute_engine.file_utils.os.path.isfile')
+ def testSetSELinuxContext(self, mock_isfile, mock_access, mock_call):
+ restorecon = '/sbin/restorecon'
+ path = 'path'
+ mock_isfile.return_value = True
+ mock_access.return_value = True
+ file_utils._SetSELinuxContext(path)
+ mock_isfile.assert_called_once_with(restorecon)
+ mock_access.assert_called_once_with(restorecon, file_utils.os.X_OK)
+ mock_call.assert_called_once_with([restorecon, path])
+
+ @mock.patch('google_compute_engine.file_utils.subprocess.call')
+ @mock.patch('google_compute_engine.file_utils.os.access')
+ @mock.patch('google_compute_engine.file_utils.os.path.isfile')
+ def testSetSELinuxContextSkip(self, mock_isfile, mock_access, mock_call):
+ mock_isfile.side_effect = [True, False, False]
+ mock_access.side_effect = [False, True, False]
+ file_utils._SetSELinuxContext('1')
+ file_utils._SetSELinuxContext('2')
+ file_utils._SetSELinuxContext('3')
+ mock_call.assert_not_called()
+
+ @mock.patch('google_compute_engine.file_utils._SetSELinuxContext')
+ @mock.patch('google_compute_engine.file_utils.os.path.exists')
+ @mock.patch('google_compute_engine.file_utils.os.mkdir')
+ @mock.patch('google_compute_engine.file_utils.os.chown')
+ @mock.patch('google_compute_engine.file_utils.os.chmod')
+ def testSetPermissions(self, mock_chmod, mock_chown, mock_mkdir, mock_exists,
+ mock_context):
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_chmod, 'chmod')
+ mocks.attach_mock(mock_chown, 'chown')
+ mocks.attach_mock(mock_mkdir, 'mkdir')
+ mocks.attach_mock(mock_exists, 'exists')
+ mocks.attach_mock(mock_context, 'context')
+ path = 'path'
+ mode = 'mode'
+ uid = 'uid'
+ gid = 'gid'
+ mock_exists.side_effect = [False, True, False]
+
+ # Create a new directory.
+ file_utils.SetPermissions(path, mode=mode, uid=uid, gid=gid, mkdir=True)
+ # The path exists, so do not create a new directory.
+ file_utils.SetPermissions(path, mode=mode, uid=uid, gid=gid, mkdir=True)
+ # Create a new directory without a mode specified.
+ file_utils.SetPermissions(path, uid=uid, gid=gid, mkdir=True)
+ # Do not create the path even though it does not exist.
+ file_utils.SetPermissions(path, mode=mode, uid=uid, gid=gid, mkdir=False)
+ # Do not set an owner when a UID or GID is not specified.
+ file_utils.SetPermissions(path, mode=mode, mkdir=False)
+ # Set the SELinux context when no parameters are specified.
+ file_utils.SetPermissions(path)
+ expected_calls = [
+ # Create a new directory.
+ mock.call.exists(path),
+ mock.call.mkdir(path, mode),
+ mock.call.chown(path, uid, gid),
+ mock.call.context(path),
+ # Attempt to create a new path but reuse existing path.
+ mock.call.exists(path),
+ mock.call.chmod(path, mode),
+ mock.call.chown(path, uid, gid),
+ mock.call.context(path),
+ # Create a new directory with default mode.
+ mock.call.exists(path),
+ mock.call.mkdir(path, 0o777),
+ mock.call.chown(path, uid, gid),
+ mock.call.context(path),
+ # Set permissions and owner on an existing path.
+ mock.call.chmod(path, mode),
+ mock.call.chown(path, uid, gid),
+ mock.call.context(path),
+ # Set permissions, without changing ownership, of an existing path.
+ mock.call.chmod(path, mode),
+ mock.call.context(path),
+ # Set SELinux context on an existing path.
+ mock.call.context(path),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.file_utils.fcntl.flock')
+ def testLock(self, mock_flock):
+ operation = file_utils.fcntl.LOCK_EX | file_utils.fcntl.LOCK_NB
+ file_utils.Lock(self.fd, self.path, False)
+ mock_flock.assert_called_once_with(self.fd, operation)
+
+ @mock.patch('google_compute_engine.file_utils.fcntl.flock')
+ def testLockBlocking(self, mock_flock):
+ operation = file_utils.fcntl.LOCK_EX
+ file_utils.Lock(self.fd, self.path, True)
+ mock_flock.assert_called_once_with(self.fd, operation)
+
+ @mock.patch('google_compute_engine.file_utils.fcntl.flock')
+ def testLockTakenException(self, mock_flock):
+ error = IOError('Test Error')
+ error.errno = file_utils.errno.EWOULDBLOCK
+ mock_flock.side_effect = error
+ try:
+ file_utils.Lock(self.fd, self.path, False)
+ except IOError as e:
+ self.assertTrue(self.path in str(e))
+
+ @mock.patch('google_compute_engine.file_utils.fcntl.flock')
+ def testLockException(self, mock_flock):
+ error = IOError('Test Error')
+ mock_flock.side_effect = error
+ try:
+ file_utils.Lock(self.fd, self.path, False)
+ except IOError as e:
+ self.assertTrue(self.path in str(e))
+ self.assertTrue('Test Error' in str(e))
+
+ @mock.patch('google_compute_engine.file_utils.fcntl.flock')
+ def testUnlock(self, mock_flock):
+ operation = file_utils.fcntl.LOCK_UN | file_utils.fcntl.LOCK_NB
+ file_utils.Unlock(self.fd, self.path)
+ mock_flock.assert_called_once_with(self.fd, operation)
+
+ @mock.patch('google_compute_engine.file_utils.fcntl.flock')
+ def testUnlockTakenException(self, mock_flock):
+ error = IOError('Test Error')
+ error.errno = file_utils.errno.EWOULDBLOCK
+ mock_flock.side_effect = error
+ try:
+ file_utils.Unlock(self.fd, self.path)
+ except IOError as e:
+ self.assertTrue(self.path in str(e))
+
+ @mock.patch('google_compute_engine.file_utils.fcntl.flock')
+ def testUnlockException(self, mock_flock):
+ error = IOError('Test Error')
+ mock_flock.side_effect = error
+ try:
+ file_utils.Unlock(self.fd, self.path)
+ except IOError as e:
+ self.assertTrue(self.path in str(e))
+ self.assertTrue('Test Error' in str(e))
+
+ @mock.patch('google_compute_engine.file_utils.Unlock')
+ @mock.patch('google_compute_engine.file_utils.Lock')
+ @mock.patch('google_compute_engine.file_utils.os')
+ def testLockFile(self, mock_os, mock_lock, mock_unlock):
+ mock_callable = mock.Mock()
+ mock_os.open.return_value = self.fd
+ mock_os.O_CREAT = 1
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_callable, 'callable')
+ mocks.attach_mock(mock_lock, 'lock')
+ mocks.attach_mock(mock_unlock, 'unlock')
+ mocks.attach_mock(mock_os.open, 'open')
+ mocks.attach_mock(mock_os.close, 'close')
+
+ with file_utils.LockFile(self.path, blocking=True):
+ mock_callable('test')
+
+ expected_calls = [
+ mock.call.open(self.path, 1),
+ mock.call.lock(self.fd, self.path, True),
+ mock.call.callable('test'),
+ mock.call.unlock(self.fd, self.path),
+ mock.call.close(1),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+
+if __name__ == '__main__':
+ unittest.main()
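
The tests above describe the file_utils surface: SetPermissions optionally creates the directory, applies mode and ownership, and restores the SELinux context when restorecon is available; Lock/Unlock wrap flock(); and LockFile is the context-manager entry point. A short sketch under those assumptions, with an illustrative path:

    from google_compute_engine import file_utils

    # LockFile is a context manager over flock(); blocking=True waits for
    # the lock instead of raising IOError when it is already held.
    with file_utils.LockFile('/var/lock/google_example.lock', blocking=True):
      # Hypothetical path: creates the directory (mkdir=True), applies the
      # mode and owner, then restores the SELinux context when restorecon
      # is present and executable.
      file_utils.SetPermissions(
          '/var/lib/google/example', mode=0o755, uid=0, gid=0, mkdir=True)
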
diff --git a/google_compute_engine/tests/logger_test.py b/google_compute_engine/tests/logger_test.py
new file mode 100644
index 0000000..6fb7d4a
--- /dev/null
+++ b/google_compute_engine/tests/logger_test.py
@@ -0,0 +1,51 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for logger.py module."""
+
+from google_compute_engine import logger
+from google_compute_engine.test_compat import mock
+from google_compute_engine.test_compat import unittest
+
+
+class LoggerTest(unittest.TestCase):
+
+ @mock.patch('google_compute_engine.logger.logging.handlers.SysLogHandler')
+ @mock.patch('google_compute_engine.logger.logging.StreamHandler')
+ def testLogger(self, mock_stream, mock_syslog):
+ mock_stream.return_value = mock_stream
+ mock_syslog.return_value = mock_syslog
+ name = 'test'
+
+ # Verify basic logger setup.
+ named_logger = logger.Logger(name=name, debug=True)
+ mock_stream.setLevel.assert_called_once_with(logger.logging.DEBUG)
+ self.assertEqual(named_logger.handlers, [mock_stream])
+
+ # Verify logger setup with a facility.
+ address = '/dev/log'
+ facility = 1
+ named_logger = logger.Logger(name=name, debug=True, facility=facility)
+ mock_syslog.assert_called_once_with(address=address, facility=facility)
+ mock_syslog.setLevel.assert_called_once_with(logger.logging.INFO)
+ self.assertEqual(named_logger.handlers, [mock_stream, mock_syslog])
+
+ # Verify the handlers are reset during repeated calls.
+ named_logger = logger.Logger(name=name, debug=False)
+ self.assertEqual(named_logger.handlers, [])
+
+
+if __name__ == '__main__':
+ unittest.main()
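
As the test documents, logger.Logger always attaches a console stream handler (DEBUG when debug=True) and adds a syslog handler on /dev/log only when a facility is given. A sketch of both setups, with a made-up logger name:

    from google_compute_engine import logger

    # Console-only logger; debug=True lowers the stream handler to DEBUG.
    console_logger = logger.Logger(name='example', debug=True)
    console_logger.info('Console logging only.')

    # Passing a facility additionally attaches a SysLogHandler on /dev/log
    # at INFO level; facility=1 is SysLogHandler.LOG_USER.
    syslog_logger = logger.Logger(name='example', debug=True, facility=1)
    syslog_logger.info('Console and syslog.')
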
diff --git a/google_compute_engine/tests/metadata_watcher_test.py b/google_compute_engine/tests/metadata_watcher_test.py
new file mode 100644
index 0000000..4a7de5a
--- /dev/null
+++ b/google_compute_engine/tests/metadata_watcher_test.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittest for metadata_watcher.py module."""
+
+import os
+
+from google_compute_engine import metadata_watcher
+from google_compute_engine.test_compat import mock
+from google_compute_engine.test_compat import unittest
+
+
+class MetadataWatcherTest(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_logger = mock.Mock()
+ self.timeout = 60
+ self.url = 'http://metadata.google.internal/computeMetadata/v1'
+ self.params = {
+ 'alt': 'json',
+ 'last_etag': 0,
+ 'recursive': True,
+ 'timeout_sec': self.timeout,
+ 'wait_for_change': True,
+ }
+ self.mock_watcher = metadata_watcher.MetadataWatcher(
+ logger=self.mock_logger, timeout=self.timeout)
+
+ @mock.patch('google_compute_engine.metadata_watcher.urlrequest.urlopen')
+ @mock.patch('google_compute_engine.metadata_watcher.urlrequest.Request')
+ def testGetMetadataRequest(self, mock_request, mock_urlopen):
+ mock_request.return_value = mock_request
+ mock_response = mock.Mock()
+ mock_response.getcode.return_value = metadata_watcher.httpclient.OK
+ mock_urlopen.return_value = mock_response
+ request_url = '%s?' % self.url
+ headers = {'Metadata-Flavor': 'Google'}
+ timeout = self.timeout * 1.1
+
+ self.mock_watcher._GetMetadataRequest(self.url)
+ mock_request.assert_called_once_with(request_url, headers=headers)
+ mock_urlopen.assert_called_once_with(mock_request, timeout=timeout)
+
+ @mock.patch('google_compute_engine.metadata_watcher.urlrequest.urlopen')
+ @mock.patch('google_compute_engine.metadata_watcher.urlrequest.Request')
+ def testGetMetadataRequestArgs(self, mock_request, mock_urlopen):
+ mock_request.return_value = mock_request
+ mock_response = mock.Mock()
+ mock_response.getcode.return_value = metadata_watcher.httpclient.OK
+ mock_urlopen.return_value = mock_response
+ params = {'hello': 'world'}
+ request_url = '%s?hello=world' % self.url
+ headers = {'Metadata-Flavor': 'Google'}
+ timeout = self.timeout * 1.1
+
+ self.mock_watcher._GetMetadataRequest(self.url, params=params)
+ mock_request.assert_called_once_with(request_url, headers=headers)
+ mock_urlopen.assert_called_once_with(mock_request, timeout=timeout)
+
+ @mock.patch('google_compute_engine.metadata_watcher.time')
+ @mock.patch('google_compute_engine.metadata_watcher.urlrequest.urlopen')
+ @mock.patch('google_compute_engine.metadata_watcher.urlrequest.Request')
+ def testGetMetadataRequestRetry(self, mock_request, mock_urlopen, mock_time):
+ mocks = mock.Mock()
+ mocks.attach_mock(mock_request, 'request')
+ mocks.attach_mock(mock_urlopen, 'urlopen')
+ mocks.attach_mock(mock_time, 'time')
+ mock_request.return_value = mock_request
+ mock_unavailable = mock.Mock()
+ mock_unavailable.getcode.return_value = (
+ metadata_watcher.httpclient.SERVICE_UNAVAILABLE)
+ mock_success = mock.Mock()
+ mock_success.getcode.return_value = metadata_watcher.httpclient.OK
+
+ # Retry after a service unavailable error response.
+ mock_urlopen.side_effect = [
+ metadata_watcher.StatusException(mock_unavailable),
+ mock_success,
+ ]
+
+ self.mock_watcher._GetMetadataRequest(self.url)
+ request_url = '%s?' % self.url
+ headers = {'Metadata-Flavor': 'Google'}
+ timeout = self.timeout * 1.1
+ expected_calls = [
+ mock.call.request(request_url, headers=headers),
+ mock.call.urlopen(mock_request, timeout=timeout),
+ mock.call.time.sleep(mock.ANY),
+ mock.call.request(request_url, headers=headers),
+ mock.call.urlopen(mock_request, timeout=timeout),
+ ]
+ self.assertEqual(mocks.mock_calls, expected_calls)
+
+ @mock.patch('google_compute_engine.metadata_watcher.urlrequest.urlopen')
+ @mock.patch('google_compute_engine.metadata_watcher.urlrequest.Request')
+ def testGetMetadataRequestHttpException(self, mock_request, mock_urlopen):
+ mock_request.return_value = mock_request
+ mock_response = mock.Mock()
+ mock_response.getcode.return_value = metadata_watcher.httpclient.NOT_FOUND
+    mock_urlopen.side_effect = [metadata_watcher.StatusException(mock_response)]
+
+ with self.assertRaises(metadata_watcher.StatusException):
+ self.mock_watcher._GetMetadataRequest(self.url)
+ self.assertEqual(mock_request.call_count, 1)
+ self.assertEqual(mock_urlopen.call_count, 1)
+
+ @mock.patch('google_compute_engine.metadata_watcher.urlrequest.urlopen')
+ @mock.patch('google_compute_engine.metadata_watcher.urlrequest.Request')
+ def testGetMetadataRequestException(self, mock_request, mock_urlopen):
+ mock_request.return_value = mock_request
+ mock_response = mock.Mock()
+ mock_response.getcode.return_value = metadata_watcher.httpclient.NOT_FOUND
+ mock_urlopen.side_effect = mock_response
+
+ with self.assertRaises(metadata_watcher.StatusException):
+ self.mock_watcher._GetMetadataRequest(self.url)
+ self.assertEqual(mock_request.call_count, 1)
+ self.assertEqual(mock_urlopen.call_count, 1)
+
+ def testUpdateEtag(self):
+ mock_response = mock.Mock()
+ mock_response.headers = {'etag': 1}
+ self.assertEqual(self.mock_watcher.etag, 0)
+
+ # Update the etag if the etag is set.
+ self.assertTrue(self.mock_watcher._UpdateEtag(mock_response))
+ self.assertEqual(self.mock_watcher.etag, 1)
+
+ # Do not update the etag if the etag is unchanged.
+ self.assertFalse(self.mock_watcher._UpdateEtag(mock_response))
+ self.assertEqual(self.mock_watcher.etag, 1)
+
+ # Do not update the etag if the etag is not set.
+ mock_response.headers = {}
+ self.assertFalse(self.mock_watcher._UpdateEtag(mock_response))
+ self.assertEqual(self.mock_watcher.etag, 1)
+
+ def testGetMetadataUpdate(self):
+ mock_response = mock.Mock()
+ mock_response.return_value = mock_response
+ mock_response.headers = {'etag': 1}
+ mock_response.read.return_value = bytes(b'{}')
+ self.mock_watcher._GetMetadataRequest = mock_response
+ request_url = os.path.join(self.url, '')
+
+ self.assertEqual(self.mock_watcher._GetMetadataUpdate(), {})
+ self.assertEqual(self.mock_watcher.etag, 1)
+ mock_response.assert_called_once_with(request_url, params=self.params)
+
+ def testGetMetadataUpdateArgs(self):
+ mock_response = mock.Mock()
+ mock_response.return_value = mock_response
+ mock_response.headers = {'etag': 0}
+ mock_response.read.return_value = bytes(b'{}')
+ self.mock_watcher._GetMetadataRequest = mock_response
+ metadata_key = 'instance/id'
+ self.params['recursive'] = False
+ self.params['wait_for_change'] = False
+ request_url = os.path.join(self.url, metadata_key)
+
+ self.mock_watcher._GetMetadataUpdate(
+ metadata_key=metadata_key, recursive=False, wait=False)
+ self.assertEqual(self.mock_watcher.etag, 0)
+ mock_response.assert_called_once_with(request_url, params=self.params)
+
+ def testGetMetadataUpdateWait(self):
+ self.params['last_etag'] = 1
+ self.mock_watcher.etag = 1
+ mock_unchanged = mock.Mock()
+ mock_unchanged.headers = {'etag': 1}
+ mock_unchanged.read.return_value = bytes(b'{}')
+ mock_changed = mock.Mock()
+ mock_changed.headers = {'etag': 2}
+ mock_changed.read.return_value = bytes(b'{}')
+ mock_response = mock.Mock()
+ mock_response.side_effect = [mock_unchanged, mock_unchanged, mock_changed]
+ self.mock_watcher._GetMetadataRequest = mock_response
+ request_url = os.path.join(self.url, '')
+
+ self.mock_watcher._GetMetadataUpdate()
+ self.assertEqual(self.mock_watcher.etag, 2)
+ expected_calls = [mock.call(request_url, params=self.params)] * 3
+ self.assertEqual(mock_response.mock_calls, expected_calls)
+
+ def testWatchMetadata(self):
+ mock_response = mock.Mock()
+ mock_response.return_value = {}
+ self.mock_watcher._GetMetadataUpdate = mock_response
+ mock_handler = mock.Mock()
+ mock_handler.side_effect = Exception()
+ self.mock_logger.error.side_effect = RuntimeError()
+ recursive = True
+
+ with self.assertRaises(RuntimeError):
+ self.mock_watcher.WatchMetadata(mock_handler, recursive=recursive)
+ mock_handler.assert_called_once_with({})
+ mock_response.assert_called_once_with(
+ metadata_key='', recursive=recursive, wait=True)
+
+ def testWatchMetadataException(self):
+ mock_response = mock.Mock()
+ mock_response.side_effect = metadata_watcher.socket.timeout()
+ self.mock_watcher._GetMetadataUpdate = mock_response
+ self.mock_logger.error.side_effect = RuntimeError()
+ metadata_key = 'instance/id'
+ recursive = False
+
+ with self.assertRaises(RuntimeError):
+ self.mock_watcher.WatchMetadata(
+ None, metadata_key=metadata_key, recursive=recursive)
+ mock_response.assert_called_once_with(
+ metadata_key=metadata_key, recursive=recursive, wait=True)
+
+ def testGetMetadata(self):
+ mock_response = mock.Mock()
+ mock_response.return_value = {}
+ self.mock_watcher._GetMetadataUpdate = mock_response
+
+ self.assertEqual(self.mock_watcher.GetMetadata(), {})
+ mock_response.assert_called_once_with(
+ metadata_key='', recursive=True, wait=False)
+ self.mock_watcher.logger.error.assert_not_called()
+
+ def testGetMetadataArgs(self):
+ mock_response = mock.Mock()
+ mock_response.return_value = {}
+ self.mock_watcher._GetMetadataUpdate = mock_response
+ metadata_key = 'instance/id'
+ recursive = False
+
+ response = self.mock_watcher.GetMetadata(
+ metadata_key=metadata_key, recursive=recursive)
+ self.assertEqual(response, {})
+ mock_response.assert_called_once_with(
+ metadata_key=metadata_key, recursive=False, wait=False)
+ self.mock_watcher.logger.error.assert_not_called()
+
+ def testGetMetadataException(self):
+ mock_response = mock.Mock()
+ mock_response.side_effect = metadata_watcher.socket.timeout()
+ mock_response.return_value = {}
+ self.mock_watcher._GetMetadataUpdate = mock_response
+
+ self.assertEqual(self.mock_watcher.GetMetadata(), None)
+ self.assertEqual(self.mock_watcher.logger.error.call_count, 1)
+
+
+if __name__ == '__main__':
+ unittest.main()
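
These tests outline the MetadataWatcher contract: GetMetadata is a one-shot, non-blocking fetch that returns None (and logs an error) on timeouts, while WatchMetadata long-polls with wait_for_change and etag tracking, catching and logging handler exceptions. A usage sketch inferred from the tests above, with a hypothetical handler:

    from google_compute_engine import metadata_watcher

    watcher = metadata_watcher.MetadataWatcher()  # logger= and timeout= also accepted

    # One-shot fetch of the recursive JSON document under the metadata root.
    metadata = watcher.GetMetadata()

    def handler(response):
      # Invoked with the decoded JSON each time the etag changes.
      print(response)

    # Long-poll loop; per the tests above, handler exceptions are caught
    # and logged so the watch continues.
    watcher.WatchMetadata(handler, metadata_key='instance/id', recursive=False)
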
diff --git a/google-startup-scripts/usr/share/google/set-hostname b/google_configs/bin/set_hostname
index 9b71e4d..f7d0b10 100755
--- a/google-startup-scripts/usr/share/google/set-hostname
+++ b/google_configs/bin/set_hostname
@@ -1,4 +1,5 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
+#!/bin/bash
+# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -33,7 +34,7 @@ fi
# unqualified domain name.
if [ -n "$new_host_name" ]; then
- hostname ${new_host_name%%.*}
+ hostname "${new_host_name%%.*}"
# Let syslogd know we've changed the hostname.
pkill -HUP syslogd
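
For reference, the quoted ${new_host_name%%.*} expansion removes the longest suffix starting at the first dot, so only the unqualified host name is passed to hostname. An equivalent illustration in Python, with a made-up FQDN:

    # '%%.*' strips everything from the first '.' to the end of the string.
    new_host_name = 'vm-1.c.example-project.internal'  # hypothetical FQDN
    unqualified = new_host_name.split('.', 1)[0]
    assert unqualified == 'vm-1'
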
diff --git a/google_configs/build_packages.sh b/google_configs/build_packages.sh
new file mode 100755
index 0000000..94f7bba
--- /dev/null
+++ b/google_configs/build_packages.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+COMMON_FILES=(
+ 'rsyslog/90-google.conf=/etc/rsyslog.d/90-google.conf'
+ 'sysctl/11-gce-network-security.conf=/etc/sysctl.d/11-gce-network-security.conf'
+ 'udev/64-gce-disk-removal.rules=/etc/udev/rules.d/64-gce-disk-removal.rules'
+ 'udev/65-gce-disk-naming.rules=/etc/udev/rules.d/65-gce-disk-naming.rules')
+TIMESTAMP="$(date +%s)"
+
+function build_distro() {
+ declare -r distro="$1"
+ declare -r pkg_type="$2"
+ declare files=("$@")
+ declare name='google-config'
+
+ if [[ "${pkg_type}" == 'deb' ]]; then
+ name="${name}-${distro}"
+ fi
+
+ fpm \
+ -s dir \
+ -t "${pkg_type}" \
+ --description 'Google Compute Engine guest configs' \
+ --iteration "0.${TIMESTAMP}" \
+ --license 'Apache Software License' \
+ --maintainer 'gc-team@google.com' \
+ --name "${name}" \
+ --rpm-dist "${distro}" \
+ --url 'https://github.com/GoogleCloudPlatform/compute-image-packages' \
+ --vendor 'Google Compute Engine Team' \
+ --version '2.0.0' \
+ "${COMMON_FILES[@]}" \
+ "${files[@]:2}"
+}
+
+# RHEL/CentOS 6
+build_distro 'el6' 'rpm' \
+ 'bin/set_hostname=/etc/dhcp/dhclient-exit-hooks'
+
+# RHEL/CentOS 7
+build_distro 'el7' 'rpm' \
+ 'bin/set_hostname=/usr/bin/set_hostname' \
+ 'dhcp/google_hostname.sh=/etc/dhcp/dhclient.d/google_hostname.sh'
+
+# Debian 7
+build_distro 'wheezy' 'deb' \
+ 'bin/set_hostname=/etc/dhcp/dhclient-exit-hooks.d/set_hostname'
+
+# Debian 8
+build_distro 'jessie' 'deb' \
+ 'bin/set_hostname=/etc/dhcp/dhclient-exit-hooks.d/set_hostname'
diff --git a/legacy/gcimagebundle/gcimagebundlelib/tests/__init__.py b/google_configs/dhcp/google_hostname.sh
index 42723d7..67231e0 100644..100755
--- a/legacy/gcimagebundle/gcimagebundlelib/tests/__init__.py
+++ b/google_configs/dhcp/google_hostname.sh
@@ -1,5 +1,5 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
+#!/bin/bash
+# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,4 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Unit tests module for Image Bundle."""
+google_hostname_config() {
+ set_hostname
+}
+google_hostname_restore() {
+ :
+}
diff --git a/google_configs/rsyslog/90-google.conf b/google_configs/rsyslog/90-google.conf
new file mode 100644
index 0000000..81b2ed7
--- /dev/null
+++ b/google_configs/rsyslog/90-google.conf
@@ -0,0 +1,6 @@
+# Google Compute Engine default console logging.
+#
+# daemon: logging from Google-provided daemons.
+# kern: logging information in case of an unexpected crash during boot.
+#
+daemon,kern.* /dev/console
diff --git a/google-startup-scripts/etc/sysctl.d/11-gce-network-security.conf b/google_configs/sysctl/11-gce-network-security.conf
index 0f70b99..0e4db8c 100644
--- a/google-startup-scripts/etc/sysctl.d/11-gce-network-security.conf
+++ b/google_configs/sysctl/11-gce-network-security.conf
@@ -1,3 +1,17 @@
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
# Google-recommended kernel parameters
# Turn on SYN-flood protections. Starting with 2.6.26, there is no loss
diff --git a/google-startup-scripts/lib/udev/rules.d/64-gce-disk-removal.rules b/google_configs/udev/64-gce-disk-removal.rules
index 004f64a..4ff1f99 100644
--- a/google-startup-scripts/lib/udev/rules.d/64-gce-disk-removal.rules
+++ b/google_configs/udev/64-gce-disk-removal.rules
@@ -1,4 +1,4 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
+# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/google-startup-scripts/lib/udev/rules.d/65-gce-disk-naming.rules b/google_configs/udev/65-gce-disk-naming.rules
index eef1e17..c686837 100644
--- a/google-startup-scripts/lib/udev/rules.d/65-gce-disk-naming.rules
+++ b/google_configs/udev/65-gce-disk-naming.rules
@@ -1,4 +1,4 @@
-# Copyright 2011 Google Inc. All Rights Reserved.
+# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/legacy/README.md b/legacy/README.md
deleted file mode 100644
index 524aeb2..0000000
--- a/legacy/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-## Legacy packages
-
-gcimagebundle is deprecated and is provided here as is with no further
-support or maintenance. See [replacement
-instructions](https://cloud.google.com/compute/docs/creating-custom-image#export_an_image_to_google_cloud_storage).
diff --git a/legacy/gcimagebundle/LICENSE b/legacy/gcimagebundle/LICENSE
deleted file mode 100644
index 04cb0d7..0000000
--- a/legacy/gcimagebundle/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2013 Google Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/legacy/gcimagebundle/MANIFEST.in b/legacy/gcimagebundle/MANIFEST.in
deleted file mode 100644
index 6bbb29c..0000000
--- a/legacy/gcimagebundle/MANIFEST.in
+++ /dev/null
@@ -1,4 +0,0 @@
-include *.md
-include distribute_setup.py
-include LICENSE
-include VERSION
diff --git a/legacy/gcimagebundle/README b/legacy/gcimagebundle/README
deleted file mode 100644
index 13afc26..0000000
--- a/legacy/gcimagebundle/README
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-Image bundling tool for root file system.
-
-Note: This tool is deprecated. Please see alternate instructions at
-https://cloud.google.com/compute/docs/creating-custom-image#export_an_image_to_google_cloud_storage
-
-To build a root filesystem tar
-$ sudo gcimagebundle -d /dev/sda -r / -o /tmp \
- --loglevel=DEBUG --log_file=/tmp/gcimagebundle.log
-
-This will output the image tar in the output directory
-specified with -o option.
-
-Note that this is copied out file by file into the default google image.
-
-To run unittest:
-$ sudo python setup.py test
diff --git a/legacy/gcimagebundle/README.md b/legacy/gcimagebundle/README.md
deleted file mode 100644
index e3ee03d..0000000
--- a/legacy/gcimagebundle/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-Image Bundle
-============
-
-_Image Bundle is deprecated. Please see alternate instructions for [Exporting an image to Google Cloud Storage](https://cloud.google.com/compute/docs/creating-custom-image#export_an_image_to_google_cloud_storage)._
-
-Image Bundle is a python package that allows users to create an image from the current state of the running virtual machine. Image Bundle creates the image with the recommended packaging format and also allows you to run unit tests to verify that image bundle works properly on your operating system. See [Custom Images](https://cloud.google.com/compute/docs/creating-custom-image) for more information.
-
-### Installation
-
- $ sudo python setup.py install
-
-### Usage
-
-To build a root filesystem tar:
-
- $ sudo gcimagebundle -d /dev/sda -r / -o /tmp \
- --loglevel=DEBUG --log_file=/tmp/image_bundle.log
-
-This will output the image tar in the output directory specified with -o option.
-
-For details on all the parameters use...
-
- $ sudo gcimagebundle --help
-
-### Unit Tests
-
-Image Bundle includes unit tests that should be run if you make any changes. These tests perform mount operations so root access is required.
-
- $ sudo python setup.py test
-
-### Packaging
-
-Since Image Bundle uses setuptools it can be packaged into a DEB or RPM.
-
-Install the required dependencies:
-
- # For Debian based distributions
- $ sudo apt-get install python-stdeb rpm
- # For Red-Hat based distributions
- $ sudo yum install rpmbuild
-
-DEB package:
-
- $ python setup.py --command-packages=stdeb.command bdist_deb
-
-RPM package:
-
- $ python setup.py bdist_rpm
diff --git a/legacy/gcimagebundle/VERSION b/legacy/gcimagebundle/VERSION
deleted file mode 100644
index 3a3cd8c..0000000
--- a/legacy/gcimagebundle/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-1.3.1
diff --git a/legacy/gcimagebundle/distribute_setup.py b/legacy/gcimagebundle/distribute_setup.py
deleted file mode 100644
index 3553b21..0000000
--- a/legacy/gcimagebundle/distribute_setup.py
+++ /dev/null
@@ -1,556 +0,0 @@
-#!python
-"""Bootstrap distribute installation
-
-If you want to use setuptools in your package's setup.py, just include this
-file in the same directory with it, and add this to the top of your setup.py::
-
- from distribute_setup import use_setuptools
- use_setuptools()
-
-If you want to require a specific version of setuptools, set a download
-mirror, or use an alternate download directory, you can do so by supplying
-the appropriate options to ``use_setuptools()``.
-
-This file can also be run as a script to install or upgrade setuptools.
-"""
-import os
-import shutil
-import sys
-import time
-import fnmatch
-import tempfile
-import tarfile
-import optparse
-
-from distutils import log
-
-try:
- from site import USER_SITE
-except ImportError:
- USER_SITE = None
-
-try:
- import subprocess
-
- def _python_cmd(*args):
- args = (sys.executable,) + args
- return subprocess.call(args) == 0
-
-except ImportError:
- # will be used for python 2.3
- def _python_cmd(*args):
- args = (sys.executable,) + args
- # quoting arguments if windows
- if sys.platform == 'win32':
- def quote(arg):
- if ' ' in arg:
- return '"%s"' % arg
- return arg
- args = [quote(arg) for arg in args]
- return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
-
-DEFAULT_VERSION = "0.6.49"
-DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
-SETUPTOOLS_FAKED_VERSION = "0.6c11"
-
-SETUPTOOLS_PKG_INFO = """\
-Metadata-Version: 1.0
-Name: setuptools
-Version: %s
-Summary: xxxx
-Home-page: xxx
-Author: xxx
-Author-email: xxx
-License: xxx
-Description: xxx
-""" % SETUPTOOLS_FAKED_VERSION
-
-
-def _install(tarball, install_args=()):
- # extracting the tarball
- tmpdir = tempfile.mkdtemp()
- log.warn('Extracting in %s', tmpdir)
- old_wd = os.getcwd()
- try:
- os.chdir(tmpdir)
- tar = tarfile.open(tarball)
- _extractall(tar)
- tar.close()
-
- # going in the directory
- subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
- os.chdir(subdir)
- log.warn('Now working in %s', subdir)
-
- # installing
- log.warn('Installing Distribute')
- if not _python_cmd('setup.py', 'install', *install_args):
- log.warn('Something went wrong during the installation.')
- log.warn('See the error message above.')
- # exitcode will be 2
- return 2
- finally:
- os.chdir(old_wd)
- shutil.rmtree(tmpdir)
-
-
-def _build_egg(egg, tarball, to_dir):
- # extracting the tarball
- tmpdir = tempfile.mkdtemp()
- log.warn('Extracting in %s', tmpdir)
- old_wd = os.getcwd()
- try:
- os.chdir(tmpdir)
- tar = tarfile.open(tarball)
- _extractall(tar)
- tar.close()
-
- # going in the directory
- subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
- os.chdir(subdir)
- log.warn('Now working in %s', subdir)
-
- # building an egg
- log.warn('Building a Distribute egg in %s', to_dir)
- _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
-
- finally:
- os.chdir(old_wd)
- shutil.rmtree(tmpdir)
- # returning the result
- log.warn(egg)
- if not os.path.exists(egg):
- raise IOError('Could not build the egg.')
-
-
-def _do_download(version, download_base, to_dir, download_delay):
- egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
- % (version, sys.version_info[0], sys.version_info[1]))
- if not os.path.exists(egg):
- tarball = download_setuptools(version, download_base,
- to_dir, download_delay)
- _build_egg(egg, tarball, to_dir)
- sys.path.insert(0, egg)
- import setuptools
- setuptools.bootstrap_install_from = egg
-
-
-def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
- to_dir=os.curdir, download_delay=15, no_fake=True):
- # making sure we use the absolute path
- to_dir = os.path.abspath(to_dir)
- was_imported = 'pkg_resources' in sys.modules or \
- 'setuptools' in sys.modules
- try:
- try:
- import pkg_resources
-
- # Setuptools 0.7b and later is a suitable (and preferable)
- # substitute for any Distribute version.
- try:
- pkg_resources.require("setuptools>=0.7b")
- return
- except (pkg_resources.DistributionNotFound,
- pkg_resources.VersionConflict):
- pass
-
- if not hasattr(pkg_resources, '_distribute'):
- if not no_fake:
- _fake_setuptools()
- raise ImportError
- except ImportError:
- return _do_download(version, download_base, to_dir, download_delay)
- try:
- pkg_resources.require("distribute>=" + version)
- return
- except pkg_resources.VersionConflict:
- e = sys.exc_info()[1]
- if was_imported:
- sys.stderr.write(
- "The required version of distribute (>=%s) is not available,\n"
- "and can't be installed while this script is running. Please\n"
- "install a more recent version first, using\n"
- "'easy_install -U distribute'."
- "\n\n(Currently using %r)\n" % (version, e.args[0]))
- sys.exit(2)
- else:
- del pkg_resources, sys.modules['pkg_resources'] # reload ok
- return _do_download(version, download_base, to_dir,
- download_delay)
- except pkg_resources.DistributionNotFound:
- return _do_download(version, download_base, to_dir,
- download_delay)
- finally:
- if not no_fake:
- _create_fake_setuptools_pkg_info(to_dir)
-
-
-def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
- to_dir=os.curdir, delay=15):
- """Download distribute from a specified location and return its filename
-
- `version` should be a valid distribute version number that is available
- as an egg for download under the `download_base` URL (which should end
- with a '/'). `to_dir` is the directory where the egg will be downloaded.
- `delay` is the number of seconds to pause before an actual download
- attempt.
- """
- # making sure we use the absolute path
- to_dir = os.path.abspath(to_dir)
- try:
- from urllib.request import urlopen
- except ImportError:
- from urllib2 import urlopen
- tgz_name = "distribute-%s.tar.gz" % version
- url = download_base + tgz_name
- saveto = os.path.join(to_dir, tgz_name)
- src = dst = None
- if not os.path.exists(saveto): # Avoid repeated downloads
- try:
- log.warn("Downloading %s", url)
- src = urlopen(url)
- # Read/write all in one block, so we don't create a corrupt file
- # if the download is interrupted.
- data = src.read()
- dst = open(saveto, "wb")
- dst.write(data)
- finally:
- if src:
- src.close()
- if dst:
- dst.close()
- return os.path.realpath(saveto)
-
-
-def _no_sandbox(function):
- def __no_sandbox(*args, **kw):
- try:
- from setuptools.sandbox import DirectorySandbox
- if not hasattr(DirectorySandbox, '_old'):
- def violation(*args):
- pass
- DirectorySandbox._old = DirectorySandbox._violation
- DirectorySandbox._violation = violation
- patched = True
- else:
- patched = False
- except ImportError:
- patched = False
-
- try:
- return function(*args, **kw)
- finally:
- if patched:
- DirectorySandbox._violation = DirectorySandbox._old
- del DirectorySandbox._old
-
- return __no_sandbox
-
-
-def _patch_file(path, content):
- """Will backup the file then patch it"""
- f = open(path)
- existing_content = f.read()
- f.close()
- if existing_content == content:
- # already patched
- log.warn('Already patched.')
- return False
- log.warn('Patching...')
- _rename_path(path)
- f = open(path, 'w')
- try:
- f.write(content)
- finally:
- f.close()
- return True
-
-_patch_file = _no_sandbox(_patch_file)
-
-
-def _same_content(path, content):
- f = open(path)
- existing_content = f.read()
- f.close()
- return existing_content == content
-
-
-def _rename_path(path):
- new_name = path + '.OLD.%s' % time.time()
- log.warn('Renaming %s to %s', path, new_name)
- os.rename(path, new_name)
- return new_name
-
-
-def _remove_flat_installation(placeholder):
- if not os.path.isdir(placeholder):
- log.warn('Unkown installation at %s', placeholder)
- return False
- found = False
- for file in os.listdir(placeholder):
- if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
- found = True
- break
- if not found:
- log.warn('Could not locate setuptools*.egg-info')
- return
-
- log.warn('Moving elements out of the way...')
- pkg_info = os.path.join(placeholder, file)
- if os.path.isdir(pkg_info):
- patched = _patch_egg_dir(pkg_info)
- else:
- patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
-
- if not patched:
- log.warn('%s already patched.', pkg_info)
- return False
- # now let's move the files out of the way
- for element in ('setuptools', 'pkg_resources.py', 'site.py'):
- element = os.path.join(placeholder, element)
- if os.path.exists(element):
- _rename_path(element)
- else:
- log.warn('Could not find the %s element of the '
- 'Setuptools distribution', element)
- return True
-
-_remove_flat_installation = _no_sandbox(_remove_flat_installation)
-
-
-def _after_install(dist):
- log.warn('After install bootstrap.')
- placeholder = dist.get_command_obj('install').install_purelib
- _create_fake_setuptools_pkg_info(placeholder)
-
-
-def _create_fake_setuptools_pkg_info(placeholder):
- if not placeholder or not os.path.exists(placeholder):
- log.warn('Could not find the install location')
- return
- pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
- setuptools_file = 'setuptools-%s-py%s.egg-info' % \
- (SETUPTOOLS_FAKED_VERSION, pyver)
- pkg_info = os.path.join(placeholder, setuptools_file)
- if os.path.exists(pkg_info):
- log.warn('%s already exists', pkg_info)
- return
-
- log.warn('Creating %s', pkg_info)
- try:
- f = open(pkg_info, 'w')
- except EnvironmentError:
- log.warn("Don't have permissions to write %s, skipping", pkg_info)
- return
- try:
- f.write(SETUPTOOLS_PKG_INFO)
- finally:
- f.close()
-
- pth_file = os.path.join(placeholder, 'setuptools.pth')
- log.warn('Creating %s', pth_file)
- f = open(pth_file, 'w')
- try:
- f.write(os.path.join(os.curdir, setuptools_file))
- finally:
- f.close()
-
-_create_fake_setuptools_pkg_info = _no_sandbox(
- _create_fake_setuptools_pkg_info
-)
-
-
-def _patch_egg_dir(path):
- # let's check if it's already patched
- pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
- if os.path.exists(pkg_info):
- if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
- log.warn('%s already patched.', pkg_info)
- return False
- _rename_path(path)
- os.mkdir(path)
- os.mkdir(os.path.join(path, 'EGG-INFO'))
- pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
- f = open(pkg_info, 'w')
- try:
- f.write(SETUPTOOLS_PKG_INFO)
- finally:
- f.close()
- return True
-
-_patch_egg_dir = _no_sandbox(_patch_egg_dir)
-
-
-def _before_install():
- log.warn('Before install bootstrap.')
- _fake_setuptools()
-
-
-def _under_prefix(location):
- if 'install' not in sys.argv:
- return True
- args = sys.argv[sys.argv.index('install') + 1:]
- for index, arg in enumerate(args):
- for option in ('--root', '--prefix'):
- if arg.startswith('%s=' % option):
- top_dir = arg.split('root=')[-1]
- return location.startswith(top_dir)
- elif arg == option:
- if len(args) > index:
- top_dir = args[index + 1]
- return location.startswith(top_dir)
- if arg == '--user' and USER_SITE is not None:
- return location.startswith(USER_SITE)
- return True
-
-
-def _fake_setuptools():
- log.warn('Scanning installed packages')
- try:
- import pkg_resources
- except ImportError:
- # we're cool
- log.warn('Setuptools or Distribute does not seem to be installed.')
- return
- ws = pkg_resources.working_set
- try:
- setuptools_dist = ws.find(
- pkg_resources.Requirement.parse('setuptools', replacement=False)
- )
- except TypeError:
- # old distribute API
- setuptools_dist = ws.find(
- pkg_resources.Requirement.parse('setuptools')
- )
-
- if setuptools_dist is None:
- log.warn('No setuptools distribution found')
- return
- # detecting if it was already faked
- setuptools_location = setuptools_dist.location
- log.warn('Setuptools installation detected at %s', setuptools_location)
-
- # if --root or --preix was provided, and if
- # setuptools is not located in them, we don't patch it
- if not _under_prefix(setuptools_location):
- log.warn('Not patching, --root or --prefix is installing Distribute'
- ' in another location')
- return
-
- # let's see if its an egg
- if not setuptools_location.endswith('.egg'):
- log.warn('Non-egg installation')
- res = _remove_flat_installation(setuptools_location)
- if not res:
- return
- else:
- log.warn('Egg installation')
- pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
- if (os.path.exists(pkg_info) and
- _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
- log.warn('Already patched.')
- return
- log.warn('Patching...')
- # let's create a fake egg replacing setuptools one
- res = _patch_egg_dir(setuptools_location)
- if not res:
- return
- log.warn('Patching complete.')
- _relaunch()
-
-
-def _relaunch():
- log.warn('Relaunching...')
- # we have to relaunch the process
- # pip marker to avoid a relaunch bug
- _cmd1 = ['-c', 'install', '--single-version-externally-managed']
- _cmd2 = ['-c', 'install', '--record']
- if sys.argv[:3] == _cmd1 or sys.argv[:3] == _cmd2:
- sys.argv[0] = 'setup.py'
- args = [sys.executable] + sys.argv
- sys.exit(subprocess.call(args))
-
-
-def _extractall(self, path=".", members=None):
- """Extract all members from the archive to the current working
- directory and set owner, modification time and permissions on
- directories afterwards. `path' specifies a different directory
- to extract to. `members' is optional and must be a subset of the
- list returned by getmembers().
- """
- import copy
- import operator
- from tarfile import ExtractError
- directories = []
-
- if members is None:
- members = self
-
- for tarinfo in members:
- if tarinfo.isdir():
- # Extract directories with a safe mode.
- directories.append(tarinfo)
- tarinfo = copy.copy(tarinfo)
- tarinfo.mode = 448 # decimal for oct 0700
- self.extract(tarinfo, path)
-
- # Reverse sort directories.
- if sys.version_info < (2, 4):
- def sorter(dir1, dir2):
- return cmp(dir1.name, dir2.name)
- directories.sort(sorter)
- directories.reverse()
- else:
- directories.sort(key=operator.attrgetter('name'), reverse=True)
-
- # Set correct owner, mtime and filemode on directories.
- for tarinfo in directories:
- dirpath = os.path.join(path, tarinfo.name)
- try:
- self.chown(tarinfo, dirpath)
- self.utime(tarinfo, dirpath)
- self.chmod(tarinfo, dirpath)
- except ExtractError:
- e = sys.exc_info()[1]
- if self.errorlevel > 1:
- raise
- else:
- self._dbg(1, "tarfile: %s" % e)
-
-
-def _build_install_args(options):
- """
- Build the arguments to 'python setup.py install' on the distribute package
- """
- install_args = []
- if options.user_install:
- if sys.version_info < (2, 6):
- log.warn("--user requires Python 2.6 or later")
- raise SystemExit(1)
- install_args.append('--user')
- return install_args
-
-def _parse_args():
- """
- Parse the command line for options
- """
- parser = optparse.OptionParser()
- parser.add_option(
- '--user', dest='user_install', action='store_true', default=False,
- help='install in user site package (requires Python 2.6 or later)')
- parser.add_option(
- '--download-base', dest='download_base', metavar="URL",
- default=DEFAULT_URL,
- help='alternative URL from where to download the distribute package')
- options, args = parser.parse_args()
- # positional arguments are ignored
- return options
-
-def main(version=DEFAULT_VERSION):
- """Install or upgrade setuptools and EasyInstall"""
- options = _parse_args()
- tarball = download_setuptools(download_base=options.download_base)
- return _install(tarball, _build_install_args(options))
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/legacy/gcimagebundle/gcimagebundlelib/block_disk.py b/legacy/gcimagebundle/gcimagebundlelib/block_disk.py
deleted file mode 100644
index a860b89..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/block_disk.py
+++ /dev/null
@@ -1,389 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module to create raw disk images.
-
-Stores a copy of directories/files in a file mounted as a partitioned block
-device.
-"""
-
-
-
-import hashlib
-import logging
-import os
-import re
-import tempfile
-
-from gcimagebundlelib import exclude_spec
-from gcimagebundlelib import fs_copy
-from gcimagebundlelib import utils
-
-
-class RawDiskError(Exception):
- """Error occured during raw disk creation."""
-
-
-class InvalidRawDiskError(Exception):
- """Error when verification fails before copying."""
-
-
-class FsRawDisk(fs_copy.FsCopy):
- """Creates a raw disk copy of OS image and bundles it into gzipped tar."""
-
- def __init__(self, fs_size, fs_type):
- """Constructor for FsRawDisk class.
-
-    Args:
-      fs_size: Size of the raw disk in bytes.
-      fs_type: Type of the filesystem to create on the disk.
-    """
- super(FsRawDisk, self).__init__()
- self._fs_size = fs_size
- self._fs_type = fs_type
-
- def _ResizeFile(self, file_path, file_size):
- logging.debug('Resizing %s to %s', file_path, file_size)
- with open(file_path, 'a') as disk_file:
- disk_file.truncate(file_size)
-
- def _InitializeDiskFileFromDevice(self, file_path):
- """Initializes disk file from the device specified in self._disk.
-
- It preserves whatever may be there on the device prior to the start of the
- first partition.
-
- At the moment this method supports devices with a single partition only.
-
- Args:
- file_path: The path where the disk file should be created.
-
- Returns:
- A tuple with partition_start, uuid. partition_start is the location
- where the first partition on the disk starts and uuid is the filesystem
- UUID to use for the first partition.
-
- Raises:
-      RawDiskError: If there is more than one partition on the disk device.
- """
- # Find the disk size
- disk_size = utils.GetDiskSize(self._disk)
- logging.debug('Size of disk is %s', disk_size)
- # Make the disk file big enough to hold the disk
- self._ResizeFile(file_path, disk_size)
- # Find the location where the first partition starts
- partition_start = utils.GetPartitionStart(self._disk, 1)
- logging.debug('First partition starts at %s', partition_start)
- # Copy all the bytes as is from the start of the disk to the start of
- # first partition
- utils.CopyBytes(self._disk, file_path, partition_start)
- # Verify there is only 1 partition on the disk
- with utils.LoadDiskImage(file_path) as devices:
- # For now we only support disks with a single partition.
- if len(devices) == 0:
- raise RawDiskError(
- 'Device %s should be a disk not a partition.' % self._disk)
- elif len(devices) != 1:
- raise RawDiskError(
- 'Device %s has more than 1 partition. Only devices '
- 'with a single partition are supported.' % self._disk)
- # Remove the first partition from the file we are creating. We will
- # recreate a partition that will fit inside _fs_size later.
- utils.RemovePartition(file_path, 1)
- # Resize the disk.raw file down to self._fs_size
- # We do this after removing the first partition to ensure that an
- # existing partition doesn't fall outside the boundary of the disk device.
- self._ResizeFile(file_path, self._fs_size)
- # Get UUID of the first partition on the disk
- # TODO(user): This is very hacky and relies on the disk path being
- # similar to /dev/sda etc which is bad. Need to fix it.
- uuid = utils.GetUUID(self._disk + '1')
- return partition_start, uuid
-
- def Bundleup(self):
- """Creates a raw disk copy of OS image and bundles it into gzipped tar.
-
- Returns:
-      The size of the generated raw disk and the SHA1 digest of the tar archive.
-
- Raises:
- RawDiskError: If number of partitions in a created image doesn't match
- expected count.
- """
-
- # Create sparse file with specified size
- disk_file_path = os.path.join(self._scratch_dir, 'disk.raw')
- with open(disk_file_path, 'wb') as _:
- pass
- self._excludes.append(exclude_spec.ExcludeSpec(disk_file_path))
-
- logging.info('Initializing disk file')
- partition_start = None
- uuid = None
- if self._disk:
- # If a disk device has been provided then preserve whatever is there on
- # the disk before the first partition in case there is an MBR present.
- partition_start, uuid = self._InitializeDiskFileFromDevice(disk_file_path)
- else:
- # User didn't specify a disk device. Initialize a device with a simple
- # partition table.
- self._ResizeFile(disk_file_path, self._fs_size)
- # User didn't specify a disk to copy. Create a new partition table
- utils.MakePartitionTable(disk_file_path)
- # Pass 1MB as start to avoid 'Warning: The resulting partition is not
- # properly aligned for best performance.' from parted.
- partition_start = 1024 * 1024
-
- # Create a new partition starting at partition_start of size
- # self._fs_size - partition_start
- utils.MakePartition(disk_file_path, 'primary', 'ext2', partition_start,
- self._fs_size - partition_start)
- with utils.LoadDiskImage(disk_file_path) as devices:
- # For now we only support disks with a single partition.
- if len(devices) != 1:
- raise RawDiskError(devices)
- # List contents of /dev/mapper to help with debugging. Contents will
- # be listed in debug log only
- utils.RunCommand(['ls', '/dev/mapper'])
- logging.info('Making filesystem')
- uuid = utils.MakeFileSystem(devices[0], self._fs_type, uuid)
- with utils.LoadDiskImage(disk_file_path) as devices:
- if uuid is None:
- raise Exception('Could not get uuid from MakeFileSystem')
- mount_point = tempfile.mkdtemp(dir=self._scratch_dir)
- with utils.MountFileSystem(devices[0], mount_point, self._fs_type):
- logging.info('Copying contents')
- self._CopySourceFiles(mount_point)
- self._CopyPlatformSpecialFiles(mount_point)
- self._ProcessOverwriteList(mount_point)
- self._CleanupNetwork(mount_point)
- self._UpdateFstab(mount_point, uuid)
-
- tar_entries = []
-
- manifest_file_path = os.path.join(self._scratch_dir, 'manifest.json')
- manifest_created = self._manifest.CreateIfNeeded(manifest_file_path)
- if manifest_created:
- tar_entries.append(manifest_file_path)
-
- tar_entries.append(disk_file_path)
- logging.info('Creating tar.gz archive')
- utils.TarAndGzipFile(tar_entries,
- self._output_tarfile)
- for tar_entry in tar_entries:
- os.remove(tar_entry)
-
- # TODO(user): It would be better to compute tar.gz file hash during
- # archiving.
- h = hashlib.sha1()
- with open(self._output_tarfile, 'rb') as tar_file:
- for chunk in iter(lambda: tar_file.read(8192), ''):
- h.update(chunk)
- return (self._fs_size, h.hexdigest())
-
- def _CopySourceFiles(self, mount_point):
- """Copies all source files/directories to a mounted raw disk.
-
- There are several cases which must be handled separately:
- 1. src=dir1 and dest is empty. In this case we simply copy the content of
- dir1 to mount_point.
- 2. src=dir1 and dest=dir2. In this case dir1 is copied to mount_point
- under a new name dir2, so its content would be copied under
- mount_point/dir2.
- 3. src=file1/dir1 and dest=file2/dir2 and is_recursive=False. file1/dir1
- is copied to mount_point/file2 or mount_point/dir2.
-
- Args:
- mount_point: A path to a mounted raw disk.
- """
- for (src, dest, is_recursive) in self._srcs:
- # Generate a list of files/directories excluded from copying to raw disk.
- # rsync expects them to be relative to src directory so we need to
- # regenerate this list for every src separately.
- with tempfile.NamedTemporaryFile(dir=self._scratch_dir) as rsync_file:
- for spec in self._excludes:
- rsync_file.write(spec.GetRsyncSpec(src))
-
- # make sure that rsync utility sees all the content of rsync_file which
- # otherwise can be buffered.
- rsync_file.flush()
- if is_recursive:
- # if a directory ends with / rsync copies the content of a
- # directory, otherwise it also copies the directory itself.
- src = src.rstrip('/')
- if not dest:
- src += '/'
- utils.Rsync(src, mount_point, rsync_file.name,
- self._ignore_hard_links, recursive=True, xattrs=True)
- if dest:
- os.rename(os.path.join(mount_point, os.path.basename(src)),
- os.path.join(mount_point, dest))
- else:
- utils.Rsync(src, os.path.join(mount_point, dest), rsync_file.name,
- self._ignore_hard_links, recursive=False, xattrs=True)
-
- def _CopyPlatformSpecialFiles(self, mount_point):
- """Copies platform special files to a mounted raw disk.
-
- Args:
- mount_point: A path to a mounted raw disk.
- """
- if self._platform:
- special_files = self._platform.GetPlatformSpecialFiles(self._scratch_dir)
- for (src, dest) in special_files:
- # Ensure we don't use extended attributes here, so that copying /selinux
- # on Linux doesn't try and fail to preserve the SELinux context. That
- # doesn't work and causes rsync to return a nonzero status code.
- utils.Rsync(src, os.path.join(mount_point, dest), None,
- self._ignore_hard_links, recursive=False, xattrs=False)
-
- def _ProcessOverwriteList(self, mount_point):
- """Overwrites a set of files/directories requested by platform.
-
- Args:
- mount_point: A path to a mounted raw disk.
- """
- for file_name in self._overwrite_list:
- file_path = os.path.join(mount_point, file_name)
- if os.path.exists(file_path):
- if os.path.isdir(file_path):
- # TODO(user): platform.Overwrite is expected to overwrite the
- # directory in place from what I can tell. In case of a file it will
-          # create a new file which must be copied to the mounted raw disk. So
-          # there is some inconsistency that would need to be addressed if and
-          # when we encounter a platform that wants to overwrite a directory.
- self._platform.Overwrite(file_path, file_name, self._scratch_dir)
- logging.info('rawdisk: modifying directory %s', file_path)
- else:
- new_file = self._platform.Overwrite(file_path, file_name,
- self._scratch_dir)
- logging.info('rawdisk: modifying %s from %s', file_path, new_file)
- utils.Rsync(new_file, file_path, None, self._ignore_hard_links,
- recursive=False, xattrs=True)
-
-
- def _CleanupNetwork(self, mount_point):
- """Remove any record of our current MAC address."""
- net_rules_path = os.path.join(
- mount_point,
- 'lib/udev/rules.d/75-persistent-net-generator.rules')
- if os.path.exists(net_rules_path):
- os.remove(net_rules_path)
-
- def _UpdateFstab(self, mount_point, uuid):
- """Update /etc/fstab with the new root fs UUID."""
- fstab_path = os.path.join(mount_point, 'etc/fstab')
- if not os.path.exists(fstab_path):
-      logging.warning('/etc/fstab does not exist. Not updating fstab UUID.')
- return
-
- f = open(fstab_path, 'r')
- lines = f.readlines()
- f.close()
-
- def UpdateUUID(line):
- """Replace the UUID on the entry for /."""
- g = re.match(r'UUID=\S+\s+/\s+(.*)', line)
- if not g:
- return line
- return 'UUID=%s / %s\n' % (uuid, g.group(1))
-
- logging.debug('Original /etc/fstab contents:\n%s', lines)
- updated_lines = map(UpdateUUID, lines)
- if lines == updated_lines:
- logging.debug('No changes required to /etc/fstab')
- return
- logging.debug('Updated /etc/fstab contents:\n%s', updated_lines)
- f = open(fstab_path, 'w')
- f.write(''.join(updated_lines))
- f.close()
-
-
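
For a concrete view of what `_UpdateFstab` rewrites: the regex matches only the root entry and preserves everything after the mount point, so swap and other entries pass through untouched (UUID values here are hypothetical):

    import re

    line = 'UUID=11111111-2222 / ext4 defaults 0 1\n'  # hypothetical entry
    g = re.match(r'UUID=\S+\s+/\s+(.*)', line)
    print 'UUID=%s / %s' % ('33333333-4444', g.group(1))
    # -> UUID=33333333-4444 / ext4 defaults 0 1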
-class RootFsRaw(FsRawDisk):
- """Block disk copy of the root file system.
-
- Takes care of additional checks for a root file system.
- """
-
- def __init__(
- self, fs_size, fs_type, skip_disk_space_check, statvfs = os.statvfs):
- # statvfs parameter is for unit test to mock out os.statvfs call.
- super(RootFsRaw, self).__init__(fs_size, fs_type)
- self._skip_disk_space_check = skip_disk_space_check
- self._statvfs = statvfs
-
- def _Verify(self):
- super(RootFsRaw, self)._Verify()
- # exactly one file system to bundle up
- if len(self._srcs) != 1:
- raise InvalidRawDiskError('Root filesystems must have exactly one src.')
- # check that destination field is empty.
- if self._srcs[0][1]:
- raise InvalidRawDiskError('Root filesystems must be copied as /')
- if (not self._skip_disk_space_check and
- self._srcs[0][0] == '/'):
- self._VerifyDiskSpace()
-
- def _VerifyDiskSpace(self):
- """Verify that there is enough free disk space to generate the image file"""
- # We use a very quick and simplistic check,
- # DiskSpaceNeeded = disk.raw + image.tar.gz + LogFile
- # disk.raw = PartitionTable + AllFilesCopied
- # AllFilesCopied = RootDiskSize - RootDiskFreeSize - ExcludedFiles
- # We ignore LogFile, PartitionTable, and ExcludedFiles.
- # Some empirical experience showed that the compression ratio of the
- # tar.gz file is about 1/3. To be conservative, we assume image.tar.gz is
- # about 40% of disk.raw file.
- # As a result, DiskSpaceNeeded=1.4*(RootDiskSize - RootDiskFreeSize)
- # TODO(user): Make this check more accurate because ignoring ExcludedFiles
- # can result in significant overestimation of disk
- # space needed if the user has large disk space used in /tmp, for example.
- root_fs = self._statvfs(self._srcs[0][0])
- disk_space_needed = long(1.4 * root_fs.f_bsize * (root_fs.f_blocks -
- root_fs.f_bfree))
- logging.info(("Root disk on %s: f_bsize=%d f_blocks=%d f_bfree=%d. "
- "Estimated space needed is %d (may be overestimated)."),
- self._srcs[0][0],
- root_fs.f_bsize,
- root_fs.f_blocks,
- root_fs.f_bfree,
- disk_space_needed)
-
- # self._scratch_dir is where we will put the disk.raw and *.tar.gz file.
- scratch_fs = self._statvfs(self._scratch_dir)
- free_space = scratch_fs.f_bsize * scratch_fs.f_bfree
- logging.info("Free disk space for %s is %d bytes.",
- self._scratch_dir,
- free_space)
-
- if disk_space_needed > free_space:
- errorMessage = ("The operation may require up to %d bytes of disk space. "
- "However, the free disk space for %s is %d bytes. Please consider "
- "freeing more disk space. Note that the disk space required may "
- "be overestimated because it does not exclude temporary files that "
- "will not be copied. You may use --skip_disk_space_check to disable "
- "this check.") % (disk_space_needed, self._scratch_dir, free_space)
- raise InvalidRawDiskError(errorMessage)
- if disk_space_needed > self._fs_size:
- errorMessage = ("The root disk files to be copied may require up to %d "
- "bytes. However, the limit on the image disk file is %d bytes. "
- "Please consider deleting unused files from root disk, "
- "or increasing the image disk file limit with --fssize option. "
- "Note that the disk space required may "
- "be overestimated because it does not exclude temporary files that "
- "will not be copied. You may use --skip_disk_space_check to disable "
- "this check.") % (disk_space_needed, self._fs_size)
- raise InvalidRawDiskError(errorMessage)
-
-
-
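
A worked example of the `_VerifyDiskSpace` estimate above, with hypothetical statvfs numbers:

    f_bsize = 4096                          # hypothetical block size
    f_blocks = 2000000                      # total blocks on the root fs
    f_bfree = 1000000                       # free blocks
    used = f_bsize * (f_blocks - f_bfree)   # 4096000000 bytes to be copied
    needed = long(1.4 * used)               # ~5.7 GB: disk.raw plus ~40% tar.gz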
diff --git a/legacy/gcimagebundle/gcimagebundlelib/centos.py b/legacy/gcimagebundle/gcimagebundlelib/centos.py
deleted file mode 100644
index 1a082de..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/centos.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Centos specific platform info."""
-
-
-
-import os
-import platform
-import re
-
-from gcimagebundlelib import linux
-
-
-class Centos(linux.LinuxPlatform):
- """Centos specific information."""
-
- @staticmethod
- def IsThisPlatform(root='/'):
- release_file = root + '/etc/redhat-release'
- if os.path.exists(release_file):
- (_, _, flavor, _) = Centos.ParseRedhatRelease(release_file)
- if flavor and flavor.lower() == 'centos':
- return True
- return False
-
- @staticmethod
- def ParseRedhatRelease(release_file='/etc/redhat-release'):
- """Parses the /etc/redhat-release file."""
- f = open(release_file)
- lines = f.readlines()
- f.close()
- if not lines:
- return (None, None, None, None)
- line0 = lines[0]
- # Matches both CentOS 6 and CentOS 7 formats.
- # CentOS 6: CentOS release 6.5 (Final)
- # CentOS 7: CentOS Linux release 7.0.1406 (Core)
- g = re.match(r'(\S+)( Linux)? release (\d+(\.\d+)+) \(([^)]*)\)', line0)
- if not g:
- return (None, None, None, None)
- (osname, version, label) = (g.group(1), g.group(3), g.group(5))
- return (osname, label, osname, version)
-
- def __init__(self):
- super(Centos, self).__init__()
- (self.distribution_codename, _, self.distribution,
- self.distribution_version) = Centos.ParseRedhatRelease()
-
- def GetPreferredFilesystemType(self):
- (_,version,_) = platform.linux_distribution()
- if version.startswith('7'):
- return 'xfs'
- return 'ext4'
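
The release regex above accepts both formats quoted in the comment; for example:

    import re

    for line0 in ('CentOS release 6.5 (Final)',
                  'CentOS Linux release 7.0.1406 (Core)'):
      g = re.match(r'(\S+)( Linux)? release (\d+(\.\d+)+) \(([^)]*)\)', line0)
      print g.group(1), g.group(3), g.group(5)
    # -> CentOS 6.5 Final
    # -> CentOS 7.0.1406 Core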
diff --git a/legacy/gcimagebundle/gcimagebundlelib/debian.py b/legacy/gcimagebundle/gcimagebundlelib/debian.py
deleted file mode 100644
index 957e3a7..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/debian.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Debian Linux specific platform info."""
-
-
-
-import platform
-
-from gcimagebundlelib import linux
-
-
-class Debian(linux.LinuxPlatform):
- """Debian Linux specific information."""
-
- @staticmethod
- def IsThisPlatform(root='/'):
- (distribution, _, _) = platform.linux_distribution()
- if distribution and distribution.lower() == 'debian':
- return True
- return False
-
- def __init__(self):
- super(Debian, self).__init__()
diff --git a/legacy/gcimagebundle/gcimagebundlelib/exclude_spec.py b/legacy/gcimagebundle/gcimagebundlelib/exclude_spec.py
deleted file mode 100644
index b5bc237..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/exclude_spec.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Exclude file specification."""
-
-
-
-import logging
-import os
-
-
-class ExcludeSpec(object):
- """Specifies how exclusion of a path should be handled."""
-
- def __init__(self, path, preserve_file=False, preserve_dir=False,
- preserve_subdir=False):
- self.path = path
- self.preserve_dir = preserve_dir
- self.preserve_file = False
- self.preserve_subdir = False
- # Preserve files and subdirs only if dir is preserved.
- if preserve_file and preserve_dir:
- self.preserve_file = True
- if preserve_subdir and preserve_dir:
- self.preserve_subdir = True
-
- def ShouldExclude(self, filename):
- prefix = os.path.commonprefix([filename, self.path])
- if prefix == self.path:
- if ((self.preserve_dir and filename == self.path) or
- (self.preserve_subdir and os.path.isdir(filename)) or
- (self.preserve_file and os.path.isfile(filename))):
- logging.warning('preserving %s', filename)
- return False
- return True
- return False
-
- def GetSpec(self):
- return '(%s, %d:%d:%d)' % (self.path, self.preserve_file, self.preserve_dir,
- self.preserve_subdir)
-
- def GetRsyncSpec(self, src):
- """Returns exclude spec in a format required by rsync.
-
- Args:
- src: source directory path passed to rsync. rsync expects exclude-spec to
- be relative to src directory.
-
- Returns:
- A string of exclude filters in rsync exclude-from file format.
- """
- spec = ''
- prefix = os.path.commonprefix([src, self.path])
- if prefix == src:
- relative_path = os.path.join('/', self.path[len(prefix):])
- if self.preserve_dir:
- spec += '+ %s\n' % relative_path
- if self.preserve_file or self.preserve_subdir:
- if os.path.isdir(self.path):
- for f in os.listdir(self.path):
- file_path = os.path.join(self.path, f)
- relative_file_path = os.path.join(relative_path, f)
- if self.preserve_file and os.path.isfile(file_path):
- spec += '+ %s\n' % relative_file_path
- if self.preserve_subdir and os.path.isdir(file_path):
- spec += '+ %s\n' % relative_file_path
- else:
- spec += '- %s\n' % relative_path
- spec += '- %s\n' % os.path.join(relative_path, '**')
- return spec
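
For example, the `/tmp` entry from the default Linux exclude list (`preserve_dir=True`) expands to an rsync filter that keeps the directory itself but drops its contents:

    from gcimagebundlelib import exclude_spec

    spec = exclude_spec.ExcludeSpec('/tmp', preserve_dir=True)
    print spec.GetRsyncSpec('/')
    # + /tmp
    # - /tmp
    # - /tmp/**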
diff --git a/legacy/gcimagebundle/gcimagebundlelib/fedora.py b/legacy/gcimagebundle/gcimagebundlelib/fedora.py
deleted file mode 100644
index 21d098b..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/fedora.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Fedora specific platform info."""
-
-
-
-import os
-import re
-
-from gcimagebundlelib import linux
-
-
-class Fedora(linux.LinuxPlatform):
- """Fedora specific information."""
-
- @staticmethod
- def IsThisPlatform(root='/'):
- release_file = root + '/etc/redhat-release'
- if os.path.exists(release_file):
- (_, _, flavor, _) = Fedora.ParseRedhatRelease(release_file)
- if flavor and flavor.lower() == 'fedora':
- return True
- return False
-
- @staticmethod
- def ParseRedhatRelease(release_file='/etc/redhat-release'):
- """Parses the /etc/redhat-release file."""
- f = open(release_file)
- lines = f.readlines()
- f.close()
- if not lines:
- return (None, None, None, None)
- line0 = lines[0]
- g = re.match(r'(\S+) release (\d+) \(([^)]*)\)', line0)
- if not g:
- return (None, None, None, None)
- (osname, version, label) = (g.group(1), g.group(2), g.group(3))
- return (osname, label, osname, version)
-
- def __init__(self):
- super(Fedora, self).__init__()
- (self.distribution_codename, _, self.distribution,
- self.distribution_version) = Fedora.ParseRedhatRelease()
diff --git a/legacy/gcimagebundle/gcimagebundlelib/fs_copy.py b/legacy/gcimagebundle/gcimagebundlelib/fs_copy.py
deleted file mode 100644
index e9adc91..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/fs_copy.py
+++ /dev/null
@@ -1,180 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Creates a copy of specified directories\files."""
-
-
-
-import logging
-import os
-import re
-
-from gcimagebundlelib import manifest
-from gcimagebundlelib import utils
-
-
-class FsCopyError(Exception):
- """Error occured in fs copy operation."""
-
-
-class InvalidFsCopyError(Exception):
- """Error when verification fails before fs copying."""
-
-
-class FsCopy(object):
- """Specifies which files/directories must be copied."""
-
- def __init__(self):
- # Populate the required parameters with None so we can verify.
- self._output_tarfile = None
- self._srcs = []
- self._excludes = []
- self._key = None
- self._recursive = True
- self._fs_size = 0
- self._ignore_hard_links = False
- self._platform = None
- self._overwrite_list = []
- self._scratch_dir = '/tmp'
- self._disk = None
- self._manifest = manifest.ImageManifest(is_gce_instance=utils.IsRunningOnGCE())
-
- def SetTarfile(self, tar_file):
- """Sets tar file which will contain file system copy.
-
- Args:
- tar_file: path to a tar file.
- """
- self._output_tarfile = tar_file
-
- def AddDisk(self, disk):
- """Adds the disk which should be bundled.
-
- Args:
- disk: The block disk that needs to be bundled.
- """
- self._disk = disk
-
- def AddSource(self, src, arcname='', recursive=True):
- """Adds a source to be copied to the tar file.
-
- Args:
- src: path to directory/file to be copied.
- arcname: name of src in the tar archive. If arcname is empty, then instead
- of copying src itself only its content is copied.
- recursive: specifies if src directory should be copied recursively.
-
- Raises:
- ValueError: If src path doesn't exist.
- """
- if not os.path.exists(src):
- raise ValueError('invalid path')
- # Note that there is a fundamental asymmetry here as
- # abspath('/') => '/' while abspath('/usr/') => '/usr'.
- # This creates some subtleties elsewhere in the code.
- self._srcs.append((os.path.abspath(src), arcname, recursive))
-
- def AppendExcludes(self, excludes):
- """Adds a file/directory to be excluded from file copy.
-
- Args:
- excludes: A list of ExcludeSpec objects.
- """
- self._excludes.extend(excludes)
-
- def SetKey(self, key):
- """Sets a key to use to sign the archive digest.
-
- Args:
- key: key to use to sign the archive digest.
- """
- # The key is ignored for now.
- # TODO(user): sign the digest with the key
- self._key = key
-
- def SetPlatform(self, platform):
- """Sets the OS platform which is used to create an image.
-
- Args:
- platform: OS platform specific settings.
- """
- self._platform = platform
- logging.warning('overwrite list = %s',
- ' '.join(platform.GetOverwriteList()))
- self._overwrite_list = [re.sub('^/', '', x)
- for x in platform.GetOverwriteList()]
-
- def _SetManifest(self, image_manifest):
- """For test only, allows to set a test manifest object."""
- self._manifest = image_manifest
-
- def SetScratchDirectory(self, directory):
- """Sets a directory used for storing intermediate results.
-
- Args:
- directory: scratch directory path.
- """
- self._scratch_dir = directory
-
- def IgnoreHardLinks(self):
- """Requests that hard links should not be copied as hard links."""
-
- # TODO(user): I don't see a reason for this option to exist. Currently
- # there is a difference in how this option is interpreted between FsTarball
- # and FsRawDisk. FsTarball only copies one hard link to an inode and ignores
- # the rest of them. FsRawDisk copies the content of a file that hard link is
- # pointing to instead of recreating a hard link. Either option seems useless
- # for creating a copy of a file system.
- self._ignore_hard_links = True
-
- def Verify(self):
- """Verify if we have all the components to build a tar."""
- self._Verify()
-
- def Bundleup(self):
- """Creates the tar image based on set parameters.
-
- Returns:
-      A tuple of the file system size and the SHA1 digest of the tar archive.
- """
- return (0, None)
-
- def _Verify(self):
- """Verifies the tar attributes. Raises InvalidTarballError.
-
- Raises:
- InvalidFsCopyError: If not all required parameters are set.
- FsCopyError: If source file does not exist.
- """
- if not self._output_tarfile or not self._srcs or not self._key:
- raise InvalidFsCopyError('Incomplete copy spec')
- for (src, _, _) in self._srcs:
- if not os.path.exists(src):
-        raise FsCopyError('%s does not exist' % src)
-
- def _ShouldExclude(self, filename):
- """"Checks if a file/directory are excluded from a copy.
-
- Args:
- filename: a file/directory path.
-
- Returns:
- True if a file/directory shouldn't be copied, False otherwise.
- """
- for spec in self._excludes:
- if spec.ShouldExclude(filename):
- logging.info('tarfile: Excluded %s', filename)
- return True
- return False
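
The asymmetry noted in `AddSource` is easy to see directly: `os.path.abspath` leaves the root path alone but strips the trailing slash everywhere else:

    import os.path

    print os.path.abspath('/')      # '/'
    print os.path.abspath('/usr/')  # '/usr'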
diff --git a/legacy/gcimagebundle/gcimagebundlelib/gcel.py b/legacy/gcimagebundle/gcimagebundlelib/gcel.py
deleted file mode 100644
index 2622cf7..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/gcel.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""GCE Linux specific platform info."""
-
-
-
-import csv
-import os
-
-from gcimagebundlelib import linux
-
-
-class Gcel(linux.LinuxPlatform):
- """GCE Linux specific information."""
-
- @staticmethod
- def IsThisPlatform(root='/'):
- release_file = root + '/etc/lsb-release'
- if os.path.exists(release_file):
- (flavor, _, _, _) = Gcel.ParseLsbRelease(release_file)
- if flavor and flavor.lower() == 'gcel':
- return True
- return False
-
- @staticmethod
- def ParseLsbRelease(release_file='/etc/lsb-release'):
- """Parses the /etc/lsb-releases file.
-
- Returns:
- A 4-tuple containing id, release, codename, and description
- """
- release_info = {}
- for line in csv.reader(open(release_file), delimiter='='):
- if len(line) > 1:
- release_info[line[0]] = line[1]
- return (release_info.get('DISTRIB_ID', None),
- release_info.get('DISTRIB_RELEASE', None),
- release_info.get('DISTRIB_CODENAME', None),
- release_info.get('DISTRIB_DESCRIPTION', None))
-
- def __init__(self):
- super(Gcel, self).__init__()
- (self.distribution, self.distribution_version,
- self.distribution_codename, _) = Gcel.ParseLsbRelease()
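
Because the file is plain KEY=VALUE pairs, `csv.reader` with '=' as the delimiter also strips the double quotes around values; a sketch with hypothetical file contents:

    import csv

    lines = ['DISTRIB_ID=Ubuntu',
             'DISTRIB_RELEASE=12.04',
             'DISTRIB_DESCRIPTION="Ubuntu 12.04 LTS"']
    info = {}
    for row in csv.reader(lines, delimiter='='):
      if len(row) > 1:
        info[row[0]] = row[1]
    print info['DISTRIB_ID'], info['DISTRIB_RELEASE']  # Ubuntu 12.04
    print info['DISTRIB_DESCRIPTION']                  # Ubuntu 12.04 LTS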
diff --git a/legacy/gcimagebundle/gcimagebundlelib/imagebundle.py b/legacy/gcimagebundle/gcimagebundlelib/imagebundle.py
deleted file mode 100755
index f275c3c..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/imagebundle.py
+++ /dev/null
@@ -1,265 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Tool to bundle root filesystem to a tarball.
-
-Creates a tar bundle and a Manifest, which can be uploaded to image store.
-"""
-
-
-
-import logging
-from optparse import OptionParser
-import os
-import shutil
-import subprocess
-import tempfile
-import time
-
-from gcimagebundlelib import block_disk
-from gcimagebundlelib import exclude_spec
-from gcimagebundlelib import platform_factory
-from gcimagebundlelib import utils
-
-def SetupArgsParser():
- """Sets up the command line flags."""
- parser = OptionParser()
- parser.add_option('-d', '--disk', dest='disk',
- default='/dev/sda',
- help='Disk to bundle.')
- parser.add_option('-r', '--root', dest='root_directory',
- default='/', metavar='ROOT',
- help='Root of the file system to bundle.'
- ' Recursively bundles all sub directories.')
- parser.add_option('-e', '--excludes', dest='excludes',
- help='Comma separated list of sub directories to exclude.'
- ' The defaults are platform specific.')
- parser.add_option('-o', '--output_directory', dest='output_directory',
- default='/tmp/', metavar='DIR',
- help='Output directory for image.')
- parser.add_option('--output_file_name', dest='output_file_name',
- default=None, metavar='FILENAME',
- help=('Output filename for the image. Default is a digest'
- ' of the image bytes.'))
- parser.add_option('--include_mounts', dest='include_mounts',
- help='Don\'t ignore mounted filesystems under ROOT.',
- action='store_true', default=False)
- parser.add_option('-v', '--version',
- action='store_true', dest='display_version', default=False,
- help='Print the tool version.')
- parser.add_option('--loglevel', dest='log_level',
- help='Debug logging level.', default='INFO',
-                    choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'])
- parser.add_option('--log_file', dest='log_file',
- help='Output file for log messages.')
- parser.add_option('-k', '--key', dest='key', default='nebula',
- help='Public key used for signing the image.')
- parser.add_option('--nocleanup', dest='cleanup',
- action='store_false', default=True,
-                    help='Do not clean up temporary and log files.')
- #TODO(user): Get dehumanize.
- parser.add_option('--fssize', dest='fs_size', default=10*1024*1024*1024,
- type='int', help='File system size in bytes')
- parser.add_option('-b', '--bucket', dest='bucket',
- help='Destination storage bucket')
- parser.add_option('-f', '--filesystem', dest='file_system',
- default=None,
- help='File system type for the image.')
- parser.add_option('--skip_disk_space_check', dest='skip_disk_space_check',
- default=False, action='store_true',
- help='Skip the disk space requirement check.')
-
- return parser
-
-
-def VerifyArgs(parser, options):
- """Verifies that commandline flags are consistent."""
- if not options.output_directory:
- parser.error('output bundle directory must be specified.')
- if not os.path.exists(options.output_directory):
- parser.error('output bundle directory does not exist.')
-
- # TODO(user): add more verification as needed
-
-def EnsureSuperUser():
- """Ensures that current user has super user privileges."""
- if os.getuid() != 0:
- logging.warning('Tool must be run as root.')
- exit(-1)
-
-
-def GetLogLevel(options):
- """Log Level string to logging.LogLevel mapping."""
- level = {
- 'DEBUG': logging.DEBUG,
- 'INFO': logging.INFO,
- 'WARNING': logging.WARNING,
- 'ERROR': logging.ERROR,
- 'CRITICAL': logging.CRITICAL
- }
- if options.log_level in level:
- return level[options.log_level]
-  print 'Invalid logging level. Defaulting to INFO.'
- return logging.INFO
-
-
-def SetupLogging(options, log_dir='/tmp'):
- """Set up logging.
-
- All messages above INFO level are also logged to console.
-
- Args:
- options: collection of command line options.
- log_dir: directory used to generate log files.
- """
- if options.log_file:
- logfile = options.log_file
- else:
- logfile = tempfile.mktemp(dir=log_dir, prefix='bundle_log_')
- print 'Starting logging in %s' % logfile
- logging.basicConfig(filename=logfile,
- level=GetLogLevel(options),
- format='%(asctime)s %(levelname)s:%(name)s:%(message)s')
- # Use GMT timestamp in logging.
- logging.Formatter.converter=time.gmtime
- console = logging.StreamHandler()
- console.setLevel(GetLogLevel(options))
- logging.getLogger().addHandler(console)
-
-
-def PrintVersionInfo():
- #TODO: Should read from the VERSION file instead.
- print 'version 1.3.1'
-
-
-def GetTargetFilesystem(options, guest_platform):
- if options.file_system:
- return options.file_system
- else:
- return guest_platform.GetPreferredFilesystemType()
-
-
-def main():
- parser = SetupArgsParser()
- (options, _) = parser.parse_args()
- if options.display_version:
- PrintVersionInfo()
- return 0
- EnsureSuperUser()
- VerifyArgs(parser, options)
-
- scratch_dir = tempfile.mkdtemp(dir=options.output_directory)
- SetupLogging(options, scratch_dir)
- logging.warn('============================================================\n'
- 'Warning: gcimagebundle is deprecated. See\n'
- 'https://cloud.google.com/compute/docs/creating-custom-image'
- '#export_an_image_to_google_cloud_storage\n'
- 'for updated instructions.\n'
- '============================================================')
- try:
- guest_platform = platform_factory.PlatformFactory(
- options.root_directory).GetPlatform()
- except platform_factory.UnknownPlatformException:
- logging.critical('Platform is not supported.'
- ' Platform rules can be added to platform_factory.py.')
- return -1
-
- temp_file_name = tempfile.mktemp(dir=scratch_dir, suffix='.tar.gz')
-
- file_system = GetTargetFilesystem(options, guest_platform)
- logging.info('File System: %s', file_system)
- logging.info('Disk Size: %s bytes', options.fs_size)
- bundle = block_disk.RootFsRaw(
- options.fs_size, file_system, options.skip_disk_space_check)
- bundle.SetTarfile(temp_file_name)
- if options.disk:
- readlink_command = ['readlink', '-f', options.disk]
- final_path = utils.RunCommand(readlink_command).strip()
- logging.info('Resolved %s to %s', options.disk, final_path)
- bundle.AddDisk(final_path)
- # TODO(user): Find the location where the first partition of the disk
- # is mounted and add it as the source instead of relying on the source
- # param flag
- bundle.AddSource(options.root_directory)
- bundle.SetKey(options.key)
- bundle.SetScratchDirectory(scratch_dir)
-
- # Merge platform specific exclude list, mounts points
- # and user specified excludes
- excludes = guest_platform.GetExcludeList()
- if options.excludes:
- excludes.extend([exclude_spec.ExcludeSpec(x) for x in
- options.excludes.split(',')])
- logging.info('exclude list: %s', ' '.join([x.GetSpec() for x in excludes]))
- bundle.AppendExcludes(excludes)
- if not options.include_mounts:
-    mount_points = utils.GetMounts(options.root_directory)
-    logging.info('ignoring mounts %s', ' '.join(mount_points))
-    bundle.AppendExcludes([exclude_spec.ExcludeSpec(x, preserve_dir=True)
-                           for x in mount_points])
- bundle.SetPlatform(guest_platform)
-
- # Verify that bundle attributes are correct and create tar bundle.
- bundle.Verify()
- (fs_size, digest) = bundle.Bundleup()
- if not digest:
- logging.critical('Could not get digest for the bundle.'
- ' The bundle may not be created correctly')
- return -1
- if fs_size > options.fs_size:
- logging.critical('Size of tar %d exceeds the file system size %d.', fs_size,
- options.fs_size)
- return -1
-
- if options.output_file_name:
- output_file = os.path.join(
- options.output_directory, options.output_file_name)
- else:
- output_file = os.path.join(
- options.output_directory, '%s.image.tar.gz' % digest)
-
- os.rename(temp_file_name, output_file)
-  logging.info('Created tar.gz file at %s', output_file)
-
- if options.bucket:
- bucket = options.bucket
- if bucket.startswith('gs://'):
- output_bucket = '%s/%s' % (
- bucket, os.path.basename(output_file))
- else:
- output_bucket = 'gs://%s/%s' % (
- bucket, os.path.basename(output_file))
-
- # /usr/local/bin not in redhat root PATH by default
- if '/usr/local/bin' not in os.environ['PATH']:
- os.environ['PATH'] += ':/usr/local/bin'
-
- # TODO: Consider using boto library directly.
- cmd = ['gsutil', 'cp', output_file, output_bucket]
- retcode = subprocess.call(cmd)
- if retcode != 0:
- logging.critical('Failed to copy image to bucket. '
- 'gsutil returned %d. To retry, run the command: %s',
- retcode, ' '.join(cmd))
-
- return -1
- logging.info('Uploaded image to %s', output_bucket)
-
- # If we've uploaded, then we can remove the local file.
- os.remove(output_file)
-
- if options.cleanup:
- shutil.rmtree(scratch_dir)
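
A minimal sketch of the parser defined in `SetupArgsParser` (the disk and output directory are hypothetical; `fs_size` falls back to its 10 GB default):

    from gcimagebundlelib import imagebundle

    parser = imagebundle.SetupArgsParser()
    (options, _) = parser.parse_args(['-d', '/dev/sda', '-o', '/tmp/out'])
    print options.disk, options.output_directory, options.fs_size
    # /dev/sda /tmp/out 10737418240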
diff --git a/legacy/gcimagebundle/gcimagebundlelib/linux.py b/legacy/gcimagebundle/gcimagebundlelib/linux.py
deleted file mode 100644
index ff8c1d4..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/linux.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Base class for Linux platform specific information."""
-
-
-
-import os
-import platform
-import stat
-
-from gcimagebundlelib import exclude_spec
-from gcimagebundlelib import os_platform
-
-
-class LinuxPlatform(os_platform.Platform):
- """Base class for all Linux flavors."""
- EXCLUDE_LIST = [
- exclude_spec.ExcludeSpec('/etc/ssh/.host_key_regenerated'),
- exclude_spec.ExcludeSpec('/dev', preserve_dir=True),
- exclude_spec.ExcludeSpec('/proc', preserve_dir=True),
- exclude_spec.ExcludeSpec('/run',
- preserve_dir=True, preserve_subdir=True),
- exclude_spec.ExcludeSpec('/selinux'),
- exclude_spec.ExcludeSpec('/tmp', preserve_dir=True),
- exclude_spec.ExcludeSpec('/sys', preserve_dir=True),
- exclude_spec.ExcludeSpec('/var/lib/google/per-instance',
- preserve_dir=True),
- exclude_spec.ExcludeSpec('/var/lock',
- preserve_dir=True, preserve_subdir=True),
- exclude_spec.ExcludeSpec('/var/log',
- preserve_dir=True, preserve_subdir=True),
- exclude_spec.ExcludeSpec('/var/run',
- preserve_dir=True, preserve_subdir=True)]
-
- def __init__(self):
- """Populate the uname -a information."""
- super(LinuxPlatform, self).__init__()
- (self.name, self.hostname, self.release, self.version, self.architecture,
- self.processor) = platform.uname()
- (self.distribution, self.distribution_version,
- self.distribution_codename) = platform.dist()
-
- def GetPlatformDetails(self):
- return ' '.join([self.name, self.hostname, self.release, self.version,
- self.architecture, self.processor, self.distribution,
- self.distribution_version, self.distribution_codename])
-
- def GetName(self):
- return self.GetOs()
-
- def GetProcessor(self):
- return platform.processor()
-
- def GetArchitecture(self):
- if self.architecture:
- return self.architecture
- return ''
-
- def GetOs(self):
- if self.distribution:
- if self.distribution_codename:
- return '%s (%s)' % (self.distribution, self.distribution_codename)
- else:
- return self.distribution
- if self.name:
- return self.name
- return 'Linux'
-
- def IsLinux(self):
- return True
-
- # Linux specific methods
- def GetKernelVersion(self):
- return self.release
-
- # distribution specific methods
- # if platforms module does not do a good job override these.
- def GetDistribution(self):
- return self.distribution
-
- def GetDistributionCodeName(self):
- return self.distribution_codename
-
- def GetDistributionVersion(self):
- return self.distribution_version
-
- def GetPlatformSpecialFiles(self, tmpdir='/tmp'):
- """Creates any platform specific special files."""
- retval = []
- console_dev = os.makedev(5, 1)
- os.mknod(tmpdir + 'console', stat.S_IFCHR |
- stat.S_IRUSR | stat.S_IWUSR, console_dev)
- retval.append((tmpdir + 'console', 'dev/console'))
- null_dev = os.makedev(1, 3)
- os.mknod(tmpdir + 'null', stat.S_IFCHR |
- stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP |
- stat.S_IROTH | stat.S_IWOTH, null_dev)
- retval.append((tmpdir + 'null', 'dev/null'))
- tty_dev = os.makedev(5, 0)
- os.mknod(tmpdir + 'tty', stat.S_IFCHR |
- stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP |
- stat.S_IROTH | stat.S_IWOTH, tty_dev)
- retval.append((tmpdir + 'tty', 'dev/tty'))
- zero_dev = os.makedev(1, 5)
- os.mknod(tmpdir + 'zero', stat.S_IFCHR |
- stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP |
- stat.S_IROTH | stat.S_IWOTH, zero_dev)
- retval.append((tmpdir + 'zero', 'dev/zero'))
- # /selinux is deprecated in favor of /sys/fs/selinux, but preserve it on
- # those OSes where it's present.
- if os.path.isdir('/selinux'):
- os.mkdir(tmpdir + 'selinux', 0755)
- retval.append((tmpdir + 'selinux', 'selinux'))
- return retval
-
- def Overwrite(self, filename, arcname, tmpdir='/tmp'):
- """Overwrites specified file if needed for the Linux platform."""
- pass
-
- def GetPreferredFilesystemType(self):
- """Return the optimal filesystem supported for the platform."""
- return 'ext4'
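
The mode constants used for the device nodes above compose as expected; a character device readable and writable by everyone carries mode 020666:

    import stat

    mode = (stat.S_IFCHR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP |
            stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH)
    print oct(mode)  # 020666: S_IFCHR plus 0666 permission bits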
diff --git a/legacy/gcimagebundle/gcimagebundlelib/manifest.py b/legacy/gcimagebundle/gcimagebundlelib/manifest.py
deleted file mode 100755
index 2e83d9e..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/manifest.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Image manifest."""
-
-
-import json
-from gcimagebundlelib import utils
-
-
-class ImageManifest(object):
- """Retrieves metadata from the instance and stores it in manifest.json.
-
- The image manifest is a JSON file that is bundled along side the disk.
-
- Included Metadata
- - Licenses
- """
-
- def __init__(self, http=utils.Http(), is_gce_instance=True):
- self._http = http
- self._licenses = []
- self._is_gce_instance = is_gce_instance
-
- def CreateIfNeeded(self, file_path):
- """Creates the manifest file to the specified path if it's needed.
-
- Args:
- file_path: Location of where the manifest should be written to.
-
-    Returns:
-      True if the manifest was written to file_path, False otherwise.
-    """
- if self._is_gce_instance:
- self._LoadLicenses()
- if self._IsManifestNeeded():
- with open(file_path, 'w') as manifest_file:
- self._WriteToFile(manifest_file)
- return True
- return False
-
- def _LoadLicenses(self):
- """Loads the licenses from the metadata server if they exist."""
- response = self._http.GetMetadata('instance/', recursive=True)
- instance_metadata = json.loads(response)
- if 'licenses' in instance_metadata:
- for license_obj in instance_metadata['licenses']:
- self._licenses.append(license_obj['id'])
-
- def _ToJson(self):
- """Formats the image metadata as a JSON object."""
- return json.dumps(
- {
- 'licenses': self._licenses
- })
-
- def _IsManifestNeeded(self):
- """Determines if a manifest should be bundled with the disk."""
-    return bool(self._licenses)
-
- def _WriteToFile(self, file_obj):
- """Writes the manifest data to the file handle."""
- manifest_json = self._ToJson()
- file_obj.write(manifest_json)
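
End to end, the manifest is tiny. With one license attached (a hypothetical ID, poking at the private field purely for illustration), the bundled manifest.json is just:

    from gcimagebundlelib import manifest

    m = manifest.ImageManifest(is_gce_instance=False)
    m._licenses = ['123456789012345']  # what _LoadLicenses fills in on GCE
    print m._ToJson()
    # {"licenses": ["123456789012345"]}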
diff --git a/legacy/gcimagebundle/gcimagebundlelib/os_platform.py b/legacy/gcimagebundle/gcimagebundlelib/os_platform.py
deleted file mode 100644
index 65e6e7c..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/os_platform.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Base class for platform specific information."""
-
-
-class Platform(object):
- """Base class for platform information."""
- EXCLUDE_LIST = []
- OVERWRITE_LIST = []
-
- @staticmethod
- def IsThisPlatform(root='/'):
- return False
-
- def __init__(self):
- pass
-
- def GetName(self):
- """Generic name for the platform."""
- return 'Unknown'
-
- def GetProcessor(self):
- """Real processor."""
- return ''
-
- def GetArchitecture(self):
- """Returns machine architecture."""
- return ''
-
- def GetExcludeList(self):
- """Returns the default exclude list of the platform."""
- return self.__class__.EXCLUDE_LIST
-
- def GetOs(self):
- """Returns the name of OS."""
- return 'Unknown'
-
- def IsLinux(self):
- return False
-
- def IsWindows(self):
- return False
-
- def IsUnix(self):
- return False
-
- def GetOverwriteList(self):
- """Returns list of platform specific files to overwrite."""
- return self.__class__.OVERWRITE_LIST
-
- def Overwrite(self, file_path, file_name, scratch_dir):
- """Called for each file in the OverwriteList."""
- return file_name
-
- def GetPlatformSpecialFiles(self, tmpdir):
- """returns a list of platform special files that should be created."""
- return []
diff --git a/legacy/gcimagebundle/gcimagebundlelib/platform_factory.py b/legacy/gcimagebundle/gcimagebundlelib/platform_factory.py
deleted file mode 100644
index da63f0e..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/platform_factory.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Factory that guesses the correct platform and creates it."""
-
-import logging
-
-from gcimagebundlelib import centos
-from gcimagebundlelib import fedora
-from gcimagebundlelib import debian
-from gcimagebundlelib import gcel
-from gcimagebundlelib import opensuse
-from gcimagebundlelib import rhel
-from gcimagebundlelib import sle
-from gcimagebundlelib import ubuntu
-
-
-class UnknownPlatformException(Exception):
- """The platform could not be correctly determined."""
-
-
-class PlatformFactory(object):
- """Guess the platform and create it."""
-
- def __init__(self, root='/'):
- self.__root = root
- self.__registry = {}
- self.Register('Centos', centos.Centos)
- self.Register('Fedora', fedora.Fedora)
- self.Register('Debian', debian.Debian)
- self.Register('GCEL', gcel.Gcel)
- self.Register('openSUSE', opensuse.OpenSUSE)
- self.Register('Red Hat Enterprise Linux', rhel.RHEL)
- self.Register('SUSE Linux Enterprise', sle.SLE)
- self.Register('Ubuntu', ubuntu.Ubuntu)
-
- def Register(self, name, klass):
- self.__registry[name] = klass
-
- def GetPlatform(self):
- for name in self.__registry:
- if self.__registry[name].IsThisPlatform(self.__root):
- logging.info('found platform %s', name)
- return self.__registry[name]()
- else:
- logging.debug('skipping platform %s %s ', name, self.__registry[name])
- raise UnknownPlatformException('Could not determine host platform.')
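
Adding support for another distribution only takes a class with `IsThisPlatform` plus a `Register` call; a hypothetical example:

    import os

    from gcimagebundlelib import linux
    from gcimagebundlelib import platform_factory


    class MyDistro(linux.LinuxPlatform):
      """Hypothetical distribution detected by its release file."""

      @staticmethod
      def IsThisPlatform(root='/'):
        return os.path.exists(root + '/etc/mydistro-release')


    factory = platform_factory.PlatformFactory('/')
    factory.Register('MyDistro', MyDistro)
    guest_platform = factory.GetPlatform()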
diff --git a/legacy/gcimagebundle/gcimagebundlelib/rhel.py b/legacy/gcimagebundle/gcimagebundlelib/rhel.py
deleted file mode 100644
index 9ebf1ef..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/rhel.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Red Hat Enterprise Linux Linux specific platform info."""
-
-
-
-import platform
-
-from gcimagebundlelib import linux
-
-
-class RHEL(linux.LinuxPlatform):
- """Red Hat Enterprise Linux specific information."""
-
- @staticmethod
- def IsThisPlatform(root='/'):
- (distribution, _, _) = platform.linux_distribution()
- if distribution == 'Red Hat Enterprise Linux Server':
- return True
- return False
-
- def __init__(self):
- super(RHEL, self).__init__()
-
- def GetPreferredFilesystemType(self):
- (_,version,_) = platform.linux_distribution()
- if version.startswith('7'):
- return 'xfs'
- return 'ext4'
diff --git a/legacy/gcimagebundle/gcimagebundlelib/sle.py b/legacy/gcimagebundle/gcimagebundlelib/sle.py
deleted file mode 100644
index 8b74827..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/sle.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2013 SUSE LLC All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""SUSE Linux Enterprise (SLE) platform info."""
-
-import re
-from gcimagebundlelib import suse
-
-class SLE(suse.SUSE):
- """SLE platform info."""
-
- @staticmethod
-  def IsThisPlatform(root='/'):
- if re.match(r'SUSE Linux Enterprise', suse.SUSE().distribution):
- return True
- return False
-
- def __init__(self):
- super(SLE, self).__init__()
-
- def GetPreferredFilesystemType(self):
- return 'ext3'
diff --git a/legacy/gcimagebundle/gcimagebundlelib/suse.py b/legacy/gcimagebundle/gcimagebundlelib/suse.py
deleted file mode 100644
index 4911b8b..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/suse.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright 2013 SUSE LLC All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""openSUSE and SUSE generic platform info."""
-
-import os
-import re
-
-from gcimagebundlelib import linux
-
-
-class SUSE(linux.LinuxPlatform):
- """openSUSE and SUSE generic platform info."""
-
- def __init__(self):
- super(SUSE, self).__init__()
- self.distribution_codename = None
- self.ParseOSRelease()
- if not self.distribution:
- self.ParseSUSERelease()
- if not self.distribution:
- self.distribution = ''
-
- def ParseOSRelease(self):
- """Parse the /etc/os-release file."""
- release_file = '/etc/os-release'
- if not os.path.isfile(release_file):
- self.distribution = None
- return
- lines = open(release_file, 'r').readlines()
- for ln in lines:
- if not ln:
- continue
- if re.match(r'^NAME=', ln):
- self.distribution = self.__getData(ln)
- if re.match(r'^VERSION_ID=', ln):
- self.distribution_version = self.__getData(ln)
- if re.match(r'^VERSION=', ln):
- data = self.__getData(ln)
- self.distribution_codename = data.split('(')[-1][:-1]
- return
-
- def ParseSUSERelease(self):
- """Parse /etc/SuSE-release file."""
- release_file = '/etc/SuSE-release'
- if not os.path.isfile(release_file):
- self.distribution = None
- return
- lines = open(release_file, 'r').readlines()
- # The first line begins with the distribution name followed by the
- # version, e.g. "SUSE Linux Enterprise Server 11 (x86_64)". Collect
- # the leading words up to the first token that starts with a digit.
- name_words = []
- for item in lines[0].split():
- if re.match(r'\d', item):
- break
- name_words.append(item)
- self.distribution = ' '.join(name_words)
-
- for ln in lines:
- if re.match(r'^VERSION =', ln):
- self.distribution_version = self.__getData(ln)
- if re.match(r'^CODENAME =', ln):
- self.distribution_codename = self.__getData(ln)
- return
-
- def __getData(self, ln):
- """Extract data from a line in a file. Either returns data inside the
- first double quotes ("a b"; a b in this example) or if no double
- quotes exist, returns the data after the first = sign. Leading
- and trailing whitspace are stripped."""
- if ln.find('"') != -1:
- return ln.split('"')[1]
- else:
- return ln.split('=')[-1].strip()
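
A quick illustration of the quoting rule __getData implements, using hypothetical os-release style lines:

    def get_data(ln):
        # Prefer the contents of the first pair of double quotes;
        # otherwise take everything after the last '=' and strip it.
        if ln.find('"') != -1:
            return ln.split('"')[1]
        return ln.split('=')[-1].strip()

    assert get_data('NAME="SLES"') == 'SLES'
    assert get_data('VERSION_ID=12.1\n') == '12.1'
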
diff --git a/legacy/gcimagebundle/gcimagebundlelib/tests/block_disk_test.py b/legacy/gcimagebundle/gcimagebundlelib/tests/block_disk_test.py
deleted file mode 100755
index 1cbb384..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/tests/block_disk_test.py
+++ /dev/null
@@ -1,512 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittest for block_disk.py module."""
-
-
-__pychecker__ = 'no-local' # for unittest
-
-from contextlib import closing
-import json
-import logging
-import os
-import random
-import subprocess
-import tarfile
-import tempfile
-import unittest
-import urllib2
-
-from gcimagebundlelib import block_disk
-from gcimagebundlelib import exclude_spec
-from gcimagebundlelib.tests import image_bundle_test_base
-from gcimagebundlelib import utils
-
-
-class FsRawDiskTest(image_bundle_test_base.ImageBundleTest):
- """FsRawDisk Unit Test."""
-
- _MEGABYTE = 1024*1024
- _GIGABYTE = 1024*_MEGABYTE
-
- def setUp(self):
- super(FsRawDiskTest, self).setUp()
- self._fs_size = 10 * FsRawDiskTest._MEGABYTE
- self._bundle = block_disk.FsRawDisk(self._fs_size, 'ext4')
- self._tar_path = self.tmp_path + '/image.tar.gz'
- self._bundle.SetTarfile(self._tar_path)
- self._bundle.AppendExcludes([exclude_spec.ExcludeSpec(self._tar_path)])
- self._bundle.SetKey('key')
- self._bundle._SetManifest(self._manifest)
-
- def _SetupMbrDisk(self, partition_start, partition_end, fs_uuid):
- """Creates a disk with a fake MBR.
-
- Args:
- partition_start: The byte offset where the partition starts.
- partition_end: The byte offset where the partition ends.
- fs_uuid: The UUID of the filesystem to create on the partition.
-
- Returns:
- The path where the disk is located.
- """
- # Create the disk file with the size specified.
- disk_path = os.path.join(self.tmp_root, 'mbrdisk.raw')
- disk_size = partition_end + FsRawDiskTest._MEGABYTE
- with open(disk_path, 'wb') as disk_file:
- disk_file.truncate(disk_size)
-
- # Create a partition table
- utils.MakePartitionTable(disk_path)
-
- # Create the partition
- utils.MakePartition(disk_path, 'primary', 'ext2',
- partition_start, partition_end)
-
- # Create the file system
- with utils.LoadDiskImage(disk_path) as devices:
- utils.MakeFileSystem(devices[0], 'ext4', fs_uuid)
-
- # Write some data after the MBR but before the first partition
- with open(disk_path, 'r+b') as disk_file:
- # Seek to last two bytes of first sector
- disk_file.seek(510)
- # Write MBR signature
- disk_file.write(chr(0x55))
- disk_file.write(chr(0xAA))
- # Write random data to the disk until the first partition starts
- for _ in range(partition_start - 512):
- # Write a byte
- disk_file.write(chr(random.randint(0, 127)))
-
- return disk_path
-
- def tearDown(self):
- super(FsRawDiskTest, self).tearDown()
-
- def testDiskBundle(self):
- """Tests bundle command when a disk is specified.
-
- Creates a 20GB source disk to start with and verifies that creating
- a 10MB bundle from it works.
- """
- # Create a 20GB disk with first partition starting at 1MB
- self._TestDiskBundleHelper(FsRawDiskTest._MEGABYTE,
- FsRawDiskTest._GIGABYTE*20,
- utils.RunCommand(['uuidgen']).strip())
-
- def testDiskBundlePartitionAt2MB(self):
- """Tests bundle command when a disk is specified.
-
- Creates the first partition at 2MB and verifies all data prior to that is
- copied.
- """
- # Create a 20GB disk with first partition starting at 2MB
- self._TestDiskBundleHelper(FsRawDiskTest._MEGABYTE*2,
- FsRawDiskTest._GIGABYTE*20,
- utils.RunCommand(['uuidgen']).strip())
-
- def _TestDiskBundleHelper(self, partition_start, partition_end, fs_uuid):
- disk_path = self._SetupMbrDisk(partition_start, partition_end, fs_uuid)
-
- with utils.LoadDiskImage(disk_path) as devices:
- # Get the path to the disk.
- # devices will have something which is like /dev/mapper/loop0p1
- # We need to get loop0 out of it.
- disk_loop_back_path = '/dev/' + devices[0].split('/')[3][:-2]
-
- # Create symlinks to the disk and loopback paths.
- # This is required because parts of the code assume the first
- # partition path is the device path with '1' appended. Remove this
- # once that code is updated.
- symlink_disk = os.path.join(self.tmp_root, 'disk')
- symlink_partition = self.tmp_root + '/disk1'
- utils.RunCommand(['ln', '-s', disk_loop_back_path, symlink_disk])
- utils.RunCommand(['ln', '-s', devices[0], symlink_partition])
-
- # Bundle up
- self._bundle.AddDisk(symlink_disk)
- self._bundle.AddSource(self.tmp_path)
- self._bundle.Verify()
- (_, _) = self._bundle.Bundleup()
- self._VerifyImageHas(self._tar_path,
- ['lost+found', 'test1', 'test2', 'dir1/',
- '/dir1/dir11/', '/dir1/sl1', '/dir1/hl2', 'dir2/',
- '/dir2/dir1', '/dir2/sl2', '/dir2/hl1'])
- self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test1', 2)
- self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test2', 2)
- self._VerifyDiskSize(self._tar_path, self._fs_size)
- self._VerifyNonPartitionContents(self._tar_path,
- disk_path,
- partition_start)
- self._VerifyFilesystemUUID(self._tar_path, fs_uuid)
-
- def testRawDisk(self):
- """Tests the regular operation. No expected error."""
- self._bundle.AddSource(self.tmp_path)
- self._bundle.Verify()
- (_, digest) = self._bundle.Bundleup()
- if not digest:
- self.fail('raw disk failed')
- self._VerifyTarHas(self._tar_path, ['disk.raw'])
- self._VerifyImageHas(self._tar_path,
- ['lost+found', 'test1', 'test2', 'dir1/',
- '/dir1/dir11/', '/dir1/sl1', '/dir1/hl2', 'dir2/',
- '/dir2/dir1', '/dir2/sl2', '/dir2/hl1'])
- self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test1', 2)
- self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test2', 2)
-
- def testRawDiskIgnoresHardlinks(self):
- """Tests if the raw disk ignores hard links if asked."""
- self._bundle.AddSource(self.tmp_path)
- self._bundle.IgnoreHardLinks()
- self._bundle.Verify()
- (_, digest) = self._bundle.Bundleup()
- if not digest:
- self.fail('raw disk failed')
- self._VerifyTarHas(self._tar_path, ['disk.raw'])
- self._VerifyImageHas(self._tar_path,
- ['lost+found', 'test1', 'test2', 'dir1/',
- '/dir1/dir11/', '/dir1/sl1', '/dir1/hl2', 'dir2/',
- '/dir2/dir1', '/dir2/sl2', '/dir2/hl1'])
- self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test1', 1)
- self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test2', 1)
-
- def testRawDiskIgnoresExcludes(self):
- """Tests if the raw disk ignores specified excludes files."""
- self._bundle.AddSource(self.tmp_path)
- self._bundle.AppendExcludes(
- [exclude_spec.ExcludeSpec(self.tmp_path + '/dir1')])
- self._bundle.Verify()
- (_, digest) = self._bundle.Bundleup()
- if not digest:
- self.fail('raw disk failed')
- self._VerifyTarHas(self._tar_path, ['disk.raw'])
- self._VerifyImageHas(self._tar_path,
- ['lost+found', 'test1', 'test2', 'dir2/', '/dir2/dir1',
- '/dir2/sl2', '/dir2/hl1'])
-
- def testRawDiskExcludePreservesSubdirs(self):
- """Tests if excludes preserves subdirs underneath if asked."""
- self._bundle.AddSource(self.tmp_path)
- self._bundle.AppendExcludes(
- [exclude_spec.ExcludeSpec(self.tmp_path + '/dir1',
- preserve_dir=True,
- preserve_subdir=True)])
- self._bundle.Verify()
- (_, digest) = self._bundle.Bundleup()
- if not digest:
- self.fail('raw disk failed')
- self._VerifyTarHas(self._tar_path, ['disk.raw'])
- self._VerifyImageHas(self._tar_path,
- ['lost+found', 'test1', 'test2', 'dir1/',
- '/dir1/dir11', 'dir2/', '/dir2/dir1',
- '/dir2/sl2', '/dir2/hl1'])
-
- def testRawDiskExcludePreservesFiles(self):
- """Tests if excludes preserves the files underneath if asked."""
- self._bundle.AddSource(self.tmp_path)
- self._bundle.AppendExcludes(
- [exclude_spec.ExcludeSpec(self.tmp_path + '/dir1',
- preserve_dir=True,
- preserve_file=True)])
- self._bundle.Verify()
- (_, digest) = self._bundle.Bundleup()
- if not digest:
- self.fail('raw disk failed')
- self._VerifyTarHas(self._tar_path, ['disk.raw'])
- self._VerifyImageHas(self._tar_path,
- ['lost+found', 'test1', 'test2', 'dir1/', '/dir1/hl2',
- '/dir1/sl1', 'dir2/', '/dir2/dir1', '/dir2/sl2',
- '/dir2/hl1'])
-
- def testRawDiskUsesModifiedFiles(self):
- """Tests if the raw disk uses modified files."""
- self._bundle.AddSource(self.tmp_path)
- self._bundle.AppendExcludes(
- [exclude_spec.ExcludeSpec(self.tmp_path + '/dir1')])
- self._bundle.SetPlatform(image_bundle_test_base.MockPlatform(self.tmp_root))
- self._bundle.Verify()
- (_, digest) = self._bundle.Bundleup()
- if not digest:
- self.fail('raw disk failed')
- self._VerifyTarHas(self._tar_path, ['disk.raw'])
- self._VerifyImageHas(self._tar_path,
- ['lost+found', 'test1', 'test2', 'dir2/',
- '/dir2/dir1', '/dir2/sl2', '/dir2/hl1'])
- self._VerifyFileInRawDiskEndsWith(self._tar_path, 'test1',
- 'something extra.')
-
- def testRawDiskGeneratesCorrectDigest(self):
- """Tests if the SHA1 digest generated is accurate."""
- self._bundle.AddSource(self.tmp_path)
- self._bundle.Verify()
- (_, digest) = self._bundle.Bundleup()
- if not digest:
- self.fail('raw disk failed')
- p = subprocess.Popen(['/usr/bin/openssl dgst -sha1 ' + self._tar_path],
- stdout=subprocess.PIPE, shell=True)
- file_digest = p.communicate()[0].split('=')[1].strip()
- self.assertEqual(digest, file_digest)
-
- def testRawDiskHonorsRecursiveOff(self):
- """Tests if raw disk handles recursive off."""
- self._bundle.AppendExcludes([exclude_spec.ExcludeSpec(self._tar_path)])
- self._bundle.AddSource(self.tmp_path + '/dir1',
- arcname='dir1', recursive=False)
- self._bundle.AddSource(self.tmp_path + '/dir2', arcname='dir2')
- self._bundle.Verify()
- (_, digest) = self._bundle.Bundleup()
- if not digest:
- self.fail('raw disk failed')
- self._VerifyTarHas(self._tar_path, ['disk.raw'])
- self._VerifyImageHas(self._tar_path,
- ['lost+found', 'dir1/', 'dir2/', '/dir2/dir1',
- '/dir2/sl2', '/dir2/hl1'])
-
- def testSkipLicenseCheckIfNotOnGCE(self):
- """Tests that no licenses are loaded if gcimagebundle is not run on GCE."""
- class MockHttp(utils.Http):
- def Get(self, request, timeout=None):
- # If gcimagebundle is not run on GCE, the metadata server is unreachable.
- raise urllib2.URLError("urlopen error timed out")
-
- self._http = MockHttp()
- self._manifest._http = self._http
- self._manifest._is_gce_instance = False
-
- self._bundle.AddSource(self.tmp_path)
- self._bundle.Verify()
- _ = self._bundle.Bundleup()
- self.assertFalse(self._bundle._manifest._IsManifestNeeded())
- self._VerifyTarHas(self._tar_path, ['disk.raw'])
-
- def testNoManifestCreatedWithZeroLicenses(self):
- """Tests that no manifest is created when there are 0 licenses."""
- self._bundle.AddSource(self.tmp_path)
- self._bundle.Verify()
- _ = self._bundle.Bundleup()
- self.assertFalse(self._bundle._manifest._IsManifestNeeded())
- self._VerifyTarHas(self._tar_path, ['disk.raw'])
-
- def testManifestWithOneLicense(self):
- """Tests manifest is populated with 1 license."""
- self._http._instance_response = ('{"hostname":"test",'
- '"licenses":[{"id":"TEST-LICENSE"}]}')
- self._bundle.AddSource(self.tmp_path)
- self._bundle.Verify()
- _ = self._bundle.Bundleup()
- manifest_json = self._bundle._manifest._ToJson()
- manifest_obj = json.loads(manifest_json)
- self.assertTrue(self._bundle._manifest._IsManifestNeeded())
- self.assertEqual(1, len(manifest_obj['licenses']))
- self.assertEqual('TEST-LICENSE', manifest_obj['licenses'][0])
- self._VerifyTarHas(self._tar_path, ['manifest.json', 'disk.raw'])
- self._VerifyFileContentsInTarball(self._tar_path,
- 'manifest.json',
- '{"licenses": ["TEST-LICENSE"]}')
-
- def testManifestWithTwoLicenses(self):
- """Tests manifest is populated with 2 licenses."""
- self._http._instance_response = ('{"hostname":"test",'
- '"licenses":[{"id":"TEST-1"},'
- '{"id":"TEST-2"}]}')
- self._bundle.AddSource(self.tmp_path)
- self._bundle.Verify()
- _ = self._bundle.Bundleup()
- manifest_json = self._bundle._manifest._ToJson()
- manifest_obj = json.loads(manifest_json)
- self.assertTrue(self._bundle._manifest._IsManifestNeeded())
- self.assertEqual(2, len(manifest_obj['licenses']))
- self.assertEqual('TEST-1', manifest_obj['licenses'][0])
- self.assertEqual('TEST-2', manifest_obj['licenses'][1])
- self._VerifyTarHas(self._tar_path, ['manifest.json', 'disk.raw'])
- self._VerifyFileContentsInTarball(self._tar_path,
- 'manifest.json',
- '{"licenses": ["TEST-1", "TEST-2"]}')
-
- def _VerifyFilesystemUUID(self, tar, expected_uuid):
- """Verifies UUID of the first partition on disk matches the value."""
- tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
- tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
- self.assertEqual(subprocess.call(tar_cmd), 0)
-
- created_disk_path = os.path.join(tmp_dir, 'disk.raw')
- with utils.LoadDiskImage(created_disk_path) as devices:
- self.assertEqual(1, len(devices))
- self.assertEqual(expected_uuid, utils.GetUUID(devices[0]))
-
- def _VerifyNonPartitionContents(self, tar, disk_path, partition_start):
- """Verifies that bytes outside the partition are preserved."""
- tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
- tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
- self.assertEqual(subprocess.call(tar_cmd), 0)
- created_disk_path = os.path.join(tmp_dir, 'disk.raw')
-
- # Verify the first partition in both disks starts at the same offset
- self.assertEqual(partition_start,
- utils.GetPartitionStart(disk_path, 1))
- self.assertEqual(partition_start,
- utils.GetPartitionStart(created_disk_path, 1))
- with open(disk_path, 'r') as source_file:
- with open(created_disk_path, 'r') as created_file:
- # Seek to the 510th byte in both streams and verify the remaining
- # bytes up to the partition start are identical
- source_file.seek(510)
- created_file.seek(510)
- for i in range(partition_start - 510):
- self.assertEqual(source_file.read(1),
- created_file.read(1),
- 'byte at position %s not equal' % (i + 510))
-
- def _VerifyDiskSize(self, tar, expected_size):
- """Verifies that the disk file has the same size as expected."""
- tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
- tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
- self.assertEqual(subprocess.call(tar_cmd), 0)
- disk_path = os.path.join(tmp_dir, 'disk.raw')
- statinfo = os.stat(disk_path)
- self.assertEqual(expected_size, statinfo.st_size)
-
- def _VerifyImageHas(self, tar, expected):
- """Tests if raw disk contains an expected list of files/directories."""
- tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
- tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
- self.assertEqual(subprocess.call(tar_cmd), 0)
- disk_path = os.path.join(tmp_dir, 'disk.raw')
- with utils.LoadDiskImage(disk_path) as devices:
- self.assertEqual(len(devices), 1)
- mnt_dir = tempfile.mkdtemp(dir=self.tmp_root)
- with utils.MountFileSystem(devices[0], mnt_dir, 'ext4'):
- found = []
- for root, dirs, files in os.walk(mnt_dir):
- root = root.replace(mnt_dir, '')
- for f in files:
- found.append(os.path.join(root, f))
- for d in dirs:
- found.append(os.path.join(root, d))
- self._AssertListEqual(expected, found)
-
- def _VerifyFileContentsInTarball(self, tar, file_name, expected_content):
- """Reads the file from the tar file and turns it."""
- with closing(tarfile.open(tar)) as tar_file:
- content = tar_file.extractfile(file_name).read()
- self.assertEqual(content, expected_content)
-
- def _VerifyFileInRawDiskEndsWith(self, tar, filename, text):
- """Tests if a file on raw disk contains ends with a specified text."""
- tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
- tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
- self.assertEqual(subprocess.call(tar_cmd), 0)
- disk_path = os.path.join(tmp_dir, 'disk.raw')
- with utils.LoadDiskImage(disk_path) as devices:
- self.assertEqual(len(devices), 1)
- mnt_dir = tempfile.mkdtemp(dir=self.tmp_root)
- with utils.MountFileSystem(devices[0], mnt_dir, 'ext4'):
- f = open(os.path.join(mnt_dir, filename), 'r')
- file_content = f.read()
- f.close()
- self.assertTrue(file_content.endswith(text))
-
- def _VerifyNumberOfHardLinksInRawDisk(self, tar, filename, count):
- """Tests if a file on raw disk has a specified number of hard links."""
- tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
- tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
- self.assertEqual(subprocess.call(tar_cmd), 0)
- disk_path = os.path.join(tmp_dir, 'disk.raw')
- with utils.LoadDiskImage(disk_path) as devices:
- self.assertEqual(len(devices), 1)
- mnt_dir = tempfile.mkdtemp(dir=self.tmp_root)
- with utils.MountFileSystem(devices[0], mnt_dir, 'ext4'):
- self.assertEqual(os.stat(os.path.join(mnt_dir, filename)).st_nlink,
- count)
-
-
-class RootFsRawTest(image_bundle_test_base.ImageBundleTest):
- """RootFsRaw Unit Test."""
-
- def setUp(self):
- super(RootFsRawTest, self).setUp()
- self._bundle = block_disk.RootFsRaw(
- 10*1024*1024, 'ext4', False, self._MockStatvfs)
- self._tar_path = self.tmp_path + '/image.tar.gz'
- self._bundle.SetTarfile(self._tar_path)
- self._bundle.AppendExcludes([exclude_spec.ExcludeSpec(self._tar_path)])
- self._bundle._SetManifest(self._manifest)
-
- def tearDown(self):
- super(RootFsRawTest, self).tearDown()
-
- def testRootRawDiskVerifiesOneSource(self):
- """Tests that only one root directory is allowed."""
- self._bundle.AddSource(self.tmp_path)
- self._bundle.AddSource(self.tmp_path + '/dir1')
- self._bundle.SetKey('key')
- try:
- self._bundle.Verify()
- except block_disk.InvalidRawDiskError:
- return
- self.fail()
-
- def testRootRawDiskVerifiesRootDestination(self):
- """Tests that destination directory must be /."""
- self._bundle.AddSource(self.tmp_path, arcname='/tmp')
- self._bundle.SetKey('key')
- try:
- self._bundle.Verify()
- except block_disk.InvalidRawDiskError:
- return
- self.fail()
-
- def testRootRawDiskNotEnoughFreeSpace(self):
- """Tests that there is not enough disk space to complete the operation."""
- self._statvfs_map = {
- "/" : image_bundle_test_base.StatvfsResult(1024, 500, 100),
- "/tmp" : image_bundle_test_base.StatvfsResult(1024, 500, 100)
- }
- self._bundle.AddSource("/")
- self._bundle.SetKey('key')
- try:
- self._bundle.Verify()
- except block_disk.InvalidRawDiskError as e:
- print str(e)
- return
- self.fail()
-
- def testRootFilesExceedDiskSize(self):
- """Tests that source files may exceed the raw disk file size limit."""
- self._statvfs_map = {
- "/" : image_bundle_test_base.StatvfsResult(1024, 50000, 20000),
- "/tmp" : image_bundle_test_base.StatvfsResult(1024, 100000, 90000)
- }
- self._bundle.AddSource("/")
- self._bundle.SetKey('key')
- try:
- self._bundle.Verify()
- except block_disk.InvalidRawDiskError as e:
- print str(e)
- return
- self.fail()
-
- def _MockStatvfs(self, file_path):
- return self._statvfs_map[file_path]
-
-def main():
- logging.basicConfig(level=logging.DEBUG)
- unittest.main()
-
-
-if __name__ == '__main__':
- main()
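
testRawDiskGeneratesCorrectDigest shells out to openssl to recompute the tarball digest. The same check can be done in-process with hashlib; a sketch, with the tarball path hypothetical:

    import hashlib

    def sha1_of_file(path, block_size=65536):
        # Stream the file so large tarballs need not fit in memory.
        digest = hashlib.sha1()
        with open(path, 'rb') as f:
            for block in iter(lambda: f.read(block_size), b''):
                digest.update(block)
        return digest.hexdigest()

    # Compare against the digest returned by Bundleup():
    # assert sha1_of_file('/tmp/image.tar.gz') == digest
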
diff --git a/legacy/gcimagebundle/gcimagebundlelib/tests/image_bundle_test_base.py b/legacy/gcimagebundle/gcimagebundlelib/tests/image_bundle_test_base.py
deleted file mode 100755
index 37b7fae..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/tests/image_bundle_test_base.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Base class for image_bundle unittests."""
-
-
-__pychecker__ = 'no-local' # for unittest
-
-
-import os
-import re
-import shutil
-import subprocess
-import tarfile
-import tempfile
-import unittest
-import urllib2
-
-from gcimagebundlelib import manifest
-from gcimagebundlelib.os_platform import Platform
-from gcimagebundlelib import utils
-
-
-class InvalidOverwriteFileException(Exception):
- """Invalid overwrite target was passed to MockPlatform.Overwrite method."""
-
-
-class MockPlatform(Platform):
- """Mock platform for image bundle unit tests."""
- OVERWRITE_LIST = ['test1']
-
- def __init__(self, tmp_root):
- super(MockPlatform, self).__init__()
- self.tmp_root = tmp_root
-
- def Overwrite(self, filename, arcname, tmpdir):
- temp = tempfile.mktemp(dir=tmpdir)
- if arcname != 'test1':
- raise InvalidOverwriteFileException(arcname)
- fd = open(temp, 'w')
- fd.write(open(filename).read())
- fd.write('something extra.')
- fd.close()
- return temp
-
-
-class MockHttp(utils.Http):
- """Fake implementation of the utils.Http client. Used for metadata queries."""
- def __init__(self):
- self._instance_response = '{"hostname":"test"}'
-
- def Get(self, request, timeout=None):
- """Accepts an Http request and returns a precanned response."""
- url = request.get_full_url()
- if url == utils.METADATA_URL_PREFIX:
- return 'v1/'
- elif url.startswith(utils.METADATA_V1_URL_PREFIX):
- url = url.replace(utils.METADATA_V1_URL_PREFIX, '')
- if url == 'instance/?recursive=true':
- return self._instance_response
- raise urllib2.HTTPError(url, 404, 'Not Found', None, None)
-
-class StatvfsResult:
- """ A struct for partial os.statvfs result, used to mock the result. """
-
- def __init__(self, f_bsize, f_blocks, f_bfree):
- self.f_bsize = f_bsize
- self.f_blocks = f_blocks
- self.f_bfree = f_bfree
-
-class ImageBundleTest(unittest.TestCase):
- """ImageBundle Unit Test Base Class."""
-
- def setUp(self):
- self.tmp_root = tempfile.mkdtemp(dir='/tmp')
- self.tmp_path = tempfile.mkdtemp(dir=self.tmp_root)
- self._http = MockHttp()
- self._manifest = manifest.ImageManifest(http=self._http, is_gce_instance=True)
- self._SetupFilesystemToTar()
-
- def tearDown(self):
- self._CleanupFiles()
-
- def _SetupFilesystemToTar(self):
- """Creates some directory structure to tar."""
- if os.path.exists(self.tmp_path):
- shutil.rmtree(self.tmp_path)
- os.makedirs(self.tmp_path)
- with open(self.tmp_path + '/test1', 'w') as fd:
- print >> fd, 'some text'
- shutil.copyfile(self.tmp_path + '/test1', self.tmp_path + '/test2')
- os.makedirs(self.tmp_path + '/dir1')
- os.makedirs(self.tmp_path + '/dir1/dir11')
- os.makedirs(self.tmp_path + '/dir2')
- os.makedirs(self.tmp_path + '/dir2/dir1')
- os.symlink(self.tmp_path + '/test1', self.tmp_path + '/dir1/sl1')
- os.link(self.tmp_path + '/test2', self.tmp_path + '/dir1/hl2')
- os.symlink(self.tmp_path + '/test2', self.tmp_path + '/dir2/sl2')
- os.link(self.tmp_path + '/test1', self.tmp_path + '/dir2/hl1')
-
- def _CleanupFiles(self):
- """Removes the files under test directory."""
- if os.path.exists(self.tmp_root):
- shutil.rmtree(self.tmp_root)
-
- def _VerifyTarHas(self, tar, expected):
- p = subprocess.Popen(['tar -tf %s' % tar],
- stdout=subprocess.PIPE, shell=True)
- found = p.communicate()[0].split('\n')
- if './' in found:
- found.remove('./')
- if '' in found:
- found.remove('')
- self._AssertListEqual(expected, found)
-
- def _VerifyFileInTarEndsWith(self, tar, filename, text):
- tf = tarfile.open(tar, 'r:gz')
- fd = tf.extractfile(filename)
- file_content = fd.read()
- self.assertTrue(file_content.endswith(text))
-
- def _AssertListEqual(self, list1, list2):
- """Asserts that, when sorted, list1 and list2 are identical."""
- sorted_list1 = [re.sub(r'/$', '', x) for x in list1]
- sorted_list2 = [re.sub(r'/$', '', x) for x in list2]
- sorted_list1.sort()
- sorted_list2.sort()
- self.assertEqual(sorted_list1, sorted_list2)
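
_AssertListEqual treats 'dir1' and 'dir1/' as the same entry, which is what lets tar listings compare against os.walk output. A condensed sketch of that normalization:

    import re

    def normalize(entries):
        # Strip one trailing slash so tar's 'dir1/' equals os.walk's 'dir1'.
        return sorted(re.sub(r'/$', '', e) for e in entries)

    assert normalize(['dir1/', 'test1']) == normalize(['test1', 'dir1'])
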
diff --git a/legacy/gcimagebundle/gcimagebundlelib/tests/utils_test.py b/legacy/gcimagebundle/gcimagebundlelib/tests/utils_test.py
deleted file mode 100755
index dd7d2cd..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/tests/utils_test.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittest for utils.py module."""
-
-__pychecker__ = 'no-local' # for unittest
-
-import logging
-import subprocess
-import unittest
-import uuid
-
-from gcimagebundlelib import utils
-
-
-class ImageBundleTest(unittest.TestCase):
-
- def testRunCommand(self):
- """Run a simple command and verify it works."""
- utils.RunCommand(['ls', '/'])
-
- def testRunCommandThatFails(self):
- """Run a command that will fail and verify it raises the correct error."""
- def RunCommandUnderTest():
- non_existent_path = '/' + uuid.uuid4().hex
- utils.RunCommand(['mkfs', '-t', 'ext4', non_existent_path])
- self.assertRaises(subprocess.CalledProcessError, RunCommandUnderTest)
-
-
-def main():
- logging.basicConfig(level=logging.DEBUG)
- unittest.main()
-
-
-if __name__ == '__main__':
- main()
-
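
As the second test exercises, utils.RunCommand surfaces failures as subprocess.CalledProcessError. Typical caller-side handling, sketched with a hypothetical bad device path:

    import subprocess
    from gcimagebundlelib import utils

    try:
        utils.RunCommand(['mkfs', '-t', 'ext4', '/no/such/device'])
    except subprocess.CalledProcessError as e:
        # RunCommand already logged stdout/stderr; e.returncode holds
        # the non-zero exit status of the failed command.
        print('mkfs failed with return code %d' % e.returncode)
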
diff --git a/legacy/gcimagebundle/gcimagebundlelib/ubuntu.py b/legacy/gcimagebundle/gcimagebundlelib/ubuntu.py
deleted file mode 100644
index 8d68687..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/ubuntu.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Ubuntu specific platform info."""
-
-import csv
-import os
-from gcimagebundlelib import linux
-
-
-class Ubuntu(linux.LinuxPlatform):
- """Ubuntu specific information."""
-
- @staticmethod
- def IsThisPlatform(root='/'):
- release_file = root + '/etc/lsb-release'
- if os.path.exists(release_file):
- (_, _, flavor, _) = Ubuntu.ParseLsbRelease(release_file)
- if flavor and flavor.lower() == 'ubuntu':
- return True
- return False
-
- @staticmethod
- def ParseLsbRelease(release_file='/etc/lsb-release'):
- """Parses the /etc/lsb-releases file."""
- release_info = {}
- for line in csv.reader(open(release_file), delimiter='='):
- if len(line) > 1:
- release_info[line[0]] = line[1]
- if ('DISTRIB_CODENAME' not in release_info or
- 'DISTRIB_DESCRIPTION' not in release_info or
- 'DISTRIB_ID' not in release_info or
- 'DISTRIB_RELEASE' not in release_info):
- return (None, None, None, None)
- return (release_info['DISTRIB_CODENAME'],
- release_info['DISTRIB_DESCRIPTION'],
- release_info['DISTRIB_ID'],
- release_info['DISTRIB_RELEASE'])
-
- def __init__(self):
- super(Ubuntu, self).__init__()
- (self.distribution_codename, _, self.distribution,
- self.distribution_version) = Ubuntu.ParseLsbRelease()
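
ParseLsbRelease leans on csv.reader for the '=' split because it also strips the double quotes around values such as DISTRIB_DESCRIPTION. A sketch with inline sample data (the release values are hypothetical):

    import csv

    sample = [
        'DISTRIB_ID=Ubuntu',
        'DISTRIB_RELEASE=14.04',
        'DISTRIB_CODENAME=trusty',
        'DISTRIB_DESCRIPTION="Ubuntu 14.04 LTS"',
    ]
    info = dict((row[0], row[1])
                for row in csv.reader(sample, delimiter='=')
                if len(row) > 1)
    # csv.reader removed the surrounding quotes from the description.
    assert info['DISTRIB_DESCRIPTION'] == 'Ubuntu 14.04 LTS'
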
diff --git a/legacy/gcimagebundle/gcimagebundlelib/utils.py b/legacy/gcimagebundle/gcimagebundlelib/utils.py
deleted file mode 100644
index a8fde40..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/utils.py
+++ /dev/null
@@ -1,455 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Utilities for image bundling tool."""
-
-import logging
-import os
-import subprocess
-import time
-import urllib2
-
-METADATA_URL_PREFIX = 'http://169.254.169.254/computeMetadata/'
-METADATA_V1_URL_PREFIX = METADATA_URL_PREFIX + 'v1/'
-
-
-class MakeFileSystemException(Exception):
- """Error occurred in file system creation."""
-
-
-class TarAndGzipFileException(Exception):
- """Error occurred in creating the tarball."""
-
-
-class LoadDiskImage(object):
- """Loads raw disk image using kpartx."""
-
- def __init__(self, file_path):
- """Initializes LoadDiskImage object.
-
- Args:
- file_path: a path to a file containing raw disk image.
-
- Returns:
- A list of devices for every partition found in an image.
- """
- self._file_path = file_path
-
- def __enter__(self):
- """Map disk image as a device."""
- SyncFileSystem()
- kpartx_cmd = ['kpartx', '-a', '-v', '-s', self._file_path]
- output = RunCommand(kpartx_cmd)
- devs = []
- for line in output.splitlines():
- split_line = line.split()
- if (len(split_line) > 2 and split_line[0] == 'add'
- and split_line[1] == 'map'):
- devs.append('/dev/mapper/' + split_line[2])
- time.sleep(2)
- return devs
-
- def __exit__(self, unused_exc_type, unused_exc_value, unused_exc_tb):
- """Unmap disk image as a device.
-
- Args:
- unused_exc_type: unused.
- unused_exc_value: unused.
- unused_exc_tb: unused.
- """
- SyncFileSystem()
- time.sleep(2)
- kpartx_cmd = ['kpartx', '-d', '-v', '-s', self._file_path]
- RunCommand(kpartx_cmd)
-
-
-class MountFileSystem(object):
- """Mounts a file system."""
-
- def __init__(self, dev_path, dir_path, fs_type):
- """Initializes MountFileSystem object.
-
- Args:
- dev_path: A path to a device to mount.
- dir_path: A path to a directory where a device is to be mounted.
- """
- self._dev_path = dev_path
- self._dir_path = dir_path
- self._fs_type = fs_type
-
- def __enter__(self):
- """Mounts a device.
- """
- # Since the bundled image can have the same uuid as the root disk,
- # we should prevent uuid conflicts for xfs mounts.
- if self._fs_type == 'xfs':
- mount_cmd = ['mount', '-o', 'nouuid', self._dev_path, self._dir_path]
- else:
- mount_cmd = ['mount', self._dev_path, self._dir_path]
- RunCommand(mount_cmd)
-
- def __exit__(self, unused_exc_type, unused_exc_value, unused_exc_tb):
- """Unmounts a file system.
-
- Args:
- unused_exc_type: unused.
- unused_exc_value: unused.
- unused_exc_tb: unused.
- """
- umount_cmd = ['umount', self._dir_path]
- RunCommand(umount_cmd)
- SyncFileSystem()
-
-
-def SyncFileSystem():
- RunCommand(['sync'])
-
-def GetMounts(root='/'):
- """Find all mount points under the specified root.
-
- Args:
- root: a path to look for a mount points.
-
- Returns:
- A list of mount points.
- """
- output = RunCommand(['/bin/mount', '-l'])
- mounts = []
- for line in output.splitlines():
- split_line = line.split()
- mount_point = split_line[2]
- if mount_point == root:
- continue
- # We simply ignore the fs_type for now, but we could use it later.
- # Just verify that these are actually mount points.
- if os.path.ismount(mount_point) and mount_point.startswith(root):
- mounts.append(mount_point)
- return mounts
-
-
-def MakePartitionTable(file_path):
- """Create a partition table in a file.
-
- Args:
- file_path: A path to a file where a partition table will be created.
- """
- RunCommand(['parted', file_path, 'mklabel', 'msdos'])
-
-
-def MakePartition(file_path, partition_type, fs_type, start, end):
- """Create a partition in a file.
-
- Args:
- file_path: A path to a file where a partition will be created.
- partition_type: A type of a partition to be created, e.g. primary.
- fs_type: A type of a file system to be created. For example, ext2, ext3,
- etc.
- start: Start offset of a partition in bytes.
- end: End offset of a partition in bytes.
- """
- parted_cmd = ['parted', file_path, 'unit B', 'mkpart', partition_type,
- fs_type, str(start), str(end)]
- RunCommand(parted_cmd)
-
-
-def MakeFileSystem(dev_path, fs_type, uuid=None):
- """Create a file system in a device.
-
- Args:
- dev_path: A path to a device.
- fs_type: A type of a file system to be created. For example ext2, ext3, etc.
- uuid: The value to use as the UUID for the filesystem. If None, a
- random UUID is generated and used.
-
- Returns:
- The uuid of the filesystem. This will be the same as the passed value if
- a value was specified. If no uuid was passed in, this will be the randomly
- generated uuid.
-
- Raises:
- MakeFileSystemException: If mkfs encounters an error.
- """
- if uuid is None:
- uuid = RunCommand(['uuidgen']).strip()
- if uuid is None:
- raise MakeFileSystemException(dev_path)
-
- mkfs_cmd = ['mkfs', '-t', fs_type, dev_path]
- RunCommand(mkfs_cmd)
-
- if fs_type == 'xfs':
- set_uuid_cmd = ['xfs_admin', '-U', uuid, dev_path]
- else:
- set_uuid_cmd = ['tune2fs', '-U', uuid, dev_path]
- RunCommand(set_uuid_cmd)
-
- return uuid
-
-
-def Rsync(src, dest, exclude_file, ignore_hard_links, recursive, xattrs):
- """Copy files from specified directory using rsync.
-
- Args:
- src: Source location to copy.
- dest: Destination to copy files to.
- exclude_file: A path to a file which contains a list of exclude from copy
- filters.
- ignore_hard_links: If True, hard links are copied as separate files.
- If False, hard links are recreated in dest.
- recursive: Specifies if directories are copied recursively or not.
- xattrs: Specifies if extended attributes are preserved or not.
- """
- rsync_cmd = ['rsync', '--times', '--perms', '--owner', '--group', '--links',
- '--devices', '--acls', '--sparse']
- if not ignore_hard_links:
- rsync_cmd.append('--hard-links')
- if recursive:
- rsync_cmd.append('--recursive')
- else:
- rsync_cmd.append('--dirs')
- if xattrs:
- rsync_cmd.append('--xattrs')
- if exclude_file:
- rsync_cmd.append('--exclude-from=' + exclude_file)
- rsync_cmd.extend([src, dest])
-
- logging.debug('Calling: %s', repr(rsync_cmd))
- if exclude_file:
- logging.debug('Contents of exclude file %s:', exclude_file)
- with open(exclude_file, 'rb') as excludes:
- for line in excludes:
- logging.debug(' %s', line.rstrip())
-
- RunCommand(rsync_cmd)
-
-
-def GetUUID(partition_path):
- """Fetches the UUID of the filesystem on the specified partition.
-
- Args:
- partition_path: The path to the partition.
-
- Returns:
- The uuid of the filesystem.
- """
- output = RunCommand(['blkid', partition_path])
- for token in output.split():
- if token.startswith('UUID='):
- uuid = token.strip()[len('UUID="'):-1]
-
- logging.debug('found uuid = %s', uuid)
- return uuid
-
-
-def CopyBytes(src, dest, count):
- """Copies count bytes from the src to dest file.
-
- Args:
- src: The source to read bytes from.
- dest: The destination to copy bytes to.
- count: Number of bytes to copy.
- """
- block_size = 4096
- block_count = count // block_size
- dd_command = ['dd',
- 'if=%s' % src,
- 'of=%s' % dest,
- 'conv=notrunc',
- 'bs=%s' % block_size,
- 'count=%s' % block_count]
- RunCommand(dd_command)
- remaining_bytes = count - block_count * block_size
- if remaining_bytes:
- logging.debug('remaining bytes to copy = %s', remaining_bytes)
- dd_command = ['dd',
- 'if=%s' % src,
- 'of=%s' % dest,
- 'seek=%s' % block_count,
- 'skip=%s' % block_count,
- 'conv=notrunc',
- 'bs=1',
- 'count=%s' % remaining_bytes]
- RunCommand(dd_command)
-
-
-def GetPartitionStart(disk_path, partition_number):
- """Returns the starting position in bytes of the partition.
-
- Args:
- disk_path: The path to disk device.
- partition_number: The partition number to lookup. 1 based.
-
- Returns:
- The starting position of the partition in bytes.
-
- Raises:
- subprocess.CalledProcessError: If running parted fails.
- IndexError: If there is no partition at the given number.
- """
- parted_cmd = ['parted',
- disk_path,
- 'unit B',
- 'print']
- # In case the device is not valid and parted shows the retry/cancel
- # prompt, write 'c' to stdin.
- output = RunCommand(parted_cmd, input_str='c')
- for line in output.splitlines():
- split_line = line.split()
- if len(split_line) > 4 and split_line[0] == str(partition_number):
- return int(split_line[1][:-1])
- raise IndexError()
-
-
-def RemovePartition(disk_path, partition_number):
- """Removes the partition number from the disk.
-
- Args:
- disk_path: The disk to remove the partition from.
- partition_number: The partition number to remove.
- """
- parted_cmd = ['parted',
- disk_path,
- 'rm',
- str(partition_number)]
- # In case the device is not valid and parted shows the retry/cancel
- # prompt, write 'c' to stdin.
- RunCommand(parted_cmd, input_str='c')
-
-
-def GetDiskSize(disk_file):
- """Returns the size of the disk device in bytes.
-
- Args:
- disk_file: The full path to the disk device.
-
- Returns:
- The size of the disk device in bytes.
-
- Raises:
- subprocess.CalledProcessError: If fdisk command fails for the disk file.
- """
- output = RunCommand(['fdisk', '-s', disk_file])
- return int(output) * 1024
-
-
-def RunCommand(command, input_str=None):
- """Runs the command and returns the output printed on stdout.
-
- Args:
- command: The command to run.
- input_str: The input to pass to subprocess via stdin.
-
- Returns:
- The stdout from running the command.
-
- Raises:
- subprocess.CalledProcessError: if the command fails.
- """
- logging.debug('running %s with input=%s', command, input_str)
- p = subprocess.Popen(command, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- cmd_output = p.communicate(input_str)
- logging.debug('stdout %s', cmd_output[0])
- logging.debug('stderr %s', cmd_output[1])
- logging.debug('returncode %s', p.returncode)
- if p.returncode:
- logging.warning('Error while running %s return_code = %s\n'
- 'stdout=%s\nstderr=%s',
- command, p.returncode, cmd_output[0],
- cmd_output[1])
- raise subprocess.CalledProcessError(p.returncode,
- cmd=command)
- return cmd_output[0]
-
-
-def TarAndGzipFile(src_paths, dest):
- """Pack file in tar archive and optionally gzip it.
-
- Args:
- src_paths: A list of files that will be archived.
- (Must be in the same directory.)
- dest: An archive name. If it ends with .gz or .tgz, the archive is
- gzipped as well.
-
- Raises:
- TarAndGzipFileException: If tar encounters an error.
- """
- if dest.endswith('.gz') or dest.endswith('.tgz'):
- mode = 'czSf'
- else:
- mode = 'cSf'
- src_names = [os.path.basename(src_path) for src_path in src_paths]
- # Take the directory of the first file in the list, all files are expected
- # to be in the same directory.
- src_dir = os.path.dirname(src_paths[0])
- tar_cmd = ['tar', mode, dest, '-C', src_dir] + src_names
- retcode = subprocess.call(tar_cmd)
- if retcode:
- raise TarAndGzipFileException(','.join(src_paths))
-
-
-class Http(object):
- def Get(self, request, timeout=None):
- return urllib2.urlopen(request, timeout=timeout).read()
-
- def GetMetadata(self, url_path, recursive=False, timeout=None):
- """Retrieves instance metadata.
-
- Args:
- url_path: The path of the metadata url after the api version.
- http://169.254.169.254/computeMetadata/v1/url_path
- recursive: If set, returns the tree of metadata starting at url_path as
- a json string.
- timeout: How long to wait for blocking operations (in seconds).
- A value of None uses urllib2's default timeout.
- Returns:
- The metadata returned based on the url path.
-
- """
- # Use the latest version of the metadata.
- suffix = ''
- if recursive:
- suffix = '?recursive=true'
- url = '{0}{1}{2}'.format(METADATA_V1_URL_PREFIX, url_path, suffix)
- request = urllib2.Request(url)
- request.add_unredirected_header('Metadata-Flavor', 'Google')
- return self.Get(request, timeout=timeout)
-
-
-def IsRunningOnGCE():
- """Detect if we are running on GCE.
-
- Returns:
- True if we are running on GCE, False otherwise.
- """
- # Try accessing DMI/SMBIOS information through dmidecode first
- try:
- dmidecode_cmd = ['dmidecode', '-s', 'bios-vendor']
- output = RunCommand(dmidecode_cmd)
- return 'Google' in output
- except subprocess.CalledProcessError:
- # This fails if dmidecode doesn't exist or we have insufficient privileges
- pass
-
- # If dmidecode is not working, fall back to contacting the metadata server
- try:
- Http().GetMetadata('instance/id', timeout=1)
- return True
- except urllib2.HTTPError as e:
- logging.warning('HTTP error: %s (HTTP status code=%s)', e.reason, e.code)
- except urllib2.URLError as e:
- logging.warning('Cannot reach metadata server: %s', e.reason)
-
- return False
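
The key detail in GetMetadata is the unredirected Metadata-Flavor: Google header; without it the metadata server rejects the request. A standalone sketch of the same query (urllib2, as used throughout this module):

    import urllib2

    url = 'http://169.254.169.254/computeMetadata/v1/instance/id'
    request = urllib2.Request(url)
    # The header must not survive redirects, hence add_unredirected_header.
    request.add_unredirected_header('Metadata-Flavor', 'Google')
    instance_id = urllib2.urlopen(request, timeout=1).read()
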
diff --git a/legacy/gcimagebundle/setup.py b/legacy/gcimagebundle/setup.py
deleted file mode 100755
index 76ccd04..0000000
--- a/legacy/gcimagebundle/setup.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Setup installation module for Image Bundle."""
-
-import os
-import distribute_setup
-distribute_setup.use_setuptools()
-
-from setuptools import find_packages
-from setuptools import setup
-
-CURDIR = os.path.abspath(os.path.dirname(__file__))
-
-def Read(file_name):
- with open(os.path.join(CURDIR, file_name), 'r') as f:
- return f.read().strip()
-
-setup(
- name='gcimagebundle',
- version=Read('VERSION'),
- url='https://github.com/GoogleCloudPlatform/compute-image-packages/tree/master/image-bundle',
- download_url='https://github.com/GoogleCloudPlatform/compute-image-packages/releases',
- license='Apache 2.0',
- author='Google Inc.',
- author_email='gc-team@google.com',
- description=('Image bundling tool for root file system.'),
- long_description=Read('README.md'),
- zip_safe=False,
- classifiers=[
- 'Development Status :: 5 - Production/Stable',
- 'Environment :: Console',
- 'Intended Audience :: Developers',
- 'Intended Audience :: System Administrators',
- 'License :: OSI Approved :: Apache Software License',
- 'Natural Language :: English',
- 'Topic :: System :: Filesystems',
- 'Topic :: Utilities',
- ],
- platforms='any',
- include_package_data=True,
- packages=find_packages(exclude=['distribute_setup']),
- scripts=['gcimagebundle'],
- test_suite='gcimagebundlelib.tests',
-)
diff --git a/legacy/gcimagebundle/stdeb.cfg b/legacy/gcimagebundle/stdeb.cfg
deleted file mode 100644
index 09364a3..0000000
--- a/legacy/gcimagebundle/stdeb.cfg
+++ /dev/null
@@ -1,3 +0,0 @@
-[DEFAULT]
-Depends: kpartx, parted, rsync, uuid-runtime
-XS-Python-Version: >= 2.6
diff --git a/package/instance_configs.cfg b/package/instance_configs.cfg
new file mode 100644
index 0000000..8e1d63e
--- /dev/null
+++ b/package/instance_configs.cfg
@@ -0,0 +1,22 @@
+[Accounts]
+deprovision_remove = false
+groups = adm,dip,lxd,plugdev,video
+
+[Daemons]
+accounts_daemon = true
+clock_skew_daemon = true
+ip_forwarding_daemon = true
+
+[InstanceSetup]
+network_enabled = true
+optimize_local_ssd = true
+set_boto_config = true
+set_host_keys = true
+set_multiqueue = true
+
+[IpForwarding]
+ethernet_proto_id = 66
+
+[MetadataScripts]
+shutdown = true
+startup = true
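
The new instance_configs.cfg is plain INI, so any of the guest tools can read it with the standard library. A sketch of checking a daemon toggle with Python 2's ConfigParser (the install path below is an assumption, not taken from this diff):

    import ConfigParser  # configparser on Python 3

    parser = ConfigParser.SafeConfigParser()
    # Assumed install location; adjust to wherever the package puts it.
    parser.read('/etc/default/instance_configs.cfg')
    if parser.getboolean('Daemons', 'accounts_daemon'):
        print('accounts daemon enabled')
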
diff --git a/package/systemd/google-accounts-daemon.service b/package/systemd/google-accounts-daemon.service
new file mode 100644
index 0000000..04d74b5
--- /dev/null
+++ b/package/systemd/google-accounts-daemon.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Google Compute Engine Accounts Daemon
+After=network.target google-instance-setup.service
+Before=sshd.service
+Requires=network.target
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/google_accounts_daemon
+
+[Install]
+WantedBy=multi-user.target
diff --git a/package/systemd/google-clock-skew-daemon.service b/package/systemd/google-clock-skew-daemon.service
new file mode 100644
index 0000000..511f55b
--- /dev/null
+++ b/package/systemd/google-clock-skew-daemon.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Google Compute Engine Clock Skew Daemon
+After=network.target google-instance-setup.service
+Requires=network.target
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/google_clock_skew_daemon
+
+[Install]
+WantedBy=multi-user.target
diff --git a/google-startup-scripts/usr/lib/systemd/system/google.service b/package/systemd/google-instance-setup.service
index ea76a46..afa639a 100644
--- a/google-startup-scripts/usr/lib/systemd/system/google.service
+++ b/package/systemd/google-instance-setup.service
@@ -1,12 +1,12 @@
[Unit]
-Description=Google Compute Engine VM initialization
-After=local-fs.target network-online.target network.target
+Description=Google Compute Engine Instance Setup
+After=local-fs.target network-online.target network.target rsyslog.service
Before=sshd.service
Wants=local-fs.target network-online.target network.target
[Service]
-ExecStart=/usr/share/google/onboot
Type=oneshot
+ExecStart=/usr/bin/google_instance_setup
[Install]
WantedBy=sshd.service
diff --git a/package/systemd/google-ip-forwarding-daemon.service b/package/systemd/google-ip-forwarding-daemon.service
new file mode 100644
index 0000000..d8d98ad
--- /dev/null
+++ b/package/systemd/google-ip-forwarding-daemon.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Google Compute Engine IP Forwarding Daemon
+After=network.target google-instance-setup.service
+Requires=network.target
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/google_ip_forwarding_daemon
+
+[Install]
+WantedBy=multi-user.target
diff --git a/package/systemd/google-shutdown-scripts.service b/package/systemd/google-shutdown-scripts.service
new file mode 100644
index 0000000..04c82fd
--- /dev/null
+++ b/package/systemd/google-shutdown-scripts.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=Google Compute Engine Shutdown Scripts
+After=local-fs.target network-online.target network.target rsyslog.service
+After=google-instance-setup.service
+Wants=local-fs.target network-online.target network.target
+
+[Service]
+ExecStart=/bin/true
+ExecStop=/usr/bin/google_metadata_script_runner --script-type shutdown
+Type=oneshot
+RemainAfterExit=true
+TimeoutStopSec=0
+
+[Install]
+WantedBy=multi-user.target
diff --git a/package/systemd/google-startup-scripts.service b/package/systemd/google-startup-scripts.service
new file mode 100644
index 0000000..1c373c5
--- /dev/null
+++ b/package/systemd/google-startup-scripts.service
@@ -0,0 +1,13 @@
+[Unit]
+Description=Google Compute Engine Startup Scripts
+After=local-fs.target network-online.target network.target rsyslog.service
+After=google-instance-setup.service
+Wants=local-fs.target network-online.target network.target
+
+[Service]
+ExecStart=/usr/bin/google_metadata_script_runner --script-type startup
+KillMode=process
+Type=oneshot
+
+[Install]
+WantedBy=multi-user.target
diff --git a/package/systemd/postinst.sh b/package/systemd/postinst.sh
new file mode 100755
index 0000000..66369fe
--- /dev/null
+++ b/package/systemd/postinst.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop existing daemons.
+systemctl stop --no-block google-accounts-daemon
+systemctl stop --no-block google-clock-skew-daemon
+systemctl stop --no-block google-ip-forwarding-daemon
+
+# Enable systemd services.
+systemctl enable google-accounts-daemon.service
+systemctl enable google-clock-skew-daemon.service
+systemctl enable google-instance-setup.service
+systemctl enable google-ip-forwarding-daemon.service
+systemctl enable google-shutdown-scripts.service
+systemctl enable google-startup-scripts.service
+
+# Start daemons.
+systemctl start --no-block google-accounts-daemon
+systemctl start --no-block google-clock-skew-daemon
+systemctl start --no-block google-ip-forwarding-daemon
diff --git a/package/systemd/prerm.sh b/package/systemd/prerm.sh
new file mode 100755
index 0000000..16419b8
--- /dev/null
+++ b/package/systemd/prerm.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "$1" = purge ]; then
+ systemctl stop --no-block google-accounts-daemon
+ systemctl stop --no-block google-clock-skew-daemon
+ systemctl stop --no-block google-ip-forwarding-daemon
+
+ systemctl --no-reload disable google-accounts-daemon.service
+ systemctl --no-reload disable google-clock-skew-daemon.service
+ systemctl --no-reload disable google-instance-setup.service
+ systemctl --no-reload disable google-ip-forwarding-daemon.service
+ systemctl --no-reload disable google-shutdown-scripts.service
+ systemctl --no-reload disable google-startup-scripts.service
+fi
diff --git a/package/sysvinit/google-accounts-daemon b/package/sysvinit/google-accounts-daemon
new file mode 100755
index 0000000..96b0a55
--- /dev/null
+++ b/package/sysvinit/google-accounts-daemon
@@ -0,0 +1,107 @@
+#!/bin/sh
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+### BEGIN INIT INFO
+# Provides: google_accounts_daemon
+# X-Start-Before: ssh
+# Required-Start: $local_fs $network $named $syslog $google_instance_setup
+# Required-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop:
+# Short-Description: Google Compute Engine Accounts Daemon
+# Description: Manages accounts from metadata SSH keys.
+### END INIT INFO
+
+# Do NOT "set -e".
+
+NAME=google-accounts-daemon
+DAEMON=/usr/bin/google_accounts_daemon
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Exit if the package is not installed.
+[ -x "$DAEMON" ] || exit 0
+
+# Load the rcS variables.
+. /lib/init/vars.sh
+
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service.
+#
+do_start()
+{
+ start-stop-daemon \
+ --background \
+ --exec $DAEMON \
+ --make-pidfile \
+ --pidfile $PIDFILE \
+ --quiet \
+ --start > /dev/null
+}
+
+#
+# Function that stops the daemon/service.
+#
+do_stop()
+{
+ start-stop-daemon \
+ --exec $DAEMON \
+ --pidfile $PIDFILE \
+ --quiet \
+ --retry=TERM/30/KILL/5 \
+ --stop > /dev/null
+
+ # Wait for children to finish too if this is a daemon that forks
+ # and if the daemon is only ever run from this initscript.
+ # If the above conditions are not satisfied then add some other code
+ # that waits for the process to drop all resources that could be
+ # needed by services started subsequently. A last resort is to
+ # sleep for some time.
+ start-stop-daemon \
+ --exec $DAEMON \
+ --oknodo \
+ --quiet \
+ --retry=0/30/KILL/5 \
+ --stop > /dev/null
+
+ # Delete the pidfile when the daemon exits.
+ rm -f $PIDFILE
+}
+
+case "$1" in
+ start)
+ do_start
+ ;;
+ stop)
+ do_stop
+ ;;
+ status)
+ status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
+ ;;
+ restart|force-reload)
+ do_stop
+ do_start
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 1
+ ;;
+esac
+
+:
diff --git a/package/sysvinit/google-clock-skew-daemon b/package/sysvinit/google-clock-skew-daemon
new file mode 100755
index 0000000..02b0011
--- /dev/null
+++ b/package/sysvinit/google-clock-skew-daemon
@@ -0,0 +1,106 @@
+#!/bin/sh
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+### BEGIN INIT INFO
+# Provides: google_clock_skew_daemon
+# Required-Start: $network $syslog $google_instance_setup
+# Required-Stop: $network
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Google Compute Engine Clock Skew Daemon
+# Description: Sync the system clock on migration.
+### END INIT INFO
+
+# Do NOT "set -e".
+
+NAME=google-clock-skew-daemon
+DAEMON=/usr/bin/google_clock_skew_daemon
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Exit if the package is not installed.
+[ -x "$DAEMON" ] || exit 0
+
+# Load the rcS variables.
+. /lib/init/vars.sh
+
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service.
+#
+do_start()
+{
+ start-stop-daemon \
+ --background \
+ --exec $DAEMON \
+ --make-pidfile \
+ --pidfile $PIDFILE \
+ --quiet \
+ --start > /dev/null
+}
+
+#
+# Function that stops the daemon/service.
+#
+do_stop()
+{
+ start-stop-daemon \
+ --exec $DAEMON \
+ --pidfile $PIDFILE \
+ --quiet \
+ --retry=TERM/30/KILL/5 \
+ --stop > /dev/null
+
+ # Wait for children to finish too if this is a daemon that forks
+ # and if the daemon is only ever run from this initscript.
+ # If the above conditions are not satisfied then add some other code
+ # that waits for the process to drop all resources that could be
+ # needed by services started subsequently. A last resort is to
+ # sleep for some time.
+ start-stop-daemon \
+ --exec $DAEMON \
+ --oknodo \
+ --quiet \
+ --retry=0/30/KILL/5 \
+ --stop > /dev/null
+
+ # Delete the pidfile when the daemon exits.
+ rm -f $PIDFILE
+}
+
+case "$1" in
+ start)
+ do_start
+ ;;
+ stop)
+ do_stop
+ ;;
+ status)
+ status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
+ ;;
+ restart|force-reload)
+ do_stop
+ do_start
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 1
+ ;;
+esac
+
+:
diff --git a/package/sysvinit/google-instance-setup b/package/sysvinit/google-instance-setup
new file mode 100755
index 0000000..4382f25
--- /dev/null
+++ b/package/sysvinit/google-instance-setup
@@ -0,0 +1,51 @@
+#!/bin/sh
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+### BEGIN INIT INFO
+# Provides: google_instance_setup
+# X-Start-Before: ssh
+# Required-Start: $local_fs $network $syslog
+# Required-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop:
+# Short-Description: Google Compute Engine Instance Setup
+# Description: Runs instance setup on boot.
+### END INIT INFO
+
+NAME=google-instance-setup
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Load the rcS variables.
+. /lib/init/vars.sh
+
+#
+# Function that starts the daemon/service.
+#
+do_start()
+{
+ /usr/bin/google_instance_setup > /dev/null
+}
+
+case "$1" in
+ start)
+ do_start
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME start" >&2
+ exit 1
+ ;;
+esac
+
+:
diff --git a/package/sysvinit/google-ip-forwarding-daemon b/package/sysvinit/google-ip-forwarding-daemon
new file mode 100755
index 0000000..92e72a8
--- /dev/null
+++ b/package/sysvinit/google-ip-forwarding-daemon
@@ -0,0 +1,106 @@
+#!/bin/sh
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+### BEGIN INIT INFO
+# Provides: google_ip_forwarding_daemon
+# Required-Start: $network $syslog $google_instance_setup
+# Required-Stop: $network
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Google Compute Engine IP Forwarding Daemon
+# Description: Manages IP forwarding.
+### END INIT INFO
+
+# Do NOT "set -e".
+
+NAME=google-ip-forwarding-daemon
+DAEMON=/usr/bin/google_ip_forwarding_daemon
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Exit if the package is not installed.
+[ -x "$DAEMON" ] || exit 0
+
+# Load the rcS variables.
+. /lib/init/vars.sh
+
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service.
+#
+do_start()
+{
+ start-stop-daemon \
+ --background \
+ --exec $DAEMON \
+ --make-pidfile \
+ --pidfile $PIDFILE \
+ --quiet \
+ --start > /dev/null
+}
+
+#
+# Function that stops the daemon/service.
+#
+do_stop()
+{
+ start-stop-daemon \
+ --exec $DAEMON \
+ --pidfile $PIDFILE \
+ --quiet \
+ --retry=TERM/30/KILL/5 \
+ --stop > /dev/null
+
+ # Wait for children to finish too if this is a daemon that forks
+ # and if the daemon is only ever run from this initscript.
+ # If the above conditions are not satisfied then add some other code
+ # that waits for the process to drop all resources that could be
+ # needed by services started subsequently. A last resort is to
+ # sleep for some time.
+ start-stop-daemon \
+ --exec $DAEMON \
+ --oknodo \
+ --quiet \
+ --retry=0/30/KILL/5 \
+ --stop > /dev/null
+
+ # Delete the pidfile when the daemon exits.
+ rm -f $PIDFILE
+}
+
+case "$1" in
+ start)
+ do_start
+ ;;
+ stop)
+ do_stop
+ ;;
+ status)
+ status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
+ ;;
+ restart|force-reload)
+ do_stop
+ do_start
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 1
+ ;;
+esac
+
+:
diff --git a/package/sysvinit/google-shutdown-scripts b/package/sysvinit/google-shutdown-scripts
new file mode 100755
index 0000000..3f93799
--- /dev/null
+++ b/package/sysvinit/google-shutdown-scripts
@@ -0,0 +1,50 @@
+#!/bin/sh
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+### BEGIN INIT INFO
+# Provides: google_shutdown_scripts
+# Required-Start:
+# Required-Stop: $remote_fs $syslog docker kubelet
+# Default-Start:
+# Default-Stop: 0 6
+# Short-Description: Google Compute Engine Shutdown Scripts
+# Description: Runs user-specified shutdown scripts from metadata.
+### END INIT INFO
+
+NAME=google-shutdown-scripts
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Load the rcS variables.
+. /lib/init/vars.sh
+
+#
+# Function that stops the daemon/service.
+#
+do_stop()
+{
+ /usr/bin/google_metadata_script_runner --script-type shutdown > /dev/null
+}
+
+case "$1" in
+ stop)
+ do_stop
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME stop" >&2
+ exit 1
+ ;;
+esac
+
+:
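
Note: this hook only fires at halt or reboot (Default-Stop: 0 6) and simply
wraps the metadata script runner. The script it executes comes from instance
metadata; a hedged example of setting one with the gcloud CLI (instance name
and file path are placeholders):

    gcloud compute instances add-metadata my-instance \
        --metadata-from-file shutdown-script=shutdown.sh
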
diff --git a/package/sysvinit/google-startup-scripts b/package/sysvinit/google-startup-scripts
new file mode 100755
index 0000000..2ee8a56
--- /dev/null
+++ b/package/sysvinit/google-startup-scripts
@@ -0,0 +1,50 @@
+#!/bin/sh
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+### BEGIN INIT INFO
+# Provides: google_startup_scripts
+# Required-Start: $all $google_instance_setup
+# Required-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop:
+# Short-Description: Google Compute Engine Startup Scripts
+# Description: Runs user-specified startup scripts from metadata.
+### END INIT INFO
+
+NAME=google-startup-scripts
+SCRIPTNAME=/etc/init.d/$NAME
+
+# Load the rcS variables.
+. /lib/init/vars.sh
+
+#
+# Function that starts the daemon/service.
+#
+do_start()
+{
+ /usr/bin/google_metadata_script_runner --script-type startup > /dev/null
+}
+
+case "$1" in
+ start)
+ do_start
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME start" >&2
+ exit 1
+ ;;
+esac
+
+:
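
Note: when debugging, the runner the initscript wraps can be invoked directly,
without the /dev/null redirect, to watch what it fetches and executes:

    sudo /usr/bin/google_metadata_script_runner --script-type startup
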
diff --git a/legacy/gcimagebundle/gcimagebundlelib/opensuse.py b/package/sysvinit/postinst.sh
index 9f709ff..281f6ee 100644..100755
--- a/legacy/gcimagebundle/gcimagebundlelib/opensuse.py
+++ b/package/sysvinit/postinst.sh
@@ -1,4 +1,5 @@
-# Copyright 2013 SUSE LLC All Rights Reserved
+#!/bin/bash
+# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,18 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-
-"""openSUSE platform info."""
-
-from gcimagebundlelib import suse
-
-class OpenSUSE(suse.SUSE):
- """openSUSE platform info."""
-
- @staticmethod
- def IsThisPlatform(root='/'):
- return 'openSUSE' == suse.SUSE().distribution
-
- def __init__(self):
- super(OpenSUSE, self).__init__()
-
+update-rc.d google-accounts-daemon defaults
+update-rc.d google-clock-skew-daemon defaults
+update-rc.d google-instance-setup defaults
+update-rc.d google-ip-forwarding-daemon defaults
+update-rc.d google-shutdown-scripts defaults
+update-rc.d google-startup-scripts defaults
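
Note: update-rc.d reads the LSB INIT INFO headers embedded in each script
above (Default-Start/Default-Stop) to place the start and kill symlinks. A
quick post-install check, assuming the usual Debian layout (runlevels chosen
arbitrarily):

    ls -l /etc/rc2.d/ /etc/rc0.d/ | grep google
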
diff --git a/package/sysvinit/prerm.sh b/package/sysvinit/prerm.sh
new file mode 100755
index 0000000..abc306d
--- /dev/null
+++ b/package/sysvinit/prerm.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "$1" = purge ]; then
+ update-rc.d google-accounts-daemon remove
+ update-rc.d google-clock-skew-daemon remove
+ update-rc.d google-instance-setup remove
+ update-rc.d google-ip-forwarding-daemon remove
+  update-rc.d google-shutdown-scripts remove
+  update-rc.d google-startup-scripts remove
+fi
diff --git a/package/upstart/google-accounts-daemon.conf b/package/upstart/google-accounts-daemon.conf
new file mode 100644
index 0000000..85ef7a5
--- /dev/null
+++ b/package/upstart/google-accounts-daemon.conf
@@ -0,0 +1,5 @@
+# Manages accounts from metadata SSH keys.
+start on stopped google-instance-setup
+
+respawn
+exec /usr/bin/google_accounts_daemon
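
Note: "start on stopped google-instance-setup" chains each daemon off the
one-shot setup task, and "respawn" restarts the process if it dies. The
resulting jobs can be inspected with standard upstart tooling:

    initctl list | grep google
    initctl status google-accounts-daemon
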
diff --git a/package/upstart/google-clock-skew-daemon.conf b/package/upstart/google-clock-skew-daemon.conf
new file mode 100644
index 0000000..5213e9c
--- /dev/null
+++ b/package/upstart/google-clock-skew-daemon.conf
@@ -0,0 +1,5 @@
+# Sync the system clock on migration.
+start on stopped google-instance-setup
+
+respawn
+exec /usr/bin/google_clock_skew_daemon
diff --git a/package/upstart/google-instance-setup.conf b/package/upstart/google-instance-setup.conf
new file mode 100644
index 0000000..2c7d596
--- /dev/null
+++ b/package/upstart/google-instance-setup.conf
@@ -0,0 +1,6 @@
+# Runs instance setup on boot.
+start on started rsyslog
+
+task
+
+exec /usr/bin/google_instance_setup
diff --git a/package/upstart/google-ip-forwarding-daemon.conf b/package/upstart/google-ip-forwarding-daemon.conf
new file mode 100644
index 0000000..6d1fd05
--- /dev/null
+++ b/package/upstart/google-ip-forwarding-daemon.conf
@@ -0,0 +1,5 @@
+# Manages IP forwarding.
+start on stopped google-instance-setup
+
+respawn
+exec /usr/bin/google_ip_forwarding_daemon
diff --git a/package/upstart/google-shutdown-scripts.conf b/package/upstart/google-shutdown-scripts.conf
new file mode 100644
index 0000000..b9323a5
--- /dev/null
+++ b/package/upstart/google-shutdown-scripts.conf
@@ -0,0 +1,5 @@
+# Runs a shutdown script from metadata.
+start on starting rc RUNLEVEL=[06]
+task
+
+exec /usr/bin/google_metadata_script_runner --script-type shutdown
diff --git a/package/upstart/google-startup-scripts.conf b/package/upstart/google-startup-scripts.conf
new file mode 100644
index 0000000..6fa68a7
--- /dev/null
+++ b/package/upstart/google-startup-scripts.conf
@@ -0,0 +1,4 @@
+# Runs a startup script from metadata.
+start on stopped google-instance-setup
+
+exec /usr/bin/google_metadata_script_runner --script-type startup
diff --git a/legacy/gcimagebundle/gcimagebundle b/package/upstart/postinst.sh
index 3ab7ec1..5cbe3c8 100755
--- a/legacy/gcimagebundle/gcimagebundle
+++ b/package/upstart/postinst.sh
@@ -1,6 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright 2013 Google Inc. All Rights Reserved.
+#!/bin/bash
+# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,15 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# Stop existing daemons
+stop --no-wait google-accounts-daemon
+stop --no-wait google-clock-skew-daemon
+stop --no-wait google-ip-forwarding-daemon
-"""Wrapper for gcimagebundle main."""
-
-from gcimagebundlelib import imagebundle
-
-
-def main():
- imagebundle.main()
-
-
-if __name__ == '__main__':
- main()
+# Start daemons
+start --no-wait google-accounts-daemon
+start --no-wait google-clock-skew-daemon
+start --no-wait google-ip-forwarding-daemon
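
Note: "stop" exits nonzero when a job is not already running; since this is
bash without set -e, installation still proceeds. A sketch of the same restart
written defensively (the guards are an assumption, not part of the packaged
script):

    for job in google-accounts-daemon google-clock-skew-daemon \
               google-ip-forwarding-daemon; do
      stop --no-wait "$job" 2>/dev/null || true
      start --no-wait "$job"
    done
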
diff --git a/google-startup-scripts/etc/rc.local b/package/upstart/prerm.sh
index ac6f13c..0b161ab 100755
--- a/google-startup-scripts/etc/rc.local
+++ b/package/upstart/prerm.sh
@@ -1,5 +1,5 @@
-#! /bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
+#!/bin/bash
+# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,5 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-[ -x /sbin/initctl ] && initctl emit --no-wait google-rc-local-has-run
-exit 0
+if [ "$1" = purge ]; then
+ stop --no-wait google-accounts-daemon
+ stop --no-wait google-clock-skew-daemon
+ stop --no-wait google-ip-forwarding-daemon
+fi
diff --git a/scripts/optimize_local_ssd b/scripts/optimize_local_ssd
new file mode 100755
index 0000000..15238b9
--- /dev/null
+++ b/scripts/optimize_local_ssd
@@ -0,0 +1,95 @@
+#!/bin/bash
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+total_cpus=$(nproc)
+
+config_nvme()
+{
+ current_cpu=0
+ for dev in /sys/bus/pci/drivers/nvme/*
+ do
+ if [ ! -d "$dev" ]
+ then
+ continue
+ fi
+ for irq_info in $dev/msi_irqs/*
+ do
+ if [ ! -f "$irq_info" ]
+ then
+ continue
+ fi
+ current_cpu=$((current_cpu % total_cpus))
+ cpu_mask=$(printf "%x" $((1<<current_cpu)))
+      irq=$(basename "$irq_info")
+ echo "Setting IRQ $irq smp_affinity to $cpu_mask."
+ echo "$cpu_mask" > "/proc/irq/$irq/smp_affinity"
+ current_cpu=$((current_cpu+1))
+ done
+ done
+}
+
+config_scsi()
+{
+ irqs=()
+ for device in /sys/bus/virtio/drivers/virtio_scsi/virtio*
+ do
+ ssd=0
+ for target_path in $device/host*/target*/*
+ do
+ if [ ! -f "$target_path/model" ]
+ then
+ continue
+ fi
+ model=$(cat "$target_path/model")
+ if [[ $model =~ .*EphemeralDisk.* ]]
+ then
+ ssd=1
+ for queue_path in $target_path/block/sd*/queue
+ do
+ echo noop > "$queue_path/scheduler"
+ echo 0 > "$queue_path/add_random"
+ echo 512 > "$queue_path/nr_requests"
+ echo 0 > "$queue_path/rotational"
+ echo 0 > "$queue_path/rq_affinity"
+ echo 1 > "$queue_path/nomerges"
+ done
+ fi
+ done
+ if [[ $ssd == 1 ]]
+ then
+ request_queue=$(basename "$device")-request
+      irq=$(grep "$request_queue" /proc/interrupts | awk '{print $1}' | sed 's/://')
+ irqs+=($irq)
+ fi
+ done
+ irq_count=${#irqs[@]}
+ if [ "$irq_count" != 0 ]
+ then
+ stride=$((total_cpus / irq_count))
+ stride=$((stride < 1 ? 1 : stride))
+ current_cpu=0
+ for irq in "${irqs[@]}"
+ do
+      current_cpu=$((current_cpu % total_cpus))
+      cpu_mask=$(printf "%x" $((1<<current_cpu)))
+ echo "Setting IRQ $irq smp_affinity to $cpu_mask."
+ echo "$cpu_mask" > "/proc/irq/$irq/smp_affinity"
+ current_cpu=$((current_cpu+stride))
+ done
+ fi
+}
+
+config_nvme
+config_scsi
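
Note: both functions compute a one-hot smp_affinity mask per IRQ, round-robin
over CPUs for NVMe and strided for SCSI so the request queues spread evenly.
The mask math in isolation (4 vCPUs assumed):

    total_cpus=4
    for current_cpu in 0 1 2 3 4; do
      cpu=$((current_cpu % total_cpus))
      printf 'cpu %d -> smp_affinity mask %x\n' "$cpu" $((1 << cpu))
    done
    # cpu 0 -> 1, cpu 1 -> 2, cpu 2 -> 4, cpu 3 -> 8, then wraps to 1.
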
diff --git a/google-startup-scripts/usr/share/google/virtionet-irq-affinity b/scripts/set_multiqueue
index 6b86ee2..5c722e3 100755
--- a/google-startup-scripts/usr/share/google/virtionet-irq-affinity
+++ b/scripts/set_multiqueue
@@ -1,5 +1,5 @@
-#! /bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
+#!/bin/bash
+# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -27,14 +27,6 @@
# each CPU a dedicated TX and RX network queue, while ensuring that all packets
# from a single flow are delivered to the same CPU.
-function log() {
- if [[ -x ${PREFIX}/usr/bin/logger ]]; then
- echo $* | ${PREFIX}/usr/bin/logger -t virtionet-irq-affinity -p daemon.info
- else
- echo $* >&2
- fi
-}
-
function is_decimal_int() {
[ "${1}" -eq "${1}" ] > /dev/null 2>&1
}
@@ -43,7 +35,7 @@ function set_channels() {
ethtool -L "${1}" combined "${2}" > /dev/null 2>&1
}
-log "Running $(basename $0)"
+echo "Running $(basename "$0")."
NET_DEVS=/sys/bus/virtio/drivers/virtio_net/virtio*
# Loop through all the virtionet devices and enable multi-queue
@@ -53,21 +45,21 @@ if [ -x /sbin/ethtool ]; then
for eth_dev in $ETH_DEVS; do
eth_dev=$(basename "$eth_dev")
if ! errormsg=$(ethtool -l "$eth_dev" 2>&1); then
- log "/sbin/ethtool says that $eth_dev does not support virtionet multiqueue: $errormsg"
+ echo "/sbin/ethtool says that $eth_dev does not support virtionet multiqueue: $errormsg."
continue
fi
num_max_channels=$(ethtool -l "$eth_dev" | grep -m 1 Combined | cut -f2)
[ "${num_max_channels}" -eq "1" ] && continue
if is_decimal_int "$num_max_channels" && \
set_channels "$eth_dev" "$num_max_channels"; then
- log "Set channels for $eth_dev to $num_max_channels"
+ echo "Set channels for $eth_dev to $num_max_channels."
else
- log "Could not set channels for $eth_dev to $num_max_channels"
+ echo "Could not set channels for $eth_dev to $num_max_channels."
fi
done
done
else
- log "/sbin/ethtool not found: cannot configure virtionet multiqueue"
+ echo "/sbin/ethtool not found: cannot configure virtionet multiqueue."
fi
for dev in $NET_DEVS
@@ -89,8 +81,8 @@ do
virtionet_msix_dir_regex=".*/${dev}-(input|output)\.[0-9]+$"
if [ -d "${virtionet_intx_dir}" ]; then
# All virtionet intx IRQs are delivered to CPU 0
- log "Setting ${smp_affinity} to 01 for device ${dev}"
- echo "01" > ${smp_affinity}
+ echo "Setting ${smp_affinity} to 01 for device ${dev}."
+ echo "01" > "${smp_affinity}"
continue
fi
# Not virtionet intx, probe for MSI-X
@@ -122,7 +114,7 @@ do
for cpu_bitmap in ${affinity_cpumask}; do
bitmap_val=$(printf "%d" "0x${cpu_bitmap}" 2>/dev/null)
if [ "$?" -ne 0 ]; then
- log "Invalid affinity hint ${affinity_hint}: ${affinity_cpumask}"
+ echo "Invalid affinity hint ${affinity_hint}: ${affinity_cpumask}."
affinity_hint_enabled=0
break
elif [ "${bitmap_val}" -ne 0 ]; then
@@ -131,10 +123,10 @@ do
done
IFS=${OIFS}
if [ "${affinity_hint_enabled}" -eq 0 ]; then
- log "Cannot set IRQ affinity ${smp_affinity}, affinity hint disabled"
+ echo "Cannot set IRQ affinity ${smp_affinity}, affinity hint disabled."
else
# Set the IRQ CPU affinity to the virtionet-initialized affinity hint
- log "Setting ${smp_affinity} to ${affinity_cpumask} for device ${dev}"
+ echo "Setting ${smp_affinity} to ${affinity_cpumask} for device ${dev}."
echo "${affinity_cpumask}" > "${smp_affinity}"
fi
done
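
Note: the channel logic above is built on two standard ethtool operations,
which can also be run by hand (device name is a placeholder):

    ethtool -l eth0                   # show max and current combined channel counts
    sudo ethtool -L eth0 combined 4   # set the combined channel count
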
diff --git a/setup.py b/setup.py
new file mode 100755
index 0000000..4f667a6
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Create a Python package of the Linux guest environment."""
+
+import glob
+import os
+import sys
+
+import setuptools
+
+
+def GetInitFiles(path):
+ """Get the list of relative paths to init files.
+
+ Args:
+ path: string, the relative path to the source directory.
+
+ Returns:
+ list, the relative path strings for init files.
+ """
+ valid = '%s/*' % path
+ invalid = '%s/*.sh' % path
+ return list(set(glob.glob(valid)) - set(glob.glob(invalid)))
+
+
+# Common data files to add as part of all Linux distributions.
+data_files = [
+ ('/etc/default', ['package/instance_configs.cfg']),
+]
+
+
+# Data files specific to the various Linux init systems.
+data_files_dict = {
+ 'systemd': [('/usr/lib/systemd/system', GetInitFiles('package/systemd'))],
+ 'sysvinit': [('/etc/init.d', GetInitFiles('package/sysvinit'))],
+ 'upstart': [('/etc/init', GetInitFiles('package/upstart'))],
+}
+
+
+if os.environ.get('CONFIG') not in data_files_dict:
+ keys = ', '.join(data_files_dict.keys())
+  sys.exit('Expected "CONFIG" environment variable to be set to one of [%s].' % keys)
+
+
+setuptools.setup(
+ author='Google Compute Engine Team',
+ author_email='gc-team@google.com',
+ data_files=data_files + data_files_dict.get(os.environ['CONFIG']),
+ description='Google Compute Engine',
+ include_package_data=True,
+ install_requires=['boto'],
+ license='Apache Software License',
+ long_description='Google Compute Engine guest environment.',
+ name='google-compute-engine',
+ packages=setuptools.find_packages(),
+ scripts=glob.glob('scripts/*'),
+ url='https://github.com/GoogleCloudPlatform/compute-image-packages',
+ version='2.0.0',
+ # Entry points create scripts in /usr/bin that call a function.
+ entry_points={
+ 'console_scripts': [
+ 'google_accounts_daemon=google_compute_engine.accounts.accounts_daemon:main',
+ 'google_clock_skew_daemon=google_compute_engine.clock_skew.clock_skew_daemon:main',
+ 'google_ip_forwarding_daemon=google_compute_engine.ip_forwarding.ip_forwarding_daemon:main',
+ 'google_instance_setup=google_compute_engine.instance_setup.instance_setup:main',
+ 'google_metadata_script_runner=google_compute_engine.metadata_scripts.script_manager:main',
+ ],
+ },
+ classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+ 'Environment :: No Input/Output (Daemon)',
+ 'Intended Audience :: Developers',
+ 'Intended Audience :: System Administrators',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: POSIX :: Linux',
+ 'Programming Language :: Python',
+        'Programming Language :: Python :: 2',
+        'Programming Language :: Python :: 2.6',
+        'Programming Language :: Python :: 2.7',
+        'Programming Language :: Python :: 3',
+        'Programming Language :: Python :: 3.2',
+        'Programming Language :: Python :: 3.3',
+        'Programming Language :: Python :: 3.4',
+        'Programming Language :: Python :: 3.5',
+ 'Topic :: Internet',
+ 'Topic :: Software Development :: Libraries :: Python Modules',
+ 'Topic :: System :: Installation/Setup',
+ 'Topic :: System :: Systems Administration',
+ ],
+)
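
Note: setup.py exits early unless CONFIG names one of systemd, sysvinit, or
upstart, so builds look like the following (a sketch; the exact targets depend
on the packaging workflow):

    CONFIG=sysvinit python setup.py sdist
    CONFIG=upstart  python setup.py install   # e.g. inside a package build root
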
diff --git a/unit-tests/travis-run-tests.sh b/unit-tests/travis-run-tests.sh
deleted file mode 100644
index e8f3c52..0000000
--- a/unit-tests/travis-run-tests.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-# This is just a stub script for now. Unit tests will be placed in this directory and
-# run by this script.
-exit 0 \ No newline at end of file