-rw-r--r--  README.md | 50
-rw-r--r--  VERSION | 1
-rw-r--r--  google-daemon/README.md | 59
-rwxr-xr-x  google-daemon/etc/init.d/google-accounts-manager | 193
-rwxr-xr-x  google-daemon/etc/init.d/google-address-manager | 153
-rwxr-xr-x  google-daemon/etc/init.d/google-clock-sync-manager | 153
-rwxr-xr-x  google-daemon/etc/init/google-accounts-manager-service.conf | 10
-rwxr-xr-x  google-daemon/etc/init/google-accounts-manager-task.conf | 18
-rwxr-xr-x  google-daemon/etc/init/google-address-manager.conf | 5
-rwxr-xr-x  google-daemon/etc/init/google-clock-sync-manager.conf | 5
-rw-r--r--  google-daemon/usr/lib/systemd/system/google-accounts-manager.service | 12
-rw-r--r--  google-daemon/usr/lib/systemd/system/google-address-manager.service | 11
-rw-r--r--  google-daemon/usr/lib/systemd/system/google-clock-sync-manager.service | 11
-rwxr-xr-x  google-daemon/usr/share/google/google_daemon/accounts.py | 431
-rw-r--r--  google-daemon/usr/share/google/google_daemon/accounts_manager.py | 127
-rwxr-xr-x  google-daemon/usr/share/google/google_daemon/accounts_manager_daemon.py | 89
-rw-r--r--  google-daemon/usr/share/google/google_daemon/address_manager.py | 179
-rwxr-xr-x  google-daemon/usr/share/google/google_daemon/desired_accounts.py | 191
-rwxr-xr-x  google-daemon/usr/share/google/google_daemon/manage_accounts.py | 94
-rwxr-xr-x  google-daemon/usr/share/google/google_daemon/manage_addresses.py | 52
-rwxr-xr-x  google-daemon/usr/share/google/google_daemon/manage_clock_sync.py | 85
-rwxr-xr-x  google-daemon/usr/share/google/google_daemon/metadata_watcher.py | 97
-rwxr-xr-x  google-daemon/usr/share/google/google_daemon/utils.py | 193
-rw-r--r--  google-startup-scripts/README.md | 52
-rwxr-xr-x  google-startup-scripts/etc/init.d/google | 75
-rwxr-xr-x  google-startup-scripts/etc/init.d/google-startup-scripts | 89
-rwxr-xr-x  google-startup-scripts/etc/init/google.conf | 9
-rwxr-xr-x  google-startup-scripts/etc/init/google_run_shutdown_scripts.conf | 10
-rwxr-xr-x  google-startup-scripts/etc/init/google_run_startup_scripts.conf | 10
-rwxr-xr-x  google-startup-scripts/etc/rc.local | 17
-rw-r--r--  google-startup-scripts/etc/rsyslog.d/90-google.conf | 9
-rw-r--r--  google-startup-scripts/etc/sysctl.d/11-gce-network-security.conf | 47
-rw-r--r--  google-startup-scripts/lib/udev/rules.d/64-gce-disk-removal.rules | 17
-rw-r--r--  google-startup-scripts/lib/udev/rules.d/65-gce-disk-naming.rules | 24
-rw-r--r--  google-startup-scripts/usr/lib/systemd/system-preset/50-google.preset | 3
-rw-r--r--  google-startup-scripts/usr/lib/systemd/system/google-shutdown-scripts.service | 15
-rw-r--r--  google-startup-scripts/usr/lib/systemd/system/google-startup-scripts.service | 13
-rw-r--r--  google-startup-scripts/usr/lib/systemd/system/google.service | 13
-rwxr-xr-x  google-startup-scripts/usr/share/google/boto/boot_setup.py | 92
-rw-r--r--  google-startup-scripts/usr/share/google/boto/boto_plugins/compute_auth.py | 85
-rwxr-xr-x  google-startup-scripts/usr/share/google/fetch_script | 148
-rwxr-xr-x  google-startup-scripts/usr/share/google/first-boot | 94
-rwxr-xr-x  google-startup-scripts/usr/share/google/get_metadata_value | 73
-rwxr-xr-x  google-startup-scripts/usr/share/google/onboot | 162
-rwxr-xr-x  google-startup-scripts/usr/share/google/regenerate-host-keys | 81
-rwxr-xr-x  google-startup-scripts/usr/share/google/run-scripts | 54
-rwxr-xr-x  google-startup-scripts/usr/share/google/run-shutdown-scripts | 31
-rwxr-xr-x  google-startup-scripts/usr/share/google/run-startup-scripts | 27
-rwxr-xr-x  google-startup-scripts/usr/share/google/safe_format_and_mount | 152
-rwxr-xr-x  google-startup-scripts/usr/share/google/set-hostname | 40
-rwxr-xr-x  google-startup-scripts/usr/share/google/set-interrupts | 82
-rwxr-xr-x  google-startup-scripts/usr/share/google/virtionet-irq-affinity | 141
-rw-r--r--  legacy/README.md | 5
-rw-r--r--  legacy/gcimagebundle/LICENSE | 201
-rw-r--r--  legacy/gcimagebundle/MANIFEST.in | 4
-rw-r--r--  legacy/gcimagebundle/README | 30
-rw-r--r--  legacy/gcimagebundle/README.md | 48
-rw-r--r--  legacy/gcimagebundle/VERSION | 1
-rw-r--r--  legacy/gcimagebundle/distribute_setup.py | 556
-rwxr-xr-x  legacy/gcimagebundle/gcimagebundle | 28
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/__init__.py | 0
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/block_disk.py | 389
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/centos.py | 66
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/debian.py | 36
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/exclude_spec.py | 82
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/fedora.py | 56
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/fs_copy.py | 180
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/gcel.py | 57
-rwxr-xr-x  legacy/gcimagebundle/gcimagebundlelib/imagebundle.py | 265
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/linux.py | 135
-rwxr-xr-x  legacy/gcimagebundle/gcimagebundlelib/manifest.py | 79
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/opensuse.py | 29
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/os_platform.py | 70
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/platform_factory.py | 60
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/rhel.py | 42
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/sle.py | 34
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/suse.py | 91
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/tests/__init__.py | 16
-rwxr-xr-x  legacy/gcimagebundle/gcimagebundlelib/tests/block_disk_test.py | 512
-rwxr-xr-x  legacy/gcimagebundle/gcimagebundlelib/tests/image_bundle_test_base.py | 140
-rwxr-xr-x  legacy/gcimagebundle/gcimagebundlelib/tests/utils_test.py | 49
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/ubuntu.py | 54
-rw-r--r--  legacy/gcimagebundle/gcimagebundlelib/utils.py | 455
-rwxr-xr-x  legacy/gcimagebundle/setup.py | 58
-rw-r--r--  legacy/gcimagebundle/stdeb.cfg | 3
-rw-r--r--  unit-tests/travis-run-tests.sh | 3
86 files changed, 1 insertions, 7717 deletions
diff --git a/README.md b/README.md
index d00790b..73a950f 100644
--- a/README.md
+++ b/README.md
@@ -1,52 +1,4 @@
-## [Image Packages](https://cloud.google.com/compute/docs/images) for [Google Compute Engine](https://cloud.google.com/compute/)
-This repository is the collection of packages that are installed on the standard Google Compute Engine images.
-
-1. [Google Startup Scripts](https://cloud.google.com/compute/docs/startupscript) - Scripts and configuration files that set up a Linux-based image to work smoothly with GCE.
-1. Google Daemon - A service that manages user accounts, maintains ssh login keys, syncs the system clock after migration, and syncs public endpoint IP addresses.
-1. Disk Expand - Scripts to expand the root partition on GCE VMs for CentOS 6 and RHEL 6 images.
-
-## Installation
-
-### From Release Tarballs
-The easiest way to install these packages into a Linux-based image is to extract each tarball to `/` (root). Since Image Bundle does not have a directory structure, it is recommended to extract it to `/usr/share/imagebundle`. The tarballs are available in [releases](https://github.com/GoogleCloudPlatform/compute-image-packages/releases).
-
-Refer to [Building a Google Compute Engine Image](https://cloud.google.com/compute/docs/images) for the complete guide.
-
-### From Source Repository
-Occasionally you may want to install the latest commits to the [repository](https://github.com/GoogleCloudPlatform/compute-image-packages/) even if they have not been released. This is not recommended unless there is a change that you specifically need and cannot wait for. To do this:
-
-1. Log in to your target machine.
-1. Clone the repository with
-
- git clone https://github.com/GoogleCloudPlatform/compute-image-packages.git
-
-1. Copy the google-daemon and google-startup-scripts files to your root directory with
-
- sudo cp -R compute-image-packages/{google-daemon/{etc,usr},google-startup-scripts/{etc,usr,lib}} /
-
-1. Configure the packages to run on startup with (Debian)
-
- sudo update-rc.d google-startup-scripts defaults && sudo update-rc.d google-accounts-manager defaults && sudo update-rc.d google-address-manager defaults && sudo update-rc.d google-clock-sync-manager defaults
-
-    or (Red Hat)
-
- sudo chkconfig --add google-startup-scripts && sudo chkconfig --add google-accounts-manager && sudo chkconfig --add google-address-manager && sudo chkconfig --add google-clock-sync-manager
-
-1. Either restart so the packages run or start them with (Debian and Red Hat)
-
- sudo service google-accounts-manager restart && sudo service google-address-manager restart && sudo service google-clock-sync-manager restart
-
-## Source Code
-This repository is structured so that each package is located in its own top-level directory. [`google-startup-scripts`](google-startup-scripts/) and [`google-daemon`](google-daemon/) mirror the directory layout of where their files are installed relative to `/` (root).
-
-## Contributing
-Have a patch that will benefit this project? Awesome! Follow these steps to have it accepted.
-
-1. Please sign our [Contributor License Agreement](CONTRIB.md).
-1. Fork this Git repository and make your changes.
-1. Create a Pull Request.
-1. Incorporate review feedback into your changes.
-1. Accepted!
+## DEVELOPMENT
## License
All files in this repository are under the [Apache License, Version 2.0](LICENSE) unless noted otherwise.
diff --git a/VERSION b/VERSION
deleted file mode 100644
index 31e5c84..0000000
--- a/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-1.3.3
diff --git a/google-daemon/README.md b/google-daemon/README.md
deleted file mode 100644
index 0e44860..0000000
--- a/google-daemon/README.md
+++ /dev/null
@@ -1,59 +0,0 @@
-## Google Daemon
-Google Daemon runs in the background and provides the following services:
-
-+ Creates new accounts based on the instance metadata.
-+ Configures SSH to accept the accounts' public keys from the instance metadata.
-+ Adds IP addresses of network load balancers as aliases of the external Ethernet interface.
-+ Resyncs the clock if skewed due to [live migration](https://googlecloudplatform.blogspot.com/2015/03/Google-Compute-Engine-uses-Live-Migration-technology-to-service-infrastructure-without-application-downtime.html).
-
-Google Daemon services are typically located at:
-
- /usr/share/google/google_daemon/
-
-#### Account synchronization
-
-Your users can create SSH keys for accounts on a virtual machine using [gcloud compute](https://cloud.google.com/compute/docs/gcloud-compute/) or manually using these steps:
-
- # Generate the ssh keys
- $ ssh-keygen -t rsa -f ~/.ssh/google_compute_engine
-
-    # The resulting public key (in OpenSSH format) looks like:
-    #   ssh-rsa [base-64-encoded-public-key] [comment]
-
-In the metadata server, the SSH keys are passed to a virtual machine individually, or to the project using the `commonInstanceMetadata` property:
-
-    {
-      "kind": "compute#metadata",
-      "items": [{
-        "key": "sshKeys",
-        "value": "<ssh-keys-value>"
-      }]
-    }
-
-`<ssh-keys-value>` is a newline-separated list of individual authorized public ssh key records, each in the format:
-
- <username>:<public-ssh-key-file-contents>
-
-For example:
-
-    {
-      "kind": "compute#project",
-      "name": "project-name",
-      "commonInstanceMetadata": {
-        "kind": "compute#metadata",
-        "items": [{
-          "key": "sshKeys",
-          "value": "user1:ssh-rsa AAAA...pIy9 user@host.domain.com\nuser2:ssh-rsa AAAA...ujOz user@host.domain.com"
-        }]
-      }
-    }
-
-For more information about the metadata server, read the [metadata server](http://developers.google.com/compute/docs/metadata "metadata server") documentation.
-
-Inside a virtual machine, a cron job runs every minute to check if project or instance metadata was updated with the new sshKeys value, and makes sure those users exist. It also checks that the keys are in the `~$USER/.ssh/authorized_keys` file.
-
-__Note:__ It is recommended that you use a `wait-for-change` request through the metadata server to detect updates. See [metadata server](https://developers.google.com/compute/docs/metadata#waitforchange) for more information.
-
-Other account management software can be used instead of Google Daemon, but you will have to configure it to read user accounts from the metadata server.
-
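The README above recommends `wait-for-change` polling over the per-minute cron pass. As a rough sketch (not part of the removed files), a hanging GET against the metadata server could look like the following Python 2 snippet. The URL parameters and `Metadata-Flavor` header mirror what `address_manager.py` uses later in this diff; the project-level `sshKeys` attribute path is an assumption.

    # Sketch: long-poll the project-level sshKeys metadata value.
    import urllib2

    BASE_URL = ('http://169.254.169.254/computeMetadata/v1/'
                'project/attributes/sshKeys')  # assumed path

    def wait_for_ssh_keys(last_etag='NONE'):
      """Block until sshKeys changes, then return (value, new_etag)."""
      url = '%s?wait_for_change=true&timeout_sec=60&last_etag=%s' % (
          BASE_URL, last_etag)
      request = urllib2.Request(url)
      # The metadata server rejects requests without this header.
      request.add_unredirected_header('Metadata-Flavor', 'Google')
      # Time out slightly after the server-side 60-second limit.
      response = urllib2.urlopen(request, timeout=70)
      return response.read(), response.info().getheader('etag', 'NONE')

Feeding the returned etag into the next call makes each request return only when the value actually changes, instead of waking up every minute.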
diff --git a/google-daemon/etc/init.d/google-accounts-manager b/google-daemon/etc/init.d/google-accounts-manager
deleted file mode 100755
index 4f414c4..0000000
--- a/google-daemon/etc/init.d/google-accounts-manager
+++ /dev/null
@@ -1,193 +0,0 @@
-#! /bin/sh
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-### BEGIN INIT INFO
-# Provides: gce_manage_accounts
-# X-Start-Before: ssh
-# Required-Start: $local_fs $network $named $syslog
-# Required-Stop:
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: Google Compute Engine accounts manager service
-# Description: This launches the Google Compute Engine accounts manager
-# daemon.
-### END INIT INFO
-
-# Do NOT "set -e"
-
-# PATH should only include /usr/* if it runs after the mountnfs.sh script
-PATH=/sbin:/usr/sbin:/bin:/usr/bin
-DESC="Google Compute Engine accounts manager service"
-NAME=manage_accounts
-DAEMON=/usr/share/google/google_daemon/manage_accounts.py
-DAEMON_ARGS="--daemon"
-PIDFILE=/var/run/$NAME.pid
-SCRIPTNAME=/etc/init.d/google-accounts-manager
-
-# Exit if the package is not installed
-[ -x "$DAEMON" ] || exit 0
-
-# Read configuration variable file if it is present
-[ -r /etc/default/$NAME ] && . /etc/default/$NAME
-
-# Load the VERBOSE setting and other rcS variables
-. /lib/init/vars.sh
-
-# Define LSB log_* functions.
-# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
-# and status_of_proc is working.
-. /lib/lsb/init-functions
-
-# If we're running under upstart, let the upstart config file handle things.
-# Debian 7 and newer have a near-one-liner function to detect this...
-if type init_is_upstart >/dev/null 2>&1; then
- # ... which we can use if present.
- init_is_upstart && exit 0
-else
- # Otherwise, directly include the core line of Debian 7's version.
- # Authorship credit: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=661109
- if [ -x /sbin/initctl ] && /sbin/initctl version | /bin/grep -q upstart; then
- exit 0
- fi
-fi
-
-#
-# Function that starts the daemon/service
-#
-do_start()
-{
- # One-shot run prior to daemonizing.
- $DAEMON --single-pass
-
- # In case of power outage or hard reboot, ensure that SSH keys have been
- # written to disk before starting the daemon. At this point the other
- # Google-specific startup logic will have already occurred, sometimes
- # including other steps which would be good to write to disk; since syncs are
- # expensive and we don't want to do it twice during boot, just do it once
- # here.
- sync
-
- # Return
- # 0 if daemon has been started
- # 1 if daemon was already running
- # 2 if daemon could not be started
- start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
- || return 1
- start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \
- $DAEMON_ARGS \
- || return 2
- # Add code here, if necessary, that waits for the process to be ready
- # to handle requests from services started subsequently which depend
- # on this one. As a last resort, sleep for some time.
-}
-
-#
-# Function that stops the daemon/service
-#
-do_stop()
-{
- # Return
- # 0 if daemon has been stopped
- # 1 if daemon was already stopped
- # 2 if daemon could not be stopped
- # other if a failure occurred
- start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
- RETVAL="$?"
- [ "$RETVAL" = 2 ] && return 2
- # Wait for children to finish too if this is a daemon that forks
- # and if the daemon is only ever run from this initscript.
- # If the above conditions are not satisfied then add some other code
- # that waits for the process to drop all resources that could be
- # needed by services started subsequently. A last resort is to
- # sleep for some time.
- start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
- [ "$?" = 2 ] && return 2
- # Many daemons don't delete their pidfiles when they exit.
- rm -f $PIDFILE
- return "$RETVAL"
-}
-
-#
-# Function that sends a SIGHUP to the daemon/service
-#
-do_reload() {
- #
- # If the daemon can reload its configuration without
- # restarting (for example, when it is sent a SIGHUP),
- # then implement that here.
- #
- start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name $NAME
- return 0
-}
-
-case "$1" in
- start)
- [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
- do_start
- case "$?" in
- 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- stop)
- [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
- do_stop
- case "$?" in
- 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- status)
- status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
- ;;
- #reload|force-reload)
- #
- # If do_reload() is not implemented then leave this commented out
- # and leave 'force-reload' as an alias for 'restart'.
- #
- #log_daemon_msg "Reloading $DESC" "$NAME"
- #do_reload
- #log_end_msg $?
- #;;
- restart|force-reload)
- #
- # If the "reload" option is implemented then remove the
- # 'force-reload' alias
- #
- log_daemon_msg "Restarting $DESC" "$NAME"
- do_stop
- case "$?" in
- 0|1)
- do_start
- case "$?" in
- 0) log_end_msg 0 ;;
- 1) log_end_msg 1 ;; # Old process is still running
- *) log_end_msg 1 ;; # Failed to start
- esac
- ;;
- *)
- # Failed to stop
- log_end_msg 1
- ;;
- esac
- ;;
- *)
- #echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload}" >&2
- echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
- exit 3
- ;;
-esac
-
-:
diff --git a/google-daemon/etc/init.d/google-address-manager b/google-daemon/etc/init.d/google-address-manager
deleted file mode 100755
index e74cab6..0000000
--- a/google-daemon/etc/init.d/google-address-manager
+++ /dev/null
@@ -1,153 +0,0 @@
-#! /bin/sh
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-### BEGIN INIT INFO
-# Provides: google-address-manager
-# Required-Start: $network $syslog
-# Required-Stop: $network
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: Google Compute Engine address manager service
-# Description:       This launches the Google Compute Engine address
-#                    manager daemon.
-### END INIT INFO
-
-# Do NOT "set -e"
-
-# PATH should only include /usr/* if it runs after the mountnfs.sh script
-PATH=/sbin:/usr/sbin:/bin:/usr/bin
-DESC="Google IP address manager"
-NAME=google-address-manager
-DAEMON=/usr/share/google/google_daemon/manage_addresses.py
-DAEMON_ARGS=""
-PIDFILE=/var/run/$NAME.pid
-SCRIPTNAME=/etc/init.d/$NAME
-
-# Exit if the package is not installed
-[ -x "$DAEMON" ] || exit 0
-
-# Read configuration variable file if it is present
-[ -r /etc/default/$NAME ] && . /etc/default/$NAME
-
-# Load the VERBOSE setting and other rcS variables
-. /lib/init/vars.sh
-
-# Define LSB log_* functions.
-# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
-# and status_of_proc is working.
-. /lib/lsb/init-functions
-
-# If we're running under upstart, let the upstart config file handle things.
-# Debian 7 and newer have a near-one-liner function to detect this...
-if type init_is_upstart >/dev/null 2>&1; then
- # ... which we can use if present.
- init_is_upstart && exit 0
-else
- # Otherwise, directly include the core line of Debian 7's version.
- # Authorship credit: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=661109
- if [ -x /sbin/initctl ] && /sbin/initctl version | /bin/grep -q upstart; then
- exit 0
- fi
-fi
-
-#
-# Function that starts the daemon/service
-#
-do_start()
-{
- # Return
- # 0 if daemon has been started
- # 1 if daemon was already running
- # 2 if daemon could not be started
- start-stop-daemon --start --quiet --make-pidfile --background \
- --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
- || return 1
- start-stop-daemon --start --quiet --make-pidfile --background \
- --pidfile $PIDFILE --exec $DAEMON -- \
- $DAEMON_ARGS || return 2
-}
-
-#
-# Function that stops the daemon/service
-#
-do_stop()
-{
- # Return
- # 0 if daemon has been stopped
- # 1 if daemon was already stopped
- # 2 if daemon could not be stopped
- # other if a failure occurred
- start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
- RETVAL="$?"
- [ "$RETVAL" = 2 ] && return 2
- # Wait for children to finish too if this is a daemon that forks
- # and if the daemon is only ever run from this initscript.
- # If the above conditions are not satisfied then add some other code
- # that waits for the process to drop all resources that could be
- # needed by services started subsequently. A last resort is to
- # sleep for some time.
- start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 \
- --pidfile $PIDFILE
- [ "$?" = 2 ] && return 2
- # Many daemons don't delete their pidfiles when they exit.
- rm -f $PIDFILE
- return "$RETVAL"
-}
-
-case "$1" in
- start)
- [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
- do_start
- case "$?" in
- 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- stop)
- [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
- do_stop
- case "$?" in
- 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- status)
- status_of_proc -p "$PIDFILE" "$DAEMON" "$NAME" && exit 0 || exit $?
- ;;
- restart|force-reload)
- log_daemon_msg "Restarting $DESC" "$NAME"
- do_stop
- case "$?" in
- 0|1)
- do_start
- case "$?" in
- 0) log_end_msg 0 ;;
- 1) log_end_msg 1 ;; # Old process is still running
- *) log_end_msg 1 ;; # Failed to start
- esac
- ;;
- *)
- # Failed to stop
- log_end_msg 1
- ;;
- esac
- ;;
- *)
- echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
- exit 3
- ;;
-esac
-
-:
diff --git a/google-daemon/etc/init.d/google-clock-sync-manager b/google-daemon/etc/init.d/google-clock-sync-manager
deleted file mode 100755
index b85f9de..0000000
--- a/google-daemon/etc/init.d/google-clock-sync-manager
+++ /dev/null
@@ -1,153 +0,0 @@
-#! /bin/sh
-# Copyright 2015 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-### BEGIN INIT INFO
-# Provides: google-clock-sync-manager
-# Required-Start: $network $syslog
-# Required-Stop: $network
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: Google Compute Engine clock sync manager service
-# Description:       This launches the Google Compute Engine clock sync
-#                    manager daemon.
-### END INIT INFO
-
-# Do NOT "set -e"
-
-# PATH should only include /usr/* if it runs after the mountnfs.sh script
-PATH=/sbin:/usr/sbin:/bin:/usr/bin
-DESC="Google clock sync manager"
-NAME=google-clock-sync-manager
-DAEMON=/usr/share/google/google_daemon/manage_clock_sync.py
-DAEMON_ARGS=""
-PIDFILE=/var/run/$NAME.pid
-SCRIPTNAME=/etc/init.d/$NAME
-
-# Exit if the package is not installed
-[ -x "$DAEMON" ] || exit 0
-
-# Read configuration variable file if it is present
-[ -r /etc/default/$NAME ] && . /etc/default/$NAME
-
-# Load the VERBOSE setting and other rcS variables
-. /lib/init/vars.sh
-
-# Define LSB log_* functions.
-# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
-# and status_of_proc is working.
-. /lib/lsb/init-functions
-
-# If we're running under upstart, let the upstart config file handle things.
-# Debian 7 and newer have a near-one-liner function to detect this...
-if type init_is_upstart >/dev/null 2>&1; then
- # ... which we can use if present.
- init_is_upstart && exit 0
-else
- # Otherwise, directly include the core line of Debian 7's version.
- # Authorship credit: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=661109
- if [ -x /sbin/initctl ] && /sbin/initctl version | /bin/grep -q upstart; then
- exit 0
- fi
-fi
-
-#
-# Function that starts the daemon/service
-#
-do_start()
-{
- # Return
- # 0 if daemon has been started
- # 1 if daemon was already running
- # 2 if daemon could not be started
- start-stop-daemon --start --quiet --make-pidfile --background \
- --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
- || return 1
- start-stop-daemon --start --quiet --make-pidfile --background \
- --pidfile $PIDFILE --exec $DAEMON -- \
- $DAEMON_ARGS || return 2
-}
-
-#
-# Function that stops the daemon/service
-#
-do_stop()
-{
- # Return
- # 0 if daemon has been stopped
- # 1 if daemon was already stopped
- # 2 if daemon could not be stopped
- # other if a failure occurred
- start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
- RETVAL="$?"
- [ "$RETVAL" = 2 ] && return 2
- # Wait for children to finish too if this is a daemon that forks
- # and if the daemon is only ever run from this initscript.
- # If the above conditions are not satisfied then add some other code
- # that waits for the process to drop all resources that could be
- # needed by services started subsequently. A last resort is to
- # sleep for some time.
- start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 \
- --pidfile $PIDFILE
- [ "$?" = 2 ] && return 2
- # Many daemons don't delete their pidfiles when they exit.
- rm -f $PIDFILE
- return "$RETVAL"
-}
-
-case "$1" in
- start)
- [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
- do_start
- case "$?" in
- 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- stop)
- [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
- do_stop
- case "$?" in
- 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- status)
- status_of_proc -p "$PIDFILE" "$DAEMON" "$NAME" && exit 0 || exit $?
- ;;
- restart|force-reload)
- log_daemon_msg "Restarting $DESC" "$NAME"
- do_stop
- case "$?" in
- 0|1)
- do_start
- case "$?" in
- 0) log_end_msg 0 ;;
- 1) log_end_msg 1 ;; # Old process is still running
- *) log_end_msg 1 ;; # Failed to start
- esac
- ;;
- *)
- # Failed to stop
- log_end_msg 1
- ;;
- esac
- ;;
- *)
- echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
- exit 3
- ;;
-esac
-
-:
diff --git a/google-daemon/etc/init/google-accounts-manager-service.conf b/google-daemon/etc/init/google-accounts-manager-service.conf
deleted file mode 100755
index 8707ca7..0000000
--- a/google-daemon/etc/init/google-accounts-manager-service.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-# google - Run account manager as a service after the one-shot account manager
-# task is done.
-#
-#
-start on (stopped google-accounts-manager-task
- and (starting ssh or starting sshd))
-stop on (stopping ssh or stopping sshd)
-respawn
-
-exec /usr/share/google/google_daemon/manage_accounts.py
diff --git a/google-daemon/etc/init/google-accounts-manager-task.conf b/google-daemon/etc/init/google-accounts-manager-task.conf
deleted file mode 100755
index 533ef6b..0000000
--- a/google-daemon/etc/init/google-accounts-manager-task.conf
+++ /dev/null
@@ -1,18 +0,0 @@
-# google - Run account manager as a one-shot task prior to sshd starting.
-#
-#
-start on (starting ssh or starting sshd)
-task
-
-script
- # One-shot run prior to daemonizing (in a different upstart job config file).
- /usr/share/google/google_daemon/manage_accounts.py --single-pass
-
- # In case of power outage or hard reboot, ensure that SSH keys have been
- # written to disk before starting the daemon. At this point the other
- # Google-specific startup logic will have already occurred, sometimes
- # including other steps which would be good to write to disk; since syncs are
- # expensive and we don't want to do it twice during boot, just do it once
- # here.
- sync
-end script
diff --git a/google-daemon/etc/init/google-address-manager.conf b/google-daemon/etc/init/google-address-manager.conf
deleted file mode 100755
index 9cf217c..0000000
--- a/google-daemon/etc/init/google-address-manager.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-# This service configures local addresses in a Google Compute Engine instance.
-start on google-rc-local-has-run
-
-respawn
-exec /usr/share/google/google_daemon/manage_addresses.py
diff --git a/google-daemon/etc/init/google-clock-sync-manager.conf b/google-daemon/etc/init/google-clock-sync-manager.conf
deleted file mode 100755
index 6d23a28..0000000
--- a/google-daemon/etc/init/google-clock-sync-manager.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-# This service syncs the clock after migration in a Google Compute Engine instance.
-start on google-rc-local-has-run
-
-respawn
-exec /usr/share/google/google_daemon/manage_clock_sync.py
diff --git a/google-daemon/usr/lib/systemd/system/google-accounts-manager.service b/google-daemon/usr/lib/systemd/system/google-accounts-manager.service
deleted file mode 100644
index 660cf54..0000000
--- a/google-daemon/usr/lib/systemd/system/google-accounts-manager.service
+++ /dev/null
@@ -1,12 +0,0 @@
-[Unit]
-Description=Google Compute Engine User Accounts Manager Daemon
-After=network.target
-Before=sshd.service
-Requires=network.target
-
-[Service]
-Type=simple
-ExecStart=/usr/share/google/google_daemon/manage_accounts.py
-
-[Install]
-WantedBy=multi-user.target
diff --git a/google-daemon/usr/lib/systemd/system/google-address-manager.service b/google-daemon/usr/lib/systemd/system/google-address-manager.service
deleted file mode 100644
index eadd2b0..0000000
--- a/google-daemon/usr/lib/systemd/system/google-address-manager.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Description=Google Compute Engine Address Manager Daemon
-After=network.target
-Requires=network.target
-
-[Service]
-Type=simple
-ExecStart=/usr/share/google/google_daemon/manage_addresses.py
-
-[Install]
-WantedBy=multi-user.target
diff --git a/google-daemon/usr/lib/systemd/system/google-clock-sync-manager.service b/google-daemon/usr/lib/systemd/system/google-clock-sync-manager.service
deleted file mode 100644
index c4fcc9b..0000000
--- a/google-daemon/usr/lib/systemd/system/google-clock-sync-manager.service
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Description=Google Compute Engine Clock Sync Daemon
-After=network.target
-Requires=network.target
-
-[Service]
-Type=simple
-ExecStart=/usr/share/google/google_daemon/manage_clock_sync.py
-
-[Install]
-WantedBy=multi-user.target
diff --git a/google-daemon/usr/share/google/google_daemon/accounts.py b/google-daemon/usr/share/google/google_daemon/accounts.py
deleted file mode 100755
index e14f5c0..0000000
--- a/google-daemon/usr/share/google/google_daemon/accounts.py
+++ /dev/null
@@ -1,431 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Update accounts needed on this GCE instance.
-
-Update accounts based on the contents of ACCOUNTS_URL, which should contain a
-newline-delimited file of accounts and SSH public keys. Each line represents an
-SSH public key which should be allowed to log in to that account.
-
-If the account does not already exist on the system, it is created and added
-to /etc/sudoers to allow that account to administer the machine without needing
-a password.
-"""
-
-import errno
-import grp
-import logging
-import os
-import pwd
-import re
-import stat
-import tempfile
-import time
-
-
-def EnsureTrailingNewline(line):
- if line.endswith('\n'):
- return line
- return line + '\n'
-
-
-def IsUserSudoerInLines(user, sudoer_lines):
- """Return whether the user has an entry in the sudoer lines."""
-
- def IsUserSudoerEntry(line):
- return re.match(r'^%s\s+' % user, line)
-
- return filter(IsUserSudoerEntry, sudoer_lines)
-
-
-class Accounts(object):
- """Manage accounts on a machine."""
-
- # Comes from IEEE Std 1003.1-2001. Characters from the portable
- # filename character set. The hyphen should not be the first char
- # of a portable user name.
- VALID_USERNAME_CHARS = set(
- map(chr, range(ord('A'), ord('Z') + 1)) +
- map(chr, range(ord('a'), ord('z') + 1)) +
- map(chr, range(ord('0'), ord('9') + 1)) +
- ['_', '-', '.'])
-
- def __init__(self, grp_module=grp, os_module=os,
- pwd_module=pwd, system_module=None,
- urllib2_module=None, time_module=time):
- """Construct an Accounts given the module injections."""
- self.system_module = system_module
-
- self.grp = grp_module
- self.os = os_module
- self.pwd = pwd_module
- self.system = system_module
- self.time_module = time_module
- self.urllib2 = urllib2_module
-
- self.default_user_groups = self.GroupsThatExist(
- ['adm', 'video', 'dip', 'plugdev', 'sudo'])
-
- def UpdateUser(self, username, ssh_keys):
- """Create username on the system, with authorized ssh_keys."""
-
- if not self.IsValidUsername(username):
- logging.warning(
- 'Not creating account for user %s. Usernames must comprise'
- ' characters [A-Za-z0-9._-] and not start with \'-\'.', username)
- return
-
- if not self.UserExists(username):
- self.system.UserAdd(username, self.default_user_groups)
-
- if self.UserExists(username):
- # Don't try to manage the sshkeys of an account with a shell set to
- # disable logins. Helps avoid problems caused by operator and root
- # sharing a home directory in CentOS and RHEL
- if self.UserNoLogin(username):
- logging.debug(
- 'Not processing account for user %s. User has /sbin/nologin'
- ' set as login shell', username)
- return
-
- # If we're just removing keys from a user who may have been in the
- # metadata server but isn't currently, we should never increase their
- # privileges. Therefore, only grant sudo access if we have ssh keys.
- if ssh_keys:
- self.MakeUserSudoer(username)
- self.AuthorizeSshKeys(username, ssh_keys)
-
- def IsValidUsername(self, username):
- """Return whether username looks like a valid user name."""
-
- def InvalidCharacterFilter(c):
- return c not in Accounts.VALID_USERNAME_CHARS
-
- if filter(InvalidCharacterFilter, username):
- # There's an invalid character in it.
- return False
-
- if username.startswith('-'):
- return False
-
- return True
-
- def GroupsThatExist(self, groups_list):
- """Return all the groups in groups_list that exist on the machine."""
-
- def GroupExists(group):
- try:
- self.grp.getgrnam(group)
- return True
- except KeyError:
- return False
-
- return filter(GroupExists, groups_list)
-
- def GetUserInfo(self, user):
- """Return a tuple of the user's (home_dir, pid, gid)."""
- pwent = self.pwd.getpwnam(user)
- return (pwent.pw_dir, pwent.pw_uid, pwent.pw_gid)
-
- def UserExists(self, user):
- """Test whether a given user exists or not."""
- try:
- self.pwd.getpwnam(user)
- return True
- except KeyError:
- return False
-
- def UserNoLogin(self, user):
- """Test whether a user's shell is /sbin/nologin."""
- pwent = self.pwd.getpwnam(user)
- return pwent.pw_shell == '/sbin/nologin'
-
- def LockSudoers(self):
- """Create an advisory lock on /etc/sudoers.tmp.
-
- Returns:
- True if successful, False if not.
- """
- try:
- f = self.os.open('/etc/sudoers.tmp', os.O_EXCL|os.O_CREAT)
- self.os.close(f)
- return True
- except OSError as e:
- if e.errno == errno.EEXIST:
- logging.warning('/etc/sudoers.tmp lock file already exists')
- else:
- logging.warning('Could not create /etc/sudoers.tmp lock file: %s', e)
- return False
-
- def UnlockSudoers(self):
- """Remove the advisory lock on /etc/sudoers.tmp."""
- try:
- self.os.unlink('/etc/sudoers.tmp')
- return True
- except OSError as e:
- if e.errno == errno.ENOENT:
- return True
- logging.warning('Could not remove /etc/sudoers.tmp: %s', e)
- return False
-
- def MakeUserSudoer(self, user):
- """Add user to the sudoers file."""
- # If the user has no sudoers file, don't add an entry.
- if not self.os.path.isfile('/etc/sudoers'):
- logging.info('Did not grant admin access to %s. /etc/sudoers not found.',
- user)
- return
-
- with self.system.OpenFile('/etc/sudoers', 'r') as sudoer_f:
- sudoer_lines = sudoer_f.readlines()
-
- if IsUserSudoerInLines(user, sudoer_lines):
- # User is already sudoer. Done. We don't have to check for a lock
- # file.
- return
-
- # Lock sudoers.
- if not self.LockSudoers():
- logging.warning('Did not grant admin access to %s. /etc/sudoers locked.',
- user)
- return
-
- try:
- # First read in the sudoers file (this time under the lock).
- with self.system.OpenFile('/etc/sudoers', 'r') as sudoer_f:
- sudoer_lines = sudoer_f.readlines()
-
- if IsUserSudoerInLines(user, sudoer_lines):
- # User is already sudoer. Done.
- return
-
- # Create a temporary sudoers file with the contents we want.
- sudoer_lines.append('%s ALL=NOPASSWD: ALL' % user)
- sudoer_lines = [EnsureTrailingNewline(line) for line in sudoer_lines]
- (tmp_sudoers_fd, tmp_sudoers_fname) = tempfile.mkstemp()
- with self.os.fdopen(tmp_sudoers_fd, 'w+') as tmp_sudoer_f:
- # Put the old lines.
- tmp_sudoer_f.writelines(sudoer_lines)
- tmp_sudoer_f.seek(0)
-
- try:
- # Validate our result.
- if not self.system.IsValidSudoersFile(tmp_sudoers_fname):
- logging.warning(
- 'Did not grant admin access to %s. Sudoers was invalid.', user)
- return
-
- self.os.chmod('/etc/sudoers', 0640)
- with self.system.OpenFile('/etc/sudoers', 'w') as sudoer_f:
- sudoer_f.writelines(sudoer_lines)
- # Make sure we're still 0640.
- self.os.fchmod(sudoer_f.fileno(), stat.S_IWUSR | 0640)
- try:
- self.os.fchmod(sudoer_f.fileno(), 0440)
- except (IOError, OSError) as e:
- logging.warning('Could not restore perms to /etc/sudoers: %s', e)
- finally:
- # Clean up the temp file.
- try:
- self.os.unlink(tmp_sudoers_fname)
- except (IOError, OSError) as e:
- pass
- except (IOError, OSError) as e:
- logging.warning('Could not grant %s admin access: %s', user, e)
- finally:
- self.UnlockSudoers()
-
- def AuthorizeSshKeys(self, user, ssh_keys):
- """Add ssh_keys to the user's ssh authorized_keys.gce file."""
- (home_dir, uid, gid) = self.GetUserInfo(user)
-
- ssh_dir = os.path.join(home_dir, '.ssh')
-
- if not self.os.path.isdir(ssh_dir):
- # Create a user's ssh directory, with u+rwx as the only permissions.
- # There's proper handling and logging of OSError within EnsureDir(),
-      # so neither of these calls needs to handle that.
- if not self.EnsureHomeDir(home_dir, uid, gid):
- return False
-
- if not self.EnsureDir(ssh_dir, uid, gid, 0700):
- return False
-
-    # Not all sshds support multiple authorized_keys files. We have to
- # share one with the user. We add our entries as follows:
- # # Added by Google
- # authorized_key_entry
- authorized_keys_file = os.path.join(ssh_dir, 'authorized_keys')
- try:
- self.WriteAuthorizedSshKeysFile(authorized_keys_file, ssh_keys, uid, gid)
- except IOError as e:
- logging.warning('Could not update %s due to %s', authorized_keys_file, e)
-
- def SetSELinuxContext(self, path):
- """Set the appropriate SELinux context, if SELinux tools are installed.
-
- Calls /sbin/restorecon on the provided path to set the SELinux context as
- specified by policy. This call does not operate recursively.
-
- Only some OS configurations use SELinux. It is therefore acceptable for
- restorecon to be missing, in which case we do nothing.
-
- Arguments:
- path: The path on which to fix the SELinux context.
-
- Returns:
- True if successful or if restorecon is missing, False in case of error.
- """
-
- if self.system.IsExecutable('/sbin/restorecon'):
- result = self.system.RunCommand(['/sbin/restorecon', path])
- if self.system.RunCommandFailed(result):
- logging.error('Unable to set SELinux context for %s', path)
- return False
- else:
- return True
- else:
- logging.debug('restorecon missing; not setting SELinux context for %s',
- path)
- return True
-
- def EnsureHomeDir(self, home_dir, uid, gid):
- """Make sure user's home directory exists.
-
- Create the directory and its ancestor directories if necessary.
-
- No changes are made to the ownership or permissions of a directory which
- already exists.
-
- Arguments:
- home_dir: The path to the home directory.
- uid: user ID to own the home dir.
- gid: group ID to own the home dir.
-
- Returns:
- True if successful, False if not.
- """
-
- if self.os.path.isdir(home_dir):
- return True
-
- # Use root as owner when creating ancestor directories.
- if not self.EnsureDir(home_dir, 0, 0, 0755):
- return False
-
- self.os.chown(home_dir, uid, gid)
- return True
-
- def EnsureDir(self, dir_path, uid, gid, mode):
- """Make sure the specified directory exists.
-
- If dir doesn't exist, create it and its ancestor directories, if necessary.
-
- No changes are made to the ownership or permissions of a directory which
- already exists.
-
- Arguments:
- dir_path: The path to the dir.
- uid: user ID of the owner.
- gid: group ID of the owner.
- mode: Permissions for the dir, as an integer (e.g. 0755).
-
- Returns:
- True if successful, False if not.
- """
-
- if self.os.path.isdir(dir_path):
- return True # We are done
-
- parent_dir = self.os.path.dirname(dir_path)
- if not parent_dir == dir_path:
- if not self.EnsureDir(parent_dir, uid, gid, 0755):
- return False
-
- try:
- self.os.mkdir(dir_path, mode)
- self.os.chown(dir_path, uid, gid)
- self.SetSELinuxContext(dir_path)
- except OSError as e:
- if self.os.path.isdir(dir_path):
- logging.warning('Could not prepare %s: %s', dir_path, e)
- return True
- logging.error('Could not create %s: %s', dir_path, e)
- return False
-
- return True
-
- def WriteAuthorizedSshKeysFile(
- self, authorized_keys_file, ssh_keys, uid, gid):
- """Update the authorized_keys_file to contain the given ssh_keys.
-
- Arguments:
- authorized_keys_file: The name of the authorized keys file.
- ssh_keys: The google added ssh keys for the file.
- uid: The uid for the user.
- gid: The gid for the user.
- """
- # Create a temp file to store the new keys.
- with self.system.CreateTempFile(delete=False) as keys_file:
- new_keys_path = keys_file.name
- # Read all the ssh keys in the original key file if it exists.
- if self.os.path.exists(authorized_keys_file):
- with self.system.OpenFile(authorized_keys_file, 'r') as original_keys:
- original_keys.seek(0)
- lines = original_keys.readlines()
- else:
- lines = []
-
- # Pull out the # Added by Google lines.
- google_added_ixs = [i for i in range(len(lines) - 1) if
- lines[i].startswith('# Added by Google')]
- google_added_ixs += [i + 1 for i in google_added_ixs]
-
- user_lines = [
- lines[i] for i in range(len(lines)) if i not in google_added_ixs]
-
- # First write user's entries.
- for user_line in user_lines:
- keys_file.write(EnsureTrailingNewline(user_line))
-
-    # Put Google entries at the end, each preceded by '# Added by Google'.
- for ssh_key in ssh_keys:
- keys_file.write('# Added by Google\n')
- keys_file.write(EnsureTrailingNewline(ssh_key))
-
- # Check that we have enough disk space to move the file.
-    vfs = self.os.statvfs(self.os.path.dirname(authorized_keys_file))
-    available_space = vfs.f_bavail * vfs.f_bsize
- required_space = self.os.path.getsize(new_keys_path) + 1024 * 1024
- logging.debug('Writing keys file: %s bytes required; %s available.',
- required_space, available_space)
- if available_space < required_space:
- raise IOError('Disk is too full')
-
- try:
- # Override the old authorized keys file with the new one.
- self.system.MoveFile(new_keys_path, authorized_keys_file)
- finally:
- try:
- self.system.DeleteFile(new_keys_path)
- except:
- pass
-
- # Make sure the authorized_keys_file has the right perms (u+rw).
- self.os.chmod(authorized_keys_file, 0600)
- self.os.chown(authorized_keys_file, uid, gid)
-
- # Set SELinux context, if applicable to this system
- self.SetSELinuxContext(authorized_keys_file)
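For orientation, driving the `Accounts` class above takes only a couple of calls. The sketch below is hypothetical wiring: `System` is assumed to be the injectable helper from `utils.py` (listed in the diffstat but not shown in this excerpt), so treat that import as illustrative rather than confirmed.

    # Hypothetical usage; System is an assumed helper from utils.py
    # providing OpenFile/RunCommand/IsExecutable and friends.
    from accounts import Accounts
    from utils import System

    accounts = Accounts(system_module=System())
    # One Google-managed key. Passing [] instead strips previously added
    # '# Added by Google' entries without granting sudo access.
    accounts.UpdateUser('user1', ['ssh-rsa AAAA...pIy9 user1@host'])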
diff --git a/google-daemon/usr/share/google/google_daemon/accounts_manager.py b/google-daemon/usr/share/google/google_daemon/accounts_manager.py
deleted file mode 100644
index 5932796..0000000
--- a/google-daemon/usr/share/google/google_daemon/accounts_manager.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Main driver logic for managing accounts on GCE instances."""
-
-import logging
-import os
-import pwd
-import time
-
-LOCKFILE = '/var/lock/manage-accounts.lock'
-
-
-class AccountsManager(object):
- """Create accounts on a machine."""
-
- def __init__(self, accounts_module, desired_accounts, system, lock_file,
- lock_fname, single_pass=True):
- """Construct an AccountsFromMetadata with the given module injections."""
- if not lock_fname:
- lock_fname = LOCKFILE
- self.accounts = accounts_module
- self.desired_accounts = desired_accounts
- self.lock_file = lock_file
- self.lock_fname = lock_fname
- self.system = system
- self.single_pass = single_pass
-
- def Main(self):
- logging.debug('AccountsManager main loop')
- # If this is a one-shot execution, then this can be run normally.
- # Otherwise, run the actual operations in a subprocess so that any
- # errors don't kill the long-lived process.
- if self.single_pass:
- self.RegenerateKeysAndUpdateAccounts()
- return
- # Run this forever in a loop.
- while True:
- # Fork and run the key regeneration and account update while the
- # parent waits for the subprocess to finish before continuing.
-
- # Create a pipe used to get the new etag value from child
- reader, writer = os.pipe() # these are file descriptors, not file objects
- pid = os.fork()
- if pid:
- # We are the parent.
- os.close(writer)
- reader = os.fdopen(reader) # turn reader into a file object
- etag = reader.read()
- if etag:
- self.desired_accounts.etag = etag
- reader.close()
- logging.debug('New etag: %s', self.desired_accounts.etag)
- os.waitpid(pid, 0)
- else:
- # We are the child.
- os.close(reader)
- writer = os.fdopen(writer, 'w')
- try:
- self.RegenerateKeysAndUpdateAccounts()
- except Exception as e:
- logging.warning('error while trying to update accounts: %s', e)
- # An error happened while trying to update the accounts.
- # Sleep for five seconds before trying again.
- time.sleep(5)
-
- # Write the etag to pass to parent.
- etag = self.desired_accounts.etag or ''
- writer.write(etag)
- writer.close()
-
- # The use of os._exit here is recommended for subprocesses spawned
- # by forking to avoid issues with running the cleanup tasks that
- # sys.exit() runs by preventing issues from the cleanup being run
- # once by the subprocess and once by the parent process.
- os._exit(0)
-
- def RegenerateKeysAndUpdateAccounts(self):
- """Regenerate the keys and update accounts as needed."""
- logging.debug('RegenerateKeysAndUpdateAccounts')
- if self.system.IsExecutable('/usr/share/google/first-boot'):
- self.system.RunCommand('/usr/share/google/first-boot')
-
- self.lock_file.RunExclusively(self.lock_fname, self.UpdateAccounts)
-
- def UpdateAccounts(self):
- """Update all accounts that should be present or exist already."""
-
- # Note GetDesiredAccounts() returns a dict of username->sshKeys mappings.
- desired_accounts = self.desired_accounts.GetDesiredAccounts()
-
- # Plan a processing pass for extra accounts existing on the system with a
- # ~/.ssh/authorized_keys file, even if they're not otherwise in the metadata
- # server; this will only ever remove the last added-by-Google key from
- # accounts which were formerly in the metadata server but are no longer.
- all_accounts = pwd.getpwall()
- keyfile_suffix = os.path.join('.ssh', 'authorized_keys')
- sshable_usernames = [
- entry.pw_name
- for entry in all_accounts
- if os.path.isfile(os.path.join(entry.pw_dir, keyfile_suffix))]
- extra_usernames = set(sshable_usernames) - set(desired_accounts.keys())
-
- if desired_accounts:
- for username, ssh_keys in desired_accounts.iteritems():
- if not username:
- continue
-
- self.accounts.UpdateUser(username, ssh_keys)
-
- for username in extra_usernames:
- # If a username is present in extra_usernames, it is no longer reflected
- # in the metadata server but has an authorized_keys file. Therefore, we
- # should pass the empty list for sshKeys to ensure that any Google-managed
- # keys are no longer authorized.
- self.accounts.UpdateUser(username, [])
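The fork-and-pipe handoff in `Main()` above (the child does the update and reports the new etag back through a pipe; the parent blocks, reads it, and carries it forward) is compact but easy to misread. A minimal standalone sketch of the same pattern, with hypothetical names and none of the accounts logic:

    # Minimal sketch of the fork/pipe result handoff used by Main().
    import os

    def run_in_child(work_fn):
      reader, writer = os.pipe()  # raw file descriptors
      pid = os.fork()
      if pid:
        # Parent: read the child's result, then reap the child.
        os.close(writer)
        with os.fdopen(reader) as r:
          result = r.read()
        os.waitpid(pid, 0)
        return result
      else:
        # Child: do the work, report back, and _exit so sys.exit()
        # cleanup handlers don't run in both processes.
        os.close(reader)
        w = os.fdopen(writer, 'w')
        try:
          w.write(work_fn())
        finally:
          w.close()
        os._exit(0)

    print run_in_child(lambda: 'new-etag-value')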
diff --git a/google-daemon/usr/share/google/google_daemon/accounts_manager_daemon.py b/google-daemon/usr/share/google/google_daemon/accounts_manager_daemon.py
deleted file mode 100755
index d489112..0000000
--- a/google-daemon/usr/share/google/google_daemon/accounts_manager_daemon.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tool for running account manager as a daemon."""
-
-import fcntl
-import logging
-import os
-import signal
-
-PIDFILE = '/var/run/manage_accounts.pid'
-
-
-class AccountsManagerDaemon(object):
- """Creates a daemon process to run the accounts manager in."""
-
- def __init__(self, pidfile, accounts_manager, fcntl_module=fcntl):
- logging.debug('Initializing Daemon Module')
- if not pidfile:
- pidfile = PIDFILE
-
- self.pidfile = pidfile
- self.accounts_manager = accounts_manager
- self.fcntl_module = fcntl_module
-
- def StartDaemon(self):
- """Spins off a process that runs as a daemon."""
- # To spin off the process, use what seems to be the "standard" way to spin
- # off daemons: fork a child process, make it the session and process group
- # leader, then fork it again so that the actual daemon process is no longer
- # a session leader.
- #
- # This is a very simplified (with significantly reduced features) version of
- # the python-daemon library at https://pypi.python.org/pypi/python-daemon/.
- pid = os.fork()
- logging.debug('Forked new process, pid= {0}'.format(pid))
- if pid == 0:
- os.setsid()
- pid = os.fork()
- if pid == 0:
- os.chdir('/')
- os.umask(0)
- else:
- # The use of os._exit here is recommended for parents of a daemon
- # process to avoid issues with running the cleanup tasks that
- # sys.exit() runs by preventing issues from the cleanup being run
- # more than once when the two parents exit and later when the daemon
- # exits.
- os._exit(0)
- else:
- os._exit(0)
-
- # Set up pidfile and signal handlers.
- pidf = open(self.pidfile, 'w')
- pidf.write(str(os.getpid()))
- pidf.close()
-
-    logging.debug('Registering SIGTERM handler to shut down the daemon')
- signal.signal(signal.SIGTERM, self.ShutdownDaemon)
-
- self.accounts_manager.Main()
-
- def ShutdownDaemon(self, signal_number, unused_stack_frame):
- # Grab the lock on the lock file, ensuring that the accounts manager is not
- # in the middle of something. Using a different file reference guarantees
- # that the lock can only be grabbed once the accounts manager is done with
- # it and holding it guarantees that the accounts manager won't start up
- # again while shutting down.
- logging.debug('Acquiring Daemon lock.')
- lockfile = open(self.accounts_manager.lock_fname, 'r')
- self.fcntl_module.flock(lockfile.fileno(), fcntl.LOCK_EX)
-
- logging.debug('Shutting down Daemon module.')
- # Clean up pidfile and terminate. Lock will be released with termination.
- os.remove(self.pidfile)
- exception = SystemExit('Terminating on signal number %d' % signal_number)
- raise exception
diff --git a/google-daemon/usr/share/google/google_daemon/address_manager.py b/google-daemon/usr/share/google/google_daemon/address_manager.py
deleted file mode 100644
index 7a0e911..0000000
--- a/google-daemon/usr/share/google/google_daemon/address_manager.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Manage extra network interface addresses on a GCE instance.
-
-Fetch a list of public endpoint IPs from the metadata server, compare it with
-what's configured on eth0, and add/remove addresses from eth0 to make them
-match. Only remove those which match our proto code.
-
-This must be run by root. If it reads any malformed data, it will take no
-action.
-
-Command used to add ips:
- ip route add to local $IP/32 dev eth0 proto 66
-Command used to fetch list of configured IPs:
- ip route ls table local type local dev eth0 scope host proto 66
-"""
-
-
-import logging
-import os
-import re
-import socket
-import time
-import urllib2
-
-PUBLIC_ENDPOINT_URL_PREFIX = (
-    'http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/'
-    '0/forwarded-ips/?recursive=true&alt=text&wait_for_change=true'
-    '&timeout_sec=60&last_etag=')
-GOOGLE_PROTO_ID = 66 # "GG"
-
-class InputError(Exception):
- pass
-
-class AddressManager(object):
- """Manage public endpoint IPs."""
-
- def __init__(self, system_module, urllib2_module=urllib2, time_module=time):
- self.system = system_module
- self.urllib2 = urllib2_module
- self.time = time_module
- self.ip_path = '/sbin/ip'
- if not os.path.exists(self.ip_path):
- self.ip_path = '/bin/ip'
-
-    # The etag header value is hex, so this is guaranteed not to match.
- self.default_last_etag = 'NONE'
- self.ResetEtag()
-
- def SyncAddressesForever(self):
- while True:
- try:
- # Block until the metadata changes or there is a timeout or error.
- self.SyncAddresses()
- except socket.timeout as e:
- self.ResetEtag()
- logging.warning('Backend timeout. Retrying.')
- except Exception as e:
- self.ResetEtag()
- logging.error('SyncAddresses exception: %s', e)
- # Don't spin
- self.time.sleep(5)
-
- def SyncAddresses(self):
- """Main entry point -- syncs configured w/ desired IP addresses."""
-
- addrs_wanted = self.ReadPublicEndpoints()
- addrs_configured = self.ReadLocalConfiguredAddrs()
- (to_add, to_remove) = self.DiffAddrs(addrs_wanted, addrs_configured)
- self.LogChanges(addrs_wanted, addrs_configured, to_add, to_remove)
- self.AddAddresses(to_add)
- self.DeleteAddresses(to_remove)
-
- def ResetEtag(self):
- """Reset the etag so the next call will return the current data."""
- self.last_etag = self.default_last_etag
-
- def ReadPublicEndpoints(self):
- """Fetch list of public endpoint IPs from metadata server."""
- try:
- # If the connection gets abandoned, ensure we don't hang more than
- # 70 seconds.
- url = PUBLIC_ENDPOINT_URL_PREFIX + self.last_etag
- request = urllib2.Request(url)
- request.add_unredirected_header('Metadata-Flavor', 'Google')
- u = self.urllib2.urlopen(request, timeout=70)
- addrs_data = u.read()
- headers = u.info().dict
- self.last_etag = headers.get('etag', self.default_last_etag)
- except urllib2.HTTPError as h:
- self.ResetEtag()
- # 404 is treated like an empty list, for backward compatibility.
- if h.code == 404:
- return []
- raise h
- return self.ParseIPAddrs(addrs_data)
-
- def ReadLocalConfiguredAddrs(self):
- """Fetch list of addresses we've configured on eth0 already."""
- cmd = ('{0} route ls table local type local dev eth0 scope host ' +
- 'proto {1:d}').format(self.ip_path, GOOGLE_PROTO_ID)
- result = self.system.RunCommand(cmd.split())
- if self.IPCommandFailed(result, cmd):
-      raise InputError("Can't check local addresses")
- (rc, stdout, stderr) = result
- return self.ParseIPAddrs(stdout)
-
- def DiffAddrs(self, addrs_wanted, addrs_configured):
- """"Returns set differences: (to_add, to_remove)."""
- want = set(addrs_wanted)
- have = set(addrs_configured)
- to_add = want - have
- to_remove = have - want
- return (sorted(to_add), sorted(to_remove))
-
- def LogChanges(self, addrs_wanted, addrs_configured, to_add, to_remove):
- """Log what addrs we are going to change."""
- if not to_add and not to_remove:
- return
- logging.info(
- 'Changing public IPs from %s to %s by adding %s and removing %s' % (
- addrs_configured or None,
- addrs_wanted or None,
- to_add or None,
- to_remove or None))
-
- def AddAddresses(self, to_add):
- """Configure new addresses on eth0."""
- for addr in to_add:
- self.AddOneAddress(addr)
-
- def AddOneAddress(self, addr):
- """Configure one address on eth0."""
- cmd = '%s route add to local %s/32 dev eth0 proto %d' % (
- self.ip_path, addr, GOOGLE_PROTO_ID)
- result = self.system.RunCommand(cmd.split())
- self.IPCommandFailed(result, cmd) # Ignore return code
-
- def DeleteAddresses(self, to_remove):
- """Un-configure a list of addresses from eth0."""
- for addr in to_remove:
- self.DeleteOneAddress(addr)
-
- def DeleteOneAddress(self, addr):
- """Delete one address from eth0."""
- # This will fail if it doesn't match exactly the specs listed.
- # That'll help ensure we don't remove one added by someone else.
- cmd = '%s route delete to local %s/32 dev eth0 proto %d' % (
- self.ip_path, addr, GOOGLE_PROTO_ID)
- result = self.system.RunCommand(cmd.split())
- self.IPCommandFailed(result, cmd) # Ignore return code
-
- # Helper methods
- def ParseIPAddrs(self, addrs_data):
- """Parse and validate IP addrs, return list of strings or None."""
- addrs = addrs_data.strip().split()
-    reg = re.compile(r'^([0-9]+\.){3}[0-9]+$')
- for addr in addrs:
- if not reg.search(addr):
- raise InputError('Failed to parse ip addr: "%s"' % addr)
- return addrs
-
- def IPCommandFailed(self, result, cmd):
- """If an /sbin/ip command failed, log and return True."""
- if self.system.RunCommandFailed(
- result, 'Non-zero exit status from: "%s"' % cmd):
- return True
- else:
- return False
diff --git a/google-daemon/usr/share/google/google_daemon/desired_accounts.py b/google-daemon/usr/share/google/google_daemon/desired_accounts.py
deleted file mode 100755
index f6fb5ff..0000000
--- a/google-daemon/usr/share/google/google_daemon/desired_accounts.py
+++ /dev/null
@@ -1,191 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Get the accounts desired to be present on the VM."""
-
-import datetime
-import json
-import logging
-import time
-import urllib2
-
-
-METADATA_URL = 'http://metadata.google.internal/computeMetadata/v1'
-METADATA_HANG = ('/?recursive=true&alt=json&wait_for_change=true'
- '&timeout_sec=%s&last_etag=%s')
-
-
-def KeyHasExpired(key):
- """Check to see whether an SSH key has expired.
-
- Uses Google-specific (for now) semantics of the OpenSSH public key format's
- comment field to determine if an SSH key is past its expiration timestamp, and
- therefore no longer to be trusted. This format is still subject to change.
- Reliance on it in any way is at your own risk.
-
- Args:
- key: A single public key entry in OpenSSH public key file format. This will
- be checked for Google-specific comment semantics, and if present, those
- will be analysed.
-
- Returns:
- True if the key has Google-specific comment semantics and has an expiration
- timestamp in the past, or False otherwise.
- """
-
- logging.debug('Processing key: %s', key)
-
- try:
- schema, json_str = key.split(None, 3)[2:]
- except ValueError:
- logging.debug('Key does not seem to have a schema identifier.')
- logging.debug('Not expiring key.')
- return False
-
- if schema != 'google-ssh':
- logging.debug('Rejecting %s as potential key schema identifier.', schema)
- return False
-
- logging.debug('Google SSH key schema identifier found.')
- logging.debug('JSON string detected: %s', json_str)
-
- try:
- json_obj = json.loads(json_str)
- except ValueError:
- logging.error('Invalid JSON. Not expiring key.')
- return False
-
- if 'expireOn' not in json_obj:
- # Use warning instead of error for this failure mode in case we
- # add future use cases for this JSON which are unrelated to expiration.
- logging.warning('No expiration timestamp. Not expiring key.')
- return False
-
- expire_str = json_obj['expireOn']
- format_str = '%Y-%m-%dT%H:%M:%S+0000'
-
- try:
- expire_time = datetime.datetime.strptime(expire_str, format_str)
- except ValueError:
- logging.error(
- 'Expiration timestamp "%s" not in format %s.', expire_str, format_str)
- logging.error('Not expiring key.')
- return False
-
- # Expire the key if and only if we have exceeded the expiration timestamp.
- return datetime.datetime.utcnow() > expire_time
-
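-# A key in the Google-specific format checked above looks something like this
-# (illustrative; the key material is truncated):
-#   ssh-rsa AAAAB3NzaC1yc2E... google-ssh {"userName": "alice@example.com",
-#   "expireOn": "2015-01-01T00:00:00+0000"}
-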
-
-def AccountDataToDictionary(data):
- """Given SSH key data, construct a usermap.
-
- Args:
- data: The data returned from the metadata server's SSH key attributes.
-
- Returns:
- A map of {'username': ssh_keys_list}.
- """
- if not data:
- return {}
- lines = [line for line in data.splitlines() if line]
- usermap = {}
- for line in lines:
- split_line = line.split(':', 1)
- if len(split_line) != 2:
- logging.warning(
- 'SSH key is not a complete entry: %s', split_line)
- continue
- user, key = split_line
- if KeyHasExpired(key):
- logging.debug(
- 'Skipping expired SSH key for user %s: %s', user, key)
- continue
- if user not in usermap:
- usermap[user] = []
- usermap[user].append(key)
- logging.debug('User accounts: %s', usermap)
- return usermap
-
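-# For example (illustrative), the metadata value
-#   'alice:ssh-rsa AAA... alice\nbob:ssh-rsa BBB... bob'
-# yields {'alice': ['ssh-rsa AAA... alice'], 'bob': ['ssh-rsa BBB... bob']}.
-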
-
-class DesiredAccounts(object):
- """Interface to determine the accounts desired on this instance."""
-
- def __init__(self, time_module=time, urllib2_module=urllib2):
- self.urllib2 = urllib2_module
- self.time = time_module
- self.etag = 0
-
- def _WaitForUpdate(self, timeout_secs):
- """Makes a hanging get request for the contents of the metadata server."""
- request_url = METADATA_URL + METADATA_HANG % (timeout_secs, self.etag)
- logging.debug('Getting url: %s', request_url)
- request = urllib2.Request(request_url)
- request.add_header('Metadata-Flavor', 'Google')
- return self.urllib2.urlopen(request, timeout=timeout_secs*1.1)
-
- def _GetMetadataUpdate(self, timeout_secs=60):
- """Fetches the content of the metadata server.
-
- Args:
- timeout_secs: The timeout in seconds.
-
- Returns:
- The JSON formatted string content of the metadata server.
- """
- try:
- response = self._WaitForUpdate(timeout_secs=timeout_secs)
- response_info = response.info()
- if response_info and response_info.has_key('etag'):
- self.etag = response_info.getheader('etag')
- content = response.read()
- logging.debug('response: %s', content)
- return content
- except urllib2.HTTPError as e:
- if e.code == 404:
- # The metadata server content doesn't exist. Return None.
- # No need to log a warning.
- return None
- # Rethrow the exception since we don't know what it is. Let the
- # top layer handle it.
- raise
-
- def GetDesiredAccounts(self):
- """Get a list of the accounts desired on the system.
-
- Returns:
- A dict of the form: {'username': ['sshkey1, 'sshkey2', ...]}.
- """
- logging.debug('Getting desired accounts from metadata.')
- # Fetch the top level attribute with a hanging get.
- metadata_content = self._GetMetadataUpdate()
- metadata_dict = json.loads(metadata_content or '{}')
- account_data = None
-
- try:
- instance_data = metadata_dict['instance']['attributes']
- project_data = metadata_dict['project']['attributes']
- # Instance SSH keys to use regardless of project metadata.
- valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')]
- block_project = instance_data.get('block-project-ssh-keys', '').lower()
- if block_project != 'true' and not instance_data.get('sshKeys'):
- valid_keys.append(project_data.get('ssh-keys'))
- valid_keys.append(project_data.get('sshKeys'))
- valid_keys = [key for key in valid_keys if key]
- account_data = '\n'.join(valid_keys)
- except KeyError:
- logging.debug('Project or instance attributes were not found.')
-
- return AccountDataToDictionary(account_data)
diff --git a/google-daemon/usr/share/google/google_daemon/manage_accounts.py b/google-daemon/usr/share/google/google_daemon/manage_accounts.py
deleted file mode 100755
index 9f3bb33..0000000
--- a/google-daemon/usr/share/google/google_daemon/manage_accounts.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Main driver logic for managing accounts on GCE instances."""
-
-import logging
-import logging.handlers
-import optparse
-import os
-import os.path
-import sys
-
-
-def FixPath():
- parent_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
- if os.path.isdir(parent_dir):
- sys.path.append(parent_dir)
-
-
-FixPath()
-
-
-from accounts import Accounts
-from accounts_manager import AccountsManager
-from accounts_manager_daemon import AccountsManagerDaemon
-from desired_accounts import DesiredAccounts
-from utils import LockFile
-from utils import System
-
-
-def Main(accounts, desired_accounts, system, logger,
- log_handler, lock_file, lock_fname=None, single_pass=True,
- daemon_mode=False, force_mode=False, debug_mode=False):
-
- if not log_handler:
- log_handler = system.MakeLoggingHandler(
- 'accounts-from-metadata', logging.handlers.SysLogHandler.LOG_AUTH)
- system.SetLoggingHandler(logger, log_handler)
-
- if debug_mode:
- system.EnableDebugLogging(logger)
- logging.debug('Running in Debug Mode')
-
- if not force_mode and os.path.isfile('/usr/share/google/gcua'):
- logging.error('Google Compute User Accounts is installed.')
- sys.exit(1)
-
- accounts_manager = AccountsManager(
- accounts, desired_accounts, system, lock_file, lock_fname,
- single_pass)
-
- if daemon_mode:
- manager_daemon = AccountsManagerDaemon(None, accounts_manager)
- manager_daemon.StartDaemon()
- else:
- accounts_manager.Main()
-
-
-if __name__ == '__main__':
- parser = optparse.OptionParser()
- parser.add_option('--daemon', dest='daemon', action='store_true')
- parser.add_option('--no-daemon', dest='daemon', action='store_false')
-  # Leave the --interval flag for now to allow some time for each platform
-  # to move to the new flags.
- parser.add_option('--interval', type='int', dest='interval')
- parser.add_option('--single-pass', dest='single_pass', action='store_true')
- parser.add_option('--no-single-pass', dest='single_pass', action='store_false')
- parser.add_option('--force', dest='force', action='store_true')
- parser.add_option('--debug', dest='debug', action='store_true')
- parser.set_defaults(interval=60)
- parser.set_defaults(single_pass=False)
- parser.set_defaults(daemon=False)
- parser.set_defaults(force=False)
- parser.set_defaults(debug=False)
- (options, args) = parser.parse_args()
-
-  # Set single_pass to True if interval is -1.
- if options.interval == -1:
- options.single_pass = True
-
- Main(Accounts(system_module=System()), DesiredAccounts(),
- System(), logging.getLogger(), None, LockFile(), None, options.single_pass,
- options.daemon, options.force, options.debug)
diff --git a/google-daemon/usr/share/google/google_daemon/manage_addresses.py b/google-daemon/usr/share/google/google_daemon/manage_addresses.py
deleted file mode 100755
index 5e9ade6..0000000
--- a/google-daemon/usr/share/google/google_daemon/manage_addresses.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Main driver logic for managing public IPs on GCE instances."""
-
-import logging
-import logging.handlers
-import os
-import sys
-
-def FixPath():
- parent_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
- if os.path.isdir(parent_dir):
- sys.path.append(parent_dir)
-
-
-FixPath()
-
-from utils import LockFile
-from utils import System
-from address_manager import AddressManager
-
-
-LOCKFILE = '/var/lock/google-address-manager.lock'
-
-def Main(system=System(), logger=logging.getLogger(), log_handler=None,
- lock_file=LockFile(), lock_fname=None):
- if not log_handler:
- log_handler = system.MakeLoggingHandler(
- 'google-address-manager', logging.handlers.SysLogHandler.LOG_SYSLOG)
- system.SetLoggingHandler(logger, log_handler)
- logging.info('Starting GCE address manager')
-
- if not lock_fname:
- lock_fname = LOCKFILE
- manager = AddressManager(system_module=system)
- lock_file.RunExclusively(lock_fname, manager.SyncAddressesForever)
-
-
-if __name__ == '__main__':
- Main()
diff --git a/google-daemon/usr/share/google/google_daemon/manage_clock_sync.py b/google-daemon/usr/share/google/google_daemon/manage_clock_sync.py
deleted file mode 100755
index c49f699..0000000
--- a/google-daemon/usr/share/google/google_daemon/manage_clock_sync.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/python
-# Copyright 2015 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Manages clock syncing after migration on GCE instances."""
-
-import logging
-import logging.handlers
-import os
-import sys
-
-def FixPath():
- parent_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
- if os.path.isdir(parent_dir):
- sys.path.append(parent_dir)
-
-
-FixPath()
-
-from utils import LockFile
-from utils import System
-from metadata_watcher import MetadataWatcher
-
-
-LOCKFILE = '/var/lock/google-clock-sync.lock'
-
-
-def HandleClockDriftToken(metadata_watcher, on_change):
- """Watches for and responds to drift-token changes.
-
- Args:
- metadata_watcher: a MetadataWatcher object.
- on_change: a callable to call for any change.
- """
- clock_drift_token_key = 'instance/virtual-clock/drift-token'
-
- def Handler(event):
- on_change(event)
-
- metadata_watcher.WatchMetadataForever(clock_drift_token_key,
- Handler, initial_value='')
-
-
-def OnChange(event):
- """Called when clock drift token changes.
-
- Args:
- event: the new value of the drift token.
- """
- system = System()
- logging.info('Clock drift token has changed: %s', event)
- logging.info('Syncing system time with hardware clock...')
- result = system.RunCommand(['/sbin/hwclock', '--hctosys'])
- if system.RunCommandFailed(result):
- logging.error('Syncing system time failed.')
- else:
- logging.info('Synced system time with hardware clock.')
-
-
-def Main(system=System(), logger=logging.getLogger(), log_handler=None,
- lock_file=LockFile(), lock_fname=None):
- if not log_handler:
- log_handler = system.MakeLoggingHandler(
- 'google-clock-sync', logging.handlers.SysLogHandler.LOG_SYSLOG)
- system.SetLoggingHandler(logger, log_handler)
- logging.info('Starting GCE clock sync')
-
- if not lock_fname:
- lock_fname = LOCKFILE
- watcher = MetadataWatcher()
-  # Pass a callable: RunExclusively invokes it after acquiring the lock.
-  lock_file.RunExclusively(
-      lock_fname, lambda: HandleClockDriftToken(watcher, OnChange))
-
-
-if __name__ == '__main__':
- Main()
diff --git a/google-daemon/usr/share/google/google_daemon/metadata_watcher.py b/google-daemon/usr/share/google/google_daemon/metadata_watcher.py
deleted file mode 100755
index af0a90a..0000000
--- a/google-daemon/usr/share/google/google_daemon/metadata_watcher.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/python
-# Copyright 2015 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import httplib
-import time
-import urllib
-import urllib2
-
-
-METADATA_URL = 'http://metadata.google.internal/computeMetadata/v1/'
-
-
-class Error(Exception):
- pass
-
-
-class UnexpectedStatusException(Error):
- pass
-
-
-class MetadataWatcher(object):
- """Watches for changing metadata."""
-
- def __init__(self, httplib_module=httplib, time_module=time,
- urllib_module=urllib, urllib2_module=urllib2):
- self.httplib = httplib_module
- self.time = time_module
- self.urllib = urllib_module
- self.urllib2 = urllib2_module
-
- def WatchMetadataForever(self, metadata_key, handler, initial_value=None):
- """Watches for a change in the value of metadata.
-
- Args:
- metadata_key: The key identifying which metadata to watch for changes.
- handler: A callable to call when the metadata value changes. Will be passed
- a single parameter, the new value of the metadata.
- initial_value: The expected initial value for the metadata. The handler will
- not be called on the initial metadata request unless the value differs
- from this.
-
- Raises:
- UnexpectedStatusException: If the http request is unsuccessful for an
- unexpected reason.
- """
- params = {
- 'wait_for_change': 'true',
- 'last_etag': 0,
- }
-
- value = initial_value
- while True:
-      # Start a hanging GET request for the metadata key.
- url = '{base_url}{key}?{params}'.format(
- base_url=METADATA_URL,
- key=metadata_key,
- params=self.urllib.urlencode(params)
- )
- req = self.urllib2.Request(url, headers={'Metadata-Flavor': 'Google'})
-
- try:
- response = self.urllib2.urlopen(req)
- content = response.read()
- status = response.getcode()
- except self.urllib2.HTTPError as e:
- content = None
- status = e.code
-
- if status == self.httplib.SERVICE_UNAVAILABLE:
- self.time.sleep(1)
- continue
- elif status == self.httplib.OK:
- # Extract new metadata value and latest etag.
- new_value = content
- headers = response.info()
- params['last_etag'] = headers['ETag']
- else:
- raise UnexpectedStatusException(status)
-
- # If the metadata value changed, call the appropriate handler.
- if value == initial_value:
- value = new_value
- elif value != new_value:
- value = new_value
- handler(value)
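-
-
-# Example usage (illustrative handler; prints each new value forever):
-#   def PrintValue(value):
-#     print 'new value: %s' % value
-#   MetadataWatcher().WatchMetadataForever(
-#       'instance/virtual-clock/drift-token', PrintValue, initial_value='')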
diff --git a/google-daemon/usr/share/google/google_daemon/utils.py b/google-daemon/usr/share/google/google_daemon/utils.py
deleted file mode 100755
index 0c7fe5c..0000000
--- a/google-daemon/usr/share/google/google_daemon/utils.py
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Library functions and interfaces for manipulating accounts."""
-
-import errno
-import fcntl
-import logging
-import logging.handlers
-import os
-import shutil
-import subprocess
-import sys
-import tempfile
-
-class RunCommandException(Exception):
- """Could not run a command."""
- pass
-
-
-class System(object):
- """Interface for interacting with the system."""
-
- def __init__(self, subprocess_module=subprocess, os_module=os):
- self.subprocess = subprocess_module
- self.os = os_module
-
- def MakeLoggingHandler(self, prefix, facility):
- """Make a logging handler to send logs to syslog."""
- handler = logging.handlers.SysLogHandler(
- address='/dev/log', facility=facility)
- formatter = logging.Formatter(prefix + ': %(levelname)s %(message)s')
- handler.setFormatter(formatter)
- return handler
-
- def SetLoggingHandler(self, logger, handler):
- """Setup logging w/ a specific handler."""
- handler.setLevel(logging.INFO)
- logger.setLevel(logging.INFO)
- logger.addHandler(handler)
-
- def EnableDebugLogging(self, logger):
- debug_handler = logging.StreamHandler(sys.stdout)
- debug_handler.setLevel(logging.DEBUG)
- logger.addHandler(debug_handler)
- logger.setLevel(logging.DEBUG)
-
- def OpenFile(self, *args, **kwargs):
- return open(*args, **kwargs)
-
- def MoveFile(self, src, dst):
- return shutil.move(src, dst)
-
- def CreateTempFile(self, delete=True):
- return tempfile.NamedTemporaryFile(delete=delete)
-
- def DeleteFile(self, name):
- return os.remove(name)
-
- def UserAdd(self, user, groups):
- logging.info('Creating account %s', user)
-
- # We must set the crypto passwd via useradd to '*' to make ssh work
- # on Linux systems without PAM.
- #
-    # Unfortunately, there is no spec that I can find that defines how
-    # this is used; the shadow manpage says that "!" or "*" or any other
-    # invalid crypt may be used.
-    #
-    # ssh takes it upon itself to use "!" as its locked-account token:
-    # https://github.com/openssh/openssh-portable/blob/master/configure.ac#L705
-    #
-    # If the '!' token is used, then ssh simply denies logins:
-    # https://github.com/openssh/openssh-portable/blob/master/auth.c#L151
-    #
-    # To solve the issue, make the passwd '*', which is also recognized as
-    # locked but doesn't prevent ssh logins.
- result = self.RunCommand([
- '/usr/sbin/useradd', user, '-m', '-s', '/bin/bash', '-p', '*', '-G',
- ','.join(groups)])
- if self.RunCommandFailed(result, 'Could not create user %s', user):
- return False
- return True
-
- def IsValidSudoersFile(self, filename):
- result = self.RunCommand(['/usr/sbin/visudo', '-c', '-f', filename])
- if result[0] != 0:
-      with self.OpenFile(filename, 'r') as f:
- contents = f.read()
- self.RunCommandFailed(
- result, 'Could not produce valid sudoers file\n%s' % contents)
- return False
- return True
-
- def IsExecutable(self, path):
- """Return whether path exists and is an executable binary."""
- return self.os.path.isfile(path) and self.os.access(path, os.X_OK)
-
- def RunCommand(self, args):
- """Run a command, return a retcode, stdout, stderr tuple."""
- try:
- p = self.subprocess.Popen(
- args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (stdout, stderr) = p.communicate()
- return (p.returncode, stdout, stderr)
-    except OSError as e:
- raise RunCommandException('Could not run %s due to %s' % (args, e))
-
- def RunCommandFailed(self, result, *msg_args):
- retcode, stdout, stderr = result
- if retcode != 0:
- logging.warning('%s\nSTDOUT:\n%s\nSTDERR:\n%s\n',
- msg_args[0] % msg_args[1:], stdout, stderr)
- return True
- return False
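-
-  # Example (illustrative):
-  #   result = System().RunCommand(['/bin/echo', 'hello'])  # (0, 'hello\n', '')
-  #   System().RunCommandFailed(result, 'echo failed')      # returns False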
-
-
-class CouldNotLockException(Exception):
- """Someone else seems to be holding the lock."""
- pass
-
-
-class UnexpectedLockException(Exception):
- """We genuinely failed to lock the file."""
- pass
-
-
-class CouldNotUnlockException(Exception):
- """Someone else seems to be holding the lock."""
- pass
-
-
-class UnexpectedUnlockException(Exception):
- """We genuinely failed to unlock the file."""
- pass
-
-
-class LockFile(object):
- """Lock a file to prevent multiple concurrent executions."""
-
- def __init__(self, fcntl_module=fcntl):
- self.fcntl_module = fcntl_module
-
- def RunExclusively(self, lock_fname, method):
- try:
- self.Lock(lock_fname)
- method()
- self.Unlock()
- except CouldNotLockException:
- logging.warning(
- 'Could not lock %s. Is it locked by another program?',
- lock_fname)
- except UnexpectedLockException as e:
- logging.warning(
- 'Could not lock %s due to %s', lock_fname, e)
- except CouldNotUnlockException:
- logging.warning(
- 'Could not unlock %s. Is it locked by another program?',
- lock_fname)
- except UnexpectedUnlockException as e:
- logging.warning(
- 'Could not unlock %s due to %s', lock_fname, e)
-
- def Lock(self, lock_fname):
- """Lock the lock file."""
- try:
- self.fh = open(lock_fname, 'w+b')
- self.fcntl_module.flock(self.fh.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
- except IOError as e:
- if e.errno == errno.EWOULDBLOCK:
- raise CouldNotLockException()
- raise UnexpectedLockException('Failed to lock: %s' % e)
-
- def Unlock(self):
- """Unlock the lock file."""
- try:
- self.fcntl_module.flock(self.fh.fileno(), fcntl.LOCK_UN|fcntl.LOCK_NB)
- except IOError as e:
- if e.errno == errno.EWOULDBLOCK:
- raise CouldNotUnlockException()
- raise UnexpectedUnlockException('Failed to unlock: %s' % e)
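-
-
-# Example usage (illustrative): DoWork is any zero-argument callable; lock
-# contention is logged rather than raised.
-#   LockFile().RunExclusively('/var/lock/my-daemon.lock', DoWork)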
diff --git a/google-startup-scripts/README.md b/google-startup-scripts/README.md
deleted file mode 100644
index 2602143..0000000
--- a/google-startup-scripts/README.md
+++ /dev/null
@@ -1,52 +0,0 @@
-## Google Startup Scripts
-Google provides a set of startup scripts that interact with the virtual machine environment. On boot, the startup script `/usr/share/google/onboot` queries the instance metadata for a user-provided startup script to run. A user-provided startup script can be specified inline in the instance metadata under `startup-script` or, if the script is stored in a downloadable file, via `startup-script-url`. You can use [gcloud compute](https://cloud.google.com/compute/docs/gcloud-compute/) or the [Google Compute Engine API](https://developers.google.com/compute/docs/reference/latest) to specify a startup script.
-
-For more information on how to use startup scripts, read the [Using Startup Scripts documentation](https://developers.google.com/compute/docs/howtos/startupscript#storescriptremotely).
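-
-For example, a startup script file can be attached when creating an instance (the instance and file names below are illustrative):
-
-    gcloud compute instances create example-instance \
-        --metadata-from-file startup-script=/path/to/startup.sh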
-
-Below are examples of instance metadata passing a startup script URL and an inline startup script:
-
- { // instance
- metadata: {
- "kind": "compute#metadata",
- "items": [
- {
- "key": "startup-script-url",
-        "value": "http://startup-script-url"
- }
- ]
- }
- }
- { // instance
- metadata: {
- "kind": "compute#metadata",
- "items": [
- {
- "key": "startup-script",
-        "value": "#! /bin/python\nprint 'startup'\n"
- }
- ]
- }
- }
-
-
-Google startup scripts also perform the following actions:
-
-+ __Checks the value of the instance id key__
-
- Startup scripts check the value of the instance ID at:
-
- http://169.254.169.254/computeMetadata/v1/instance/id
-
-    and compare it to the last instance ID the disk booted on.
-
-+ __Sets the [hostname](https://github.com/GoogleCloudPlatform/compute-image-packages/blob/master/google-startup-scripts/usr/share/google/set-hostname) from the metadata server via DHCP exit hooks.__
-
-+ __Updates gsutil authentication.__
-
-    Startup scripts run `/usr/share/google/boto/boot_setup.py`, which configures and copies `/usr/share/google/boto/boto_plugins/compute_auth.py` into the boto plugin directory.
-
-+ __Provides udev rules to give friendly names to disks.__
-
- Google Compute Engine provides `/lib/udev/rules.d/65-gce-disk-naming.rules` in our images.
-
-+ __Safely formats persistent disks via `/usr/share/google/safe_format_and_mount`.__
diff --git a/google-startup-scripts/etc/init.d/google b/google-startup-scripts/etc/init.d/google
deleted file mode 100755
index 469d282..0000000
--- a/google-startup-scripts/etc/init.d/google
+++ /dev/null
@@ -1,75 +0,0 @@
-#! /bin/sh
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-### BEGIN INIT INFO
-# Provides: gce_onboot
-# X-Start-Before: ssh
-# Required-Start: $local_fs $network $syslog
-# Required-Stop:
-# Default-Start: 2 3 4 5
-# Default-Stop:
-# Short-Description: Google Compute Engine on-boot services
-# Description: This launches the Google Compute Engine
-# VM initialization scripts.
-### END INIT INFO
-
-# Load the VERBOSE setting and other rcS variables
-. /lib/init/vars.sh
-
-# Define LSB log_* functions.
-# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
-# and status_of_proc is working.
-. /lib/lsb/init-functions
-
-# If we're running under upstart, let the upstart config file handle things.
-# Debian 7 and newer have a near-one-liner function to detect this...
-if type init_is_upstart >/dev/null 2>&1; then
- # ... which we can use if present.
- init_is_upstart && exit 0
-else
- # Otherwise, directly include the core line of Debian 7's version.
- # Authorship credit: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=661109
- if [ -x /sbin/initctl ] && /sbin/initctl version | /bin/grep -q upstart; then
- exit 0
- fi
-fi
-
-DESC="Google Compute Engine on-boot services"
-NAME="onboot"
-
-#
-# Function that starts the daemon/service
-#
-do_start()
-{
- /usr/share/google/onboot
-}
-
-case "$1" in
- start)
- [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
- do_start
- case "$?" in
- 0) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- *) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- *)
- echo "Usage: $SCRIPTNAME start" >&2
- exit 3
- ;;
-esac
-
-:
diff --git a/google-startup-scripts/etc/init.d/google-startup-scripts b/google-startup-scripts/etc/init.d/google-startup-scripts
deleted file mode 100755
index 3a7e051..0000000
--- a/google-startup-scripts/etc/init.d/google-startup-scripts
+++ /dev/null
@@ -1,89 +0,0 @@
-#! /bin/sh
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-### BEGIN INIT INFO
-# Provides: gce_run_startup_scripts
-# Required-Start: $all
-# Required-Stop: $remote_fs $syslog docker kubelet
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: Google Compute Engine user scripts
-# Description: This runs user-specified VM startup and shutdown scripts.
-### END INIT INFO
-
-# Load the VERBOSE setting and other rcS variables
-. /lib/init/vars.sh
-
-# Define LSB log_* functions.
-# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
-# and status_of_proc is working.
-. /lib/lsb/init-functions
-
-DESC="Google Compute Engine user startup scripts"
-NAME="run_startup_scripts"
-
-# If we're running under upstart, let the upstart config file handle things.
-# Debian 7 and newer have a near-one-liner function to detect this...
-if type init_is_upstart >/dev/null 2>&1; then
- # ... which we can use if present.
- init_is_upstart && exit 0
-else
- # Otherwise, directly include the core line of Debian 7's version.
- # Authorship credit: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=661109
- if [ -x /sbin/initctl ] && /sbin/initctl version | /bin/grep -q upstart; then
- exit 0
- fi
-fi
-
-#
-# Function that starts the daemon/service
-#
-do_start()
-{
- /usr/share/google/run-startup-scripts
-}
-
-#
-# Function that stops the daemon/service
-#
-do_stop()
-{
- /usr/share/google/run-shutdown-scripts
-}
-
-case "$1" in
- start)
- [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
- do_start
- case "$?" in
- 0) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- *) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- stop)
- [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
- do_stop
- case "$?" in
- 0) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
- *) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
- esac
- ;;
- *)
- echo "Usage: $SCRIPTNAME start" >&2
- exit 3
- ;;
-esac
-
-:
diff --git a/google-startup-scripts/etc/init/google.conf b/google-startup-scripts/etc/init/google.conf
deleted file mode 100755
index d47fb0d..0000000
--- a/google-startup-scripts/etc/init/google.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-# google - Run google startup script
-#
-# Start when rc.local loads, so we run after all the boot-time stuff.
-start on google-rc-local-has-run
-
-task
-script
- /usr/share/google/onboot
-end script
diff --git a/google-startup-scripts/etc/init/google_run_shutdown_scripts.conf b/google-startup-scripts/etc/init/google_run_shutdown_scripts.conf
deleted file mode 100755
index 69fc61c..0000000
--- a/google-startup-scripts/etc/init/google_run_shutdown_scripts.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-# google - Run google shutdown script
-#
-#
-start on starting rc RUNLEVEL=[06]
-task
-
-script
- /usr/bin/logger -s -t google -p local0.info "Running google_run_shutdown_scripts.conf"
- /usr/share/google/run-shutdown-scripts
-end script
diff --git a/google-startup-scripts/etc/init/google_run_startup_scripts.conf b/google-startup-scripts/etc/init/google_run_startup_scripts.conf
deleted file mode 100755
index c81efd8..0000000
--- a/google-startup-scripts/etc/init/google_run_startup_scripts.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-# google - Run google startup script
-#
-#
-start on google-rc-local-has-run and google-onboot-has-run
-
-script
- /usr/bin/logger -s -t google -p local0.info "Running google_run_startup_scripts.conf"
- /usr/share/google/run-startup-scripts
- initctl emit --no-wait google-startup-scripts-have-run
-end script
diff --git a/google-startup-scripts/etc/rc.local b/google-startup-scripts/etc/rc.local
deleted file mode 100755
index ac6f13c..0000000
--- a/google-startup-scripts/etc/rc.local
+++ /dev/null
@@ -1,17 +0,0 @@
-#! /bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-[ -x /sbin/initctl ] && initctl emit --no-wait google-rc-local-has-run
-exit 0
diff --git a/google-startup-scripts/etc/rsyslog.d/90-google.conf b/google-startup-scripts/etc/rsyslog.d/90-google.conf
deleted file mode 100644
index 4448836..0000000
--- a/google-startup-scripts/etc/rsyslog.d/90-google.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-# Google Compute Engine default console logging.
-#
-auth,daemon,kern.* /dev/console
-
-# Dump startup script output to /var/log/startupscript.log.
-:syslogtag,startswith,"startupscript" /var/log/startupscript.log
-
-# Dump shutdown script output to /var/log/shutdownscript.log.
-:syslogtag,startswith,"shutdownscript" /var/log/shutdownscript.log
diff --git a/google-startup-scripts/etc/sysctl.d/11-gce-network-security.conf b/google-startup-scripts/etc/sysctl.d/11-gce-network-security.conf
deleted file mode 100644
index 0f70b99..0000000
--- a/google-startup-scripts/etc/sysctl.d/11-gce-network-security.conf
+++ /dev/null
@@ -1,47 +0,0 @@
-# Google-recommended kernel parameters
-
-# Turn on SYN-flood protections. Starting with 2.6.26, there is no loss
-# of TCP functionality/features under normal conditions. When flood
-# protections kick in under high unanswered-SYN load, the system
-# should remain more stable, with a trade off of some loss of TCP
-# functionality/features (e.g. TCP Window scaling).
-net.ipv4.tcp_syncookies=1
-
-# Ignore source-routed packets
-net.ipv4.conf.all.accept_source_route=0
-net.ipv4.conf.default.accept_source_route=0
-
-# Ignore ICMP redirects from non-GW hosts
-net.ipv4.conf.all.accept_redirects=0
-net.ipv4.conf.default.accept_redirects=0
-net.ipv4.conf.all.secure_redirects=1
-net.ipv4.conf.default.secure_redirects=1
-
-# Don't pass traffic between networks or act as a router
-net.ipv4.ip_forward=0
-net.ipv4.conf.all.send_redirects=0
-net.ipv4.conf.default.send_redirects=0
-
-# Turn on Source Address Verification in all interfaces to
-# prevent some spoofing attacks.
-net.ipv4.conf.all.rp_filter=1
-net.ipv4.conf.default.rp_filter=1
-
-# Ignore ICMP broadcasts to avoid participating in Smurf attacks
-net.ipv4.icmp_echo_ignore_broadcasts=1
-
-# Ignore bad ICMP errors
-net.ipv4.icmp_ignore_bogus_error_responses=1
-
-# Log spoofed, source-routed, and redirect packets
-net.ipv4.conf.all.log_martians=1
-net.ipv4.conf.default.log_martians=1
-
-# RFC 1337 fix
-net.ipv4.tcp_rfc1337=1
-
-# Addresses of mmap base, heap, stack and VDSO page are randomized
-kernel.randomize_va_space=2
-
-# Reboot the machine soon after a kernel panic.
-kernel.panic=10
diff --git a/google-startup-scripts/lib/udev/rules.d/64-gce-disk-removal.rules b/google-startup-scripts/lib/udev/rules.d/64-gce-disk-removal.rules
deleted file mode 100644
index 004f64a..0000000
--- a/google-startup-scripts/lib/udev/rules.d/64-gce-disk-removal.rules
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# When a disk is removed, unmount any remaining attached volumes.
-
-ACTION=="remove", SUBSYSTEM=="block", KERNEL=="sd*|vd*", RUN+="/bin/sh -c '/bin/umount -fl /dev/$name && /usr/bin/logger -p daemon.warn -s WARNING: hot-removed /dev/$name that was still mounted, data may have been corrupted'"
diff --git a/google-startup-scripts/lib/udev/rules.d/65-gce-disk-naming.rules b/google-startup-scripts/lib/udev/rules.d/65-gce-disk-naming.rules
deleted file mode 100644
index eef1e17..0000000
--- a/google-startup-scripts/lib/udev/rules.d/65-gce-disk-naming.rules
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2011 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Name the attached disks as specified by deviceName.
-
-ACTION!="add|change", GOTO="gce_disk_naming_end"
-SUBSYSTEM!="block", GOTO="gce_disk_naming_end"
-
-KERNEL=="sd*|vd*", IMPORT{program}="scsi_id --export --whitelisted -d $tempnode"
-KERNEL=="sd*|vd*", ENV{ID_SERIAL_SHORT}=="?*", ENV{DEVTYPE}=="disk", SYMLINK+="disk/by-id/google-$env{ID_SERIAL_SHORT}"
-KERNEL=="sd*|vd*", ENV{ID_SERIAL_SHORT}=="?*", ENV{DEVTYPE}=="partition", SYMLINK+="disk/by-id/google-$env{ID_SERIAL_SHORT}-part%n"
-
-LABEL="gce_disk_naming_end"
diff --git a/google-startup-scripts/usr/lib/systemd/system-preset/50-google.preset b/google-startup-scripts/usr/lib/systemd/system-preset/50-google.preset
deleted file mode 100644
index f981a63..0000000
--- a/google-startup-scripts/usr/lib/systemd/system-preset/50-google.preset
+++ /dev/null
@@ -1,3 +0,0 @@
-enable google.service
-enable google-shutdown-scripts.service
-enable google-startup-scripts.service
diff --git a/google-startup-scripts/usr/lib/systemd/system/google-shutdown-scripts.service b/google-startup-scripts/usr/lib/systemd/system/google-shutdown-scripts.service
deleted file mode 100644
index 73adab8..0000000
--- a/google-startup-scripts/usr/lib/systemd/system/google-shutdown-scripts.service
+++ /dev/null
@@ -1,15 +0,0 @@
-[Unit]
-Description=Google Compute Engine user shutdown scripts
-After=local-fs.target network-online.target network.target
-After=google.service rsyslog.service
-Wants=local-fs.target network-online.target network.target
-
-[Service]
-ExecStart=/bin/true
-ExecStop=/usr/share/google/run-shutdown-scripts
-Type=oneshot
-RemainAfterExit=true
-TimeoutStopSec=0
-
-[Install]
-WantedBy=multi-user.target
diff --git a/google-startup-scripts/usr/lib/systemd/system/google-startup-scripts.service b/google-startup-scripts/usr/lib/systemd/system/google-startup-scripts.service
deleted file mode 100644
index a99a160..0000000
--- a/google-startup-scripts/usr/lib/systemd/system/google-startup-scripts.service
+++ /dev/null
@@ -1,13 +0,0 @@
-[Unit]
-Description=Google Compute Engine user startup scripts
-After=local-fs.target network-online.target network.target
-After=google.service rsyslog.service
-Wants=local-fs.target network-online.target network.target
-
-[Service]
-ExecStart=/usr/share/google/run-startup-scripts
-KillMode=process
-Type=oneshot
-
-[Install]
-WantedBy=multi-user.target
diff --git a/google-startup-scripts/usr/lib/systemd/system/google.service b/google-startup-scripts/usr/lib/systemd/system/google.service
deleted file mode 100644
index ea76a46..0000000
--- a/google-startup-scripts/usr/lib/systemd/system/google.service
+++ /dev/null
@@ -1,13 +0,0 @@
-[Unit]
-Description=Google Compute Engine VM initialization
-After=local-fs.target network-online.target network.target
-Before=sshd.service
-Wants=local-fs.target network-online.target network.target
-
-[Service]
-ExecStart=/usr/share/google/onboot
-Type=oneshot
-
-[Install]
-WantedBy=sshd.service
-WantedBy=multi-user.target
diff --git a/google-startup-scripts/usr/share/google/boto/boot_setup.py b/google-startup-scripts/usr/share/google/boto/boot_setup.py
deleted file mode 100755
index e9f3924..0000000
--- a/google-startup-scripts/usr/share/google/boto/boot_setup.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#! /usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""A simple start up script to set up the system boto.cfg file.
-
-This will hit the metadata server to get the appropriate project id
-and install the compute authenication plugin.
-
-Note that this starts with whatever is in /etc/boto.cfg.template, adds
-to that and then persists it into /etc/boto.cfg. This is done so that
-the system boto.cfg can be removed prior to image packaging.
-"""
-
-from ConfigParser import SafeConfigParser
-import os
-import sys
-import textwrap
-import urllib2
-
-NUMERIC_PROJECT_ID_URL=('http://169.254.169.254/'
- 'computeMetadata/v1/project/numeric-project-id')
-SYSTEM_BOTO_CONFIG_TEMPLATE='/etc/boto.cfg.template'
-SYSTEM_BOTO_CONFIG='/etc/boto.cfg'
-AUTH_PLUGIN_DIR='/usr/share/google/boto/boto_plugins'
-
-
-def GetNumericProjectId():
- """Get the numeric project ID for this VM."""
- try:
- request = urllib2.Request(NUMERIC_PROJECT_ID_URL)
- request.add_unredirected_header('Metadata-Flavor', 'Google')
- return urllib2.urlopen(request).read()
-  except (urllib2.URLError, urllib2.HTTPError, IOError):
- return None
-
-
-def AddConfigFileHeader(fp):
- s = ("""\
- This file is automatically created at boot time by the %s script.
- Do not edit this file directly. If you need to add items to this
- file, create/edit %s instead and then re-run the script."""
- % (os.path.abspath(__file__), SYSTEM_BOTO_CONFIG_TEMPLATE))
- fp.write('\n'.join(['# ' + s for s in textwrap.wrap(textwrap.dedent(s),
- break_on_hyphens=False)]))
- fp.write('\n\n')
-
-
-def main(argv):
- config = SafeConfigParser()
- config.read(SYSTEM_BOTO_CONFIG_TEMPLATE)
-
- # TODO(user): Figure out if we need a retry here.
- project_id = GetNumericProjectId()
- if not project_id:
- # Our project doesn't support service accounts.
- return
-
- if not config.has_section('GSUtil'):
- config.add_section('GSUtil')
- config.set('GSUtil', 'default_project_id', project_id)
- config.set('GSUtil', 'default_api_version', '2')
-
- if not config.has_section('GoogleCompute'):
- config.add_section('GoogleCompute')
- # TODO(user): Plumb a metadata value to set this. We probably want
- # to namespace the metadata values in some way like
-    # 'boto_auth.service_account'.
- config.set('GoogleCompute', 'service_account', 'default')
-
- if not config.has_section('Plugin'):
- config.add_section('Plugin')
- config.set('Plugin', 'plugin_directory', AUTH_PLUGIN_DIR)
-
- with open(SYSTEM_BOTO_CONFIG, 'w') as configfile:
- AddConfigFileHeader(configfile)
- config.write(configfile)
-
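-# On a VM with a service account, the generated /etc/boto.cfg ends up looking
-# something like this (illustrative values):
-#
-#   [GSUtil]
-#   default_project_id = 123456789012
-#   default_api_version = 2
-#
-#   [GoogleCompute]
-#   service_account = default
-#
-#   [Plugin]
-#   plugin_directory = /usr/share/google/boto/boto_plugins
-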
-
-if __name__ == '__main__':
- main(sys.argv[1:])
diff --git a/google-startup-scripts/usr/share/google/boto/boto_plugins/compute_auth.py b/google-startup-scripts/usr/share/google/boto/boto_plugins/compute_auth.py
deleted file mode 100644
index 97d3e20..0000000
--- a/google-startup-scripts/usr/share/google/boto/boto_plugins/compute_auth.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Authentication module for using Google Compute service accounts."""
-
-import json
-import urllib2
-
-from boto.auth_handler import AuthHandler
-from boto.auth_handler import NotReadyToAuthenticate
-
-META_DATA_SERVER_BASE_URL=(
- 'http://169.254.169.254/computeMetadata/v1')
-
-SERVICE_ACCOUNT_SCOPES_URL=(META_DATA_SERVER_BASE_URL +
- '/instance/service-accounts/%s/scopes?alt=json')
-SERVICE_ACCOUNT_TOKEN_URL=(META_DATA_SERVER_BASE_URL +
- '/instance/service-accounts/%s/token?alt=json')
-
-GS_SCOPES = set([
- 'https://www.googleapis.com/auth/devstorage.read_only',
- 'https://www.googleapis.com/auth/devstorage.read_write',
- 'https://www.googleapis.com/auth/devstorage.full_control',
- ])
-
-
-class ComputeAuth(AuthHandler):
- """Google Compute service account auth handler.
-
- What happens is that the boto library reads the system config file
- (/etc/boto.cfg) and looks at a config value called 'plugin_directory'. It
-  then loads the Python files in that directory and finds classes derived
-  from boto.auth_handler.AuthHandler.
- """
-
- capability = ['google-oauth2', 's3']
-
- def __init__(self, path, config, provider):
- self.service_account = config.get('GoogleCompute', 'service_account', '')
- if provider.name == 'google' and self.service_account:
- self.scopes = self.__GetGSScopes()
- if not self.scopes:
- raise NotReadyToAuthenticate()
- else:
- raise NotReadyToAuthenticate()
-
- def __GetJSONMetadataValue(self, url):
- try:
- request = urllib2.Request(url)
- request.add_unredirected_header('Metadata-Flavor', 'Google')
- data = urllib2.urlopen(request).read()
- return json.loads(data)
- except (urllib2.URLError, urllib2.HTTPError, IOError):
- return None
-
- def __GetGSScopes(self):
- """Return all Google Storage scopes available on this VM."""
- scopes = self.__GetJSONMetadataValue(
- SERVICE_ACCOUNT_SCOPES_URL % self.service_account)
- if scopes:
- return list(GS_SCOPES.intersection(set(scopes)))
- return None
-
- def __GetAccessToken(self):
- """Return an oauth2 access token for Google Storage."""
- token_info = self.__GetJSONMetadataValue(
- SERVICE_ACCOUNT_TOKEN_URL % self.service_account)
- if token_info:
- return token_info['access_token']
- return None
-
- def add_auth(self, http_request):
- http_request.headers['Authorization'] = (
- 'OAuth %s' % self.__GetAccessToken())
diff --git a/google-startup-scripts/usr/share/google/fetch_script b/google-startup-scripts/usr/share/google/fetch_script
deleted file mode 100755
index 72ba9ac..0000000
--- a/google-startup-scripts/usr/share/google/fetch_script
+++ /dev/null
@@ -1,148 +0,0 @@
-#! /bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Fetch a script from metadata and store it in the /var/run directory.
-declare -r LOGFILE=/var/log/google.log
-
-if [[ -x /usr/bin/logger ]]; then
- declare -r LOGGER=/usr/bin/logger
-else
- declare -r LOGGER=/bin/logger
-fi
-
-declare -r CURL_RETRY_LIMIT=10
-declare -r CURL_TIMEOUT=10
-
-function log() {
- echo "$@" | ${LOGGER} -t google -p daemon.info
- echo "$@" >> ${LOGFILE}
-}
-
-function download_url_with_logfile() {
- local readonly url=$1
- local readonly dest=$2
- local readonly logfile=$3
-
- if [[ "$url" =~ gs://* ]]; then
- log "Downloading url from ${url} to ${dest} using gsutil"
- gsutil cp "${url}" "${dest}" > "${logfile}" 2>&1 && return 0
- log "Failed to download $url"
- return 1
- fi
-
-  # Several forms of Google Storage URL are supported below.
-  # It is preferred that customers specify their object using
-  # its gs://<bucket>/<object> URL.
-
- bucket="[a-z0-9][-_.a-z0-9]*[a-z0-9]"
-
-  # Accept any non-empty string that doesn't contain a wildcard character.
-  # gsutil interprets some characters as wildcards, and these characters in
-  # object names make it difficult or impossible to perform various wildcard
-  # operations using gsutil. For a complete list, run "gsutil help naming".
- object="[^\*\?][^\*\?]*"
-
- # For all validation tests:
- # alphanumeric ranges should only include ascii characters
- export LC_COLLATE=C
-
- # Check for the Google Storage URLs:
- # http://<bucket>.storage.googleapis.com/<object>/
- # https://<bucket>.storage.googleapis.com/<object>/
- if [[ "$url" =~ http[s]?://${bucket}\.storage\.googleapis\.com/${object} ]]; then
- log "Downloading url from ${url} to ${dest} using gsutil"
- # Create a url that can be interpreted by gsutil
- gsurl=$(echo "$url" | sed "s/^https\?:\/\/\($bucket\)\.storage\.googleapis\.com\/\($object\)$/gs:\/\/\1\/\2/")
- gsutil cp ${gsurl} ${dest} 2> ${logfile} && return 0
- # Check for the other possible Google Storage URLS:
- # http://storage.googleapis.com/<bucket>/<object>/
- # https://storage.googleapis.com/<bucket>/<object>/
- #
- # The following are deprecated but checked
- # http://commondatastorage.googleapis.com/<bucket>/<object>/
- # https://commondatastorage.googleapis.com/<bucket>/<object>/
- elif [[ "$url" =~ http[s]?://(commondata)?storage\.googleapis\.com/${bucket}/${object} ]]; then
- log "Downloading url from ${url} to ${dest} using gsutil"
- # Create a url that can be interpreted by gsutil
- gsurl=$(echo "$url" | sed "s/^https\?:\/\/\(commondata\|\)storage\.googleapis\.com\/\($bucket\)\/\($object\)$/gs:\/\/\2\/\3/")
- gsutil cp "${gsurl}" "${dest}" 2> "${logfile}" && return 0
- else
- log "URL ${url} is not located in Google Storage"
- fi
-
- # Unauthenticated download of the object.
- log "Downloading url from ${url} to ${dest} using curl"
- curl --max-time "${CURL_TIMEOUT}" --retry "${CURL_RETRY_LIMIT}" \
- 2>> "${logfile}" -o "${dest}" -L -- "${url}" && return 0;
-
- log "Failed to download $url"
- return 1
-}
-
-function download_url() {
- local readonly url=$1
- local readonly dest=$2
- local readonly logfile=$(mktemp)
- download_url_with_logfile "${url}" "${dest}" "${logfile}"
- return_code=$?
- # If the script was unable to download then report to the syslog.
- if [[ "${return_code}" != "0" ]]; then
- log "$(<"${logfile}")"
- else
- rm -f "${logfile}"
- fi
- return "${return_code}"
-}
-
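-# Example (illustrative; the bucket and object names are made up):
-#   download_url "gs://example-bucket/startup.sh" "/var/run/google.startup.script"
-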
-function get_metadata_attribute() {
- local readonly varname=$1
- /usr/share/google/get_metadata_value "attributes/${varname}"
- return $?
-}
-
-function fetch_script() {
- # Try to use the script-url, then the script metadata.
- # Check the script url first.
- script=$1
- script_type=$2
- url_type="${script_type}-script"
- url="${url_type}-url"
-
- local readonly script_url="$(get_metadata_attribute ${url})"
- if [[ -n "${script_url}" ]]; then
- log "${url} metadata flag: ${script_url}"
- download_url "${script_url}" "${script}"
- if [[ $? != 0 ]]; then
- log "Could not download ${script_type} script ${script_url}."
- else
- log "Successfully downloaded ${script_type} script ${script_url}."
- fi
- else
- local readonly metadata_script="$(get_metadata_attribute ${url_type})"
- if [[ -n "${metadata_script}" ]]; then
- echo "${metadata_script}" > "${script}"
- log "${script_type} script found in metadata."
- else
- log $(curl "http://metadata.google.internal/computeMetadata/v1/instance/?recursive=True" -H "Metadata-Flavor: Google")
- log "No ${script_type} script found in metadata."
- fi
- fi
- [[ -e "${script}" ]] && chmod 700 "${script}"
-
- return 0
-}
-
-fetch_script "$1" "$2"
diff --git a/google-startup-scripts/usr/share/google/first-boot b/google-startup-scripts/usr/share/google/first-boot
deleted file mode 100755
index b346b65..0000000
--- a/google-startup-scripts/usr/share/google/first-boot
+++ /dev/null
@@ -1,94 +0,0 @@
-#! /bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Run initialization code the first time this image boots on a given instance.
-
-declare -r INSTANCE_FILE=${PREFIX}/var/lib/google/vm-instance-id
-declare -r LOCK_FILE=${INSTANCE_FILE}.lock
-
-mkdir -p ${PREFIX}/var/lib/google/per-instance
-
-function log() {
- if [[ -x ${PREFIX}/usr/bin/logger ]]; then
- echo $* | ${PREFIX}/usr/bin/logger -t first-boot -p auth.info
- else
- echo $* >&2
- fi
-}
-
-function get_instance_id() {
- ${PREFIX}/usr/share/google/get_metadata_value id 2>/dev/null
-}
-
-# Checks the instance id has changed.
-# Exits with return code 0 if the instance id hasn't changed.
-function check_stored_instance_id() {
- local readonly instance_id=$1
-
- if [[ "${instance_id}" == "" ]]; then
- # Cannot determine instance id. Either we're not running on a Compute VM,
- # or networking hasn't started up yet, etc.
- exit 1
- fi
-
- if [[ "${instance_id}" != "unknown-instance" &&
- "${instance_id}" == "$(cat ${INSTANCE_FILE} 2>/dev/null)" ]]; then
- # Instance id is same as on disk.
- exit 1
- fi
-}
-
-# Performs host key setup if the instance has changed.
-# Otherwise we exit with a non-zero return code.
-function manage_stored_instance_id() {
- local readonly instance_id=$1
-
- # Create a subshell to manage the lock file. The file lock is released
- # when the subshell exits.
- (
- # Open LOCK_FILE on FD 200 and lock it. This prevents concurrent calls
- # to regenerate host keys that spam console output.
- flock -e 200
-
- # Checks whether the instance has changed.
- # If the instance hasn't changed, exit the script.
- check_stored_instance_id ${instance_id}
-
- # If the instance hasn't changed, we have now exited the subshell.
- # Since the instance changed, we do host key regeneration.
- log "Running first-boot"
-
- # Regenerate host keys for ssh.
- if [[ -x ${PREFIX}/usr/share/google/regenerate-host-keys ]]; then
- ${PREFIX}/usr/share/google/regenerate-host-keys
- fi
-
- # We are booting this instance for the first time.
- echo ${instance_id} > ${INSTANCE_FILE}
- ) 200> ${LOCK_FILE}
-
- return $?
-}
-
-declare -r INSTANCE_ID=$(get_instance_id)
-
-manage_stored_instance_id ${INSTANCE_ID}
-if [[ $? != 0 ]]; then
- # The instance hasn't changed so exit.
- exit 0
-fi
-
-# Make a per-instance data directory.
-mkdir -p ${PREFIX}/var/lib/google/per-instance/${INSTANCE_ID}
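
The flock-in-a-subshell idiom above is self-contained and reusable; a minimal sketch, assuming a hypothetical lock path:

    #!/bin/bash
    # Serialize a critical section across concurrent invocations. FD 200 holds
    # the exclusive lock for the lifetime of the subshell; exiting releases it.
    LOCK_FILE=/var/lock/demo.lock   # hypothetical path
    (
      flock -e 200                  # blocks until the lock is acquired
      echo "only one invocation runs this at a time"
    ) 200> "${LOCK_FILE}"
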
diff --git a/google-startup-scripts/usr/share/google/get_metadata_value b/google-startup-scripts/usr/share/google/get_metadata_value
deleted file mode 100755
index c4e0eb6..0000000
--- a/google-startup-scripts/usr/share/google/get_metadata_value
+++ /dev/null
@@ -1,73 +0,0 @@
-#! /bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Get a metadata value from the metadata server.
-declare -r VARNAME=$1
-declare -r MDS_PREFIX=http://metadata.google.internal/computeMetadata/v1
-declare -r MDS_TRIES=${MDS_TRIES:-100}
-
-function print_metadata_value() {
- local readonly tmpfile=$(mktemp)
- http_code=$(curl -f "${1}" -H "Metadata-Flavor: Google" -w "%{http_code}" \
- -s -o ${tmpfile} 2>/dev/null)
- local readonly return_code=$?
- # If the command completed successfully, print the metadata value to stdout.
- if [[ ${return_code} == 0 && ${http_code} == 200 ]]; then
- cat ${tmpfile}
- fi
- rm -f ${tmpfile}
- return ${return_code}
-}
-
-function print_metadata_value_if_exists() {
- local return_code=1
- local readonly url=$1
- print_metadata_value ${url}
- return_code=$?
- return ${return_code}
-}
-
-function get_metadata_value() {
- local readonly varname=$1
- # Print the instance metadata value.
- print_metadata_value_if_exists ${MDS_PREFIX}/instance/${varname}
- return_code=$?
- # If the instance doesn't have the value, try the project.
- if [[ ${return_code} != 0 ]]; then
- print_metadata_value_if_exists ${MDS_PREFIX}/project/${varname}
- return_code=$?
- fi
- return ${return_code}
-}
-
-function get_metadata_value_with_retries() {
- local return_code=1 # General error code.
- for ((count=0; count <= ${MDS_TRIES}; count++)); do
- get_metadata_value $VARNAME
- return_code=$?
- case $return_code in
- # No error. We're done.
- 0) exit ${return_code};;
- # Failed to resolve host or connect to host. Retry.
- 6|7) sleep 0.3; continue;;
- # A genuine error. Exit.
- *) exit ${return_code};
- esac
- done
- # Exit with the last return code we got.
- exit ${return_code}
-}
-
-get_metadata_value_with_retries
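
Usage is positional: the argument is a metadata path relative to instance/, with a fallback to project/. Illustrative invocations (the custom attribute name is an assumption):

    # Numeric instance id, as used by first-boot below.
    /usr/share/google/get_metadata_value id

    # A custom attribute, with a lower retry cap. curl exits 6 (could not
    # resolve host) or 7 (could not connect) while networking comes up,
    # which is exactly what the retry loop keys on.
    MDS_TRIES=5 /usr/share/google/get_metadata_value "attributes/my-attribute"
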
diff --git a/google-startup-scripts/usr/share/google/onboot b/google-startup-scripts/usr/share/google/onboot
deleted file mode 100755
index 482d384..0000000
--- a/google-startup-scripts/usr/share/google/onboot
+++ /dev/null
@@ -1,162 +0,0 @@
-#!/bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Prep the image for Google Compute services.
-#
-# Do NOT "set -e"
-
-# Exit out early if we've run before.
-declare -r RUNFILE=/var/run/google.onboot
-if [ -f ${RUNFILE} ]; then
- exit 0
-fi
-
-PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin
-
-declare -r LOGFILE=/var/log/google.log
-
-if [ -x /usr/bin/logger ]; then
- declare -r LOGGER=/usr/bin/logger
-else
- declare -r LOGGER=/bin/logger
-fi
-
-declare -r BOTO_SETUP_SCRIPT=/usr/share/google/boto/boot_setup.py
-declare -r GOOGLE_ENVIRONMENT=/var/run/google.environment
-
-function log() {
- echo $* | ${LOGGER} -t google -p daemon.info
- echo $* >> ${LOGFILE}
-}
-
-function set_interrupts() {
- if [[ -x /usr/share/google/set-interrupts ]]; then
- /usr/share/google/set-interrupts
- fi
-}
-
-function virtionet_irq_affinity() {
- if [[ -x /usr/share/google/virtionet-irq-affinity ]]; then
- /usr/share/google/virtionet-irq-affinity
- fi
-}
-
-function first_boot() {
- if [[ -x /usr/share/google/first-boot ]]; then
- /usr/share/google/first-boot
- fi
-}
-
-function get_metadata_value() {
- local readonly varname=$1
- /usr/share/google/get_metadata_value ${varname}
- return $?
-}
-
-function do_environment() {
- echo "INSTANCE_ID=$(get_metadata_value id)" > ${GOOGLE_ENVIRONMENT}
-}
-
-function do_init() {
- log "onboot initializing"
-
- do_environment
-
- # If it exists, run the boto bootstrap script. This will set things
- # up so that gsutil will just work with any provisioned service
- # account.
- if [ -x ${BOTO_SETUP_SCRIPT} ]; then
- log "Running Boto setup script at ${BOTO_SETUP_SCRIPT}"
- ${BOTO_SETUP_SCRIPT} >> ${LOGFILE} 2>&1
- fi
-
- return 0
-}
-
-function print_ssh_key_fingerprints() {
- log "SSH public key fingerprints"
-
- if [ -e /etc/ssh/ssh_host_rsa_key.pub ]; then
- log "RSA public key"
- ssh-keygen -lf /etc/ssh/ssh_host_rsa_key.pub
- else
- log "No RSA public key found."
- fi
-
- if [ -e /etc/ssh/ssh_host_dsa_key.pub ]; then
- log "DSA public key"
- ssh-keygen -lf /etc/ssh/ssh_host_dsa_key.pub
- else
- log "No DSA public key found."
- fi
-
- if [ -e /etc/ssh/ssh_host_ecdsa_key.pub ]; then
- log "ECDSA public key"
- ssh-keygen -lf /etc/ssh/ssh_host_ecdsa_key.pub
- else
- log "No ECDSA public key found."
- fi
-
- return 0
-}
-
-function check_for_connection() {
- local count=0
- local return_code=0
-
- log "Checking for metadata server connection."
- while true; do
- ((count++))
- MDS_TRIES=1 /usr/share/google/get_metadata_value "?recursive=True"
- return_code=$?
- case ${return_code} in
- # No error. Connection is active.
- 0) break;;
- # Failed to resolve host or connect to host. Retry indefinitely.
- 6|7) sleep 1.0
- log "Waiting for metadata server, attempt ${count}"
- # After 7 minutes, add a console message denoting a probable network
- # issue. On systems using dhclient there is an attempt to obtain an IP
- # for 60 seconds followed by a 5 minute wait period. After 7 minutes,
- # this cycle will have run through twice. After this period of time, it
- # is not known when a DHCP lease might be obtained and the network
- # interface fully operational.
- if ((count >= 7*60+1)); then
- log "There is likely a problem with the network."
- fi
- continue;;
- # A genuine error but a connection exists.
- *)
- log "Check for connection non-fatal error getting metadata ${return_code}"
- break;;
- esac
- done
- # Return the last return code we got.
- return ${return_code}
-}
-
-set_interrupts
-virtionet_irq_affinity
-check_for_connection
-first_boot
-do_init
-print_ssh_key_fingerprints
-
-if [ -x /sbin/initctl ]; then
- /sbin/initctl emit --no-wait google-onboot-has-run
-fi
-
-# Indicate that we've run already.
-touch ${RUNFILE}
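
The RUNFILE guard at the top is a run-once-per-boot idiom; a minimal sketch with a hypothetical marker path (/var/run is a tmpfs on most distributions, so the marker vanishes at reboot):

    #!/bin/bash
    declare -r RUNFILE=/var/run/demo.has-run   # hypothetical marker
    [ -f "${RUNFILE}" ] && exit 0              # already ran this boot
    echo "one-time boot work goes here"
    touch "${RUNFILE}"
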
diff --git a/google-startup-scripts/usr/share/google/regenerate-host-keys b/google-startup-scripts/usr/share/google/regenerate-host-keys
deleted file mode 100755
index fb9d7fd..0000000
--- a/google-startup-scripts/usr/share/google/regenerate-host-keys
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Regenerates the SSH host keys when the VM is restarted with a new IP
-# address. Booting a VM from an image with a known SSH key allows a
-# number of attacks, so this script regenerates the host key whenever
-# the IP address changes. (This applies on firstboot, but also if the
-# VM disk has been used for another image.)
-
-log() {
- logger -t regenerate-host-keys -p auth.info -s "$@"
-}
-
-sshd_cmd() {
- local cmd=$1
- log "${cmd}ing sshd"
- if [[ -x /etc/init.d/ssh || -f /etc/init/ssh.conf ]]; then
- service ssh ${cmd}
- fi
- if [[ -x /etc/init.d/sshd || -f /etc/init/sshd.conf ]]; then
- service sshd ${cmd}
- fi
-}
-
-generate_key() {
- local key_type=$1
- local key_dest=$2
- local tmp_dir=$(mktemp -d /tmp/keystore.XXXXXXXX)
- local tmp_file="/${tmp_dir}/keyfile.$$";
- local log_file=$(mktemp);
- log "Regenerating sshd key ${key_dest}"
- ssh-keygen -N '' -t ${key_type} -f ${tmp_file} > ${log_file} 2>&1
- if [[ $? == 0 ]]; then
- rm -f ${key_dest} ${key_dest}.pub
- cp -f ${tmp_file} ${key_dest}
- cp -f ${tmp_file}.pub ${key_dest}.pub
- else
- log "Could not create sshd key ${key_dest}"
- log "$(cat ${log_file})"
- fi
- rm -rf ${tmp_dir}
- rm -f ${log_file}
-}
-
-regenerate_host_keys() {
- log "Regenerating SSH Host Keys for: $new_ip_address (previously $old_ip_address)."
- rm -f /etc/ssh/ssh_host_key /etc/ssh/ssh_host_key.pub # SSH1 RSA key.
- for key_file in /etc/ssh/ssh_host_*_key; do
- # Parse out the type of key, matching the * in the for loop command above.
- key_type=$(basename "${key_file}" _key)
- key_type=${key_type#ssh_host_}
-
- generate_key "${key_type}" "${key_file}"
- done
- # Allow sshd to come up if we were suppressing it.
- if [[ $(cat /etc/ssh/sshd_not_to_be_run 2>/dev/null) == "GOOGLE" ]]; then
- rm -f /etc/ssh/sshd_not_to_be_run
- fi
- if [[ -x /bin/systemctl ]]; then
- exit
- else
- # Start sshd if it was not running.
- sshd_cmd start
- # Reload sshd config if it already was running.
- sshd_cmd reload
- fi
-}
-
-regenerate_host_keys
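
A minimal sketch of the generate-then-swap approach in generate_key, for one assumed key type (the script above derives the type from each existing ssh_host_*_key file):

    #!/bin/bash
    # Generate a new host key in a private temp dir, then copy it into place
    # so sshd never observes a half-written key file.
    key_dest=/etc/ssh/ssh_host_ed25519_key     # assumed key type
    tmp_dir=$(mktemp -d /tmp/keystore.XXXXXXXX)
    if ssh-keygen -N '' -t ed25519 -f "${tmp_dir}/keyfile" >/dev/null 2>&1; then
      rm -f "${key_dest}" "${key_dest}.pub"
      cp -f "${tmp_dir}/keyfile" "${key_dest}"
      cp -f "${tmp_dir}/keyfile.pub" "${key_dest}.pub"
    fi
    rm -rf "${tmp_dir}"
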
diff --git a/google-startup-scripts/usr/share/google/run-scripts b/google-startup-scripts/usr/share/google/run-scripts
deleted file mode 100755
index 46af979..0000000
--- a/google-startup-scripts/usr/share/google/run-scripts
+++ /dev/null
@@ -1,54 +0,0 @@
-#! /bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Run startup scripts that should happen "Late" at boot.
-# Run shutdown scripts that should happen as soon as the instances
-# begin to power down.
-#
-# Do NOT "set -e"
-declare -r SCRIPT=$1
-declare -r SCRIPT_TYPE=$2
-
-if [ -x /usr/bin/logger ]; then
- declare -r LOGGER=/usr/bin/logger
-else
- declare -r LOGGER=/bin/logger
-fi
-
-LOG_CMD="${LOGGER} -t ${SCRIPT_TYPE}script -p daemon.info"
-
-function log() {
- echo "$@" | ${LOG_CMD}
-}
-
-declare -r GOOGLE_ENVIRONMENT=/var/run/google.environment
-
-function copy_and_run() {
- local source=$1
- local dest=$(mktemp)
- cat "${source}" >> "${dest}"
- chmod u+x "${dest}"
- log "Running ${SCRIPT_TYPE} script ${source}"
- "${dest}" 2>&1 | ${LOG_CMD}
- log "Finished running ${SCRIPT_TYPE} script ${source}"
- rm -f "${dest}"
-}
-
-if [[ -e "${SCRIPT}" ]]; then
- (
- [ -r ${GOOGLE_ENVIRONMENT} ] && source ${GOOGLE_ENVIRONMENT};
- copy_and_run "${SCRIPT}"
- )
-fi
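
The copy_and_run pattern generalizes: execute a private copy (so the source cannot change mid-run) and pipe all output to syslog. A minimal sketch; the tag and source path are assumptions:

    #!/bin/bash
    src=/tmp/demo-script.sh
    dest=$(mktemp)
    cat "${src}" >> "${dest}"      # run a private copy, not the original
    chmod u+x "${dest}"
    "${dest}" 2>&1 | logger -t demoscript -p daemon.info
    rm -f "${dest}"
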
diff --git a/google-startup-scripts/usr/share/google/run-shutdown-scripts b/google-startup-scripts/usr/share/google/run-shutdown-scripts
deleted file mode 100755
index 61377e9..0000000
--- a/google-startup-scripts/usr/share/google/run-shutdown-scripts
+++ /dev/null
@@ -1,31 +0,0 @@
-#! /bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Run shutdown scripts that should happen as soon as the instances
-# begin to power down.
-#
-# Do NOT "set -e"
-PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin
-
-declare -r SHUTDOWN_SCRIPT=/var/run/google.shutdown.script
-
-# NOTE
-# Make sure that the shutdown script completes within 90 seconds, so
-# that the OS has time to complete its shutdown, including flushing
-# buffers to disk.
-#
-# The shutdown script blocks other shutdown operations from proceeding.
-/usr/share/google/fetch_script ${SHUTDOWN_SCRIPT} shutdown
-/usr/share/google/run-scripts ${SHUTDOWN_SCRIPT} shutdown
diff --git a/google-startup-scripts/usr/share/google/run-startup-scripts b/google-startup-scripts/usr/share/google/run-startup-scripts
deleted file mode 100755
index b9e2667..0000000
--- a/google-startup-scripts/usr/share/google/run-startup-scripts
+++ /dev/null
@@ -1,27 +0,0 @@
-#! /bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Run startup scripts that should happen "Late" at boot.
-#
-# Do NOT "set -e"
-PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin
-
-declare -r STARTUP_SCRIPT=/var/run/google.startup.script
-
-# Make sure all udev changes settle before running startup scripts.
-udevadm settle
-
-/usr/share/google/fetch_script ${STARTUP_SCRIPT} startup
-/usr/share/google/run-scripts ${STARTUP_SCRIPT} startup
diff --git a/google-startup-scripts/usr/share/google/safe_format_and_mount b/google-startup-scripts/usr/share/google/safe_format_and_mount
deleted file mode 100755
index 8e68037..0000000
--- a/google-startup-scripts/usr/share/google/safe_format_and_mount
+++ /dev/null
@@ -1,152 +0,0 @@
-#! /bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Mount a disk, formatting it if necessary. If the disk looks like it may
-# have been formatted before, we will not format it.
-#
-# This script uses blkid and file to search for magic "formatted" bytes
-# at the beginning of the disk. Furthermore, it attempts to use fsck to
-# repair the filesystem before formatting it.
-
-FSCK=fsck.ext4
-MOUNT_OPTIONS="discard,defaults"
-MKFS="mkfs.ext4 -E lazy_itable_init=0,lazy_journal_init=0 -F"
-if [ -f /etc/redhat-release ]; then
- if grep -q '6\..' /etc/redhat-release; then
- # lazy_journal_init is not recognized in redhat 6
- MKFS="mkfs.ext4 -E lazy_itable_init=0 -F"
- elif grep -q '7\..' /etc/redhat-release; then
- FSCK=fsck.xfs
- MKFS=mkfs.xfs
- fi
-fi
-
-LOGTAG=safe_format_and_mount
-LOGFACILITY=user
-
-function log() {
- local readonly severity=$1; shift;
- logger -t ${LOGTAG} -p ${LOGFACILITY}.${severity} -s "$@"
-}
-
-function log_command() {
- local readonly log_file=$(mktemp)
- local readonly retcode
- log info "Running: $*"
- $* > ${log_file} 2>&1
- retcode=$?
- # only return the last 1000 lines of the logfile, just in case it's HUGE.
- tail -1000 ${log_file} | logger -t ${LOGTAG} -p ${LOGFACILITY}.info -s
- rm -f ${log_file}
- return ${retcode}
-}
-
-function help() {
- cat >&2 <<EOF
-$0 [-f fsck_cmd] [-m mkfs_cmd] [-o mount_opts] <device> <mountpoint>
-EOF
- exit 0
-}
-
-while getopts ":hf:o:m:" opt; do
- case $opt in
- h) help;;
- f) FSCK=$OPTARG;;
- o) MOUNT_OPTIONS=$OPTARG;;
- m) MKFS=$OPTARG;;
- -) break;;
- \?) log error "Invalid option: -${OPTARG}"; exit 1;;
- :) log "Option -${OPTARG} requires an argument."; exit 1;;
- esac
-done
-
-shift $(($OPTIND - 1))
-readonly DISK=$1
-readonly MOUNTPOINT=$2
-
-[[ -z ${DISK} ]] && help
-[[ -z ${MOUNTPOINT} ]] && help
-
-function disk_looks_formatted() {
- blkid ${DISK}
- if [[ $? == 0 ]]; then
- return 0
- fi
-
- local readonly file_type=$(file --special-files ${DISK})
- case ${file_type} in
- *filesystem*)
- return 0;;
- esac
-
- return 1
-}
-
-function format_disk() {
- log_command ${MKFS} ${DISK}
-}
-
-function try_repair_disk() {
- log_command ${FSCK} -a ${DISK}
- local readonly fsck_return=$?
- if [[ ${fsck_return} -ge 8 ]]; then
- log error "Fsck could not correct errors on ${DISK}"
- return 1
- fi
- if [[ ${fsck_return} -gt 0 ]]; then
- log warning "Fsck corrected errors on ${DISK}"
- fi
- return 0
-}
-
-function try_mount() {
- local mount_retcode
- try_repair_disk
-
- log_command mount -o ${MOUNT_OPTIONS} ${DISK} ${MOUNTPOINT}
- mount_retcode=$?
- if [[ ${mount_retcode} == 0 ]]; then
- return 0
- fi
-
- # Check to see if it looks like a filesystem before formatting it.
- disk_looks_formatted ${DISK}
- if [[ $? == 0 ]]; then
- log error "Disk ${DISK} looks formatted but won't mount. Giving up."
- return ${mount_retcode}
- fi
-
- # The disk looks like it's not been formatted before.
- format_disk
- if [[ $? != 0 ]]; then
- log error "Format of ${DISK} failed."
- fi
-
- log_command mount -o ${MOUNT_OPTIONS} ${DISK} ${MOUNTPOINT}
- mount_retcode=$?
- if [[ ${mount_retcode} == 0 ]]; then
- return 0
- fi
- log error "Tried everything we could, but could not mount ${DISK}."
- return ${mount_retcode}
-}
-
-log warn "====================================================================="
-log warn "WARNING: safe_format_and_mount is deprecated."
-log warn "See https://cloud.google.com/compute/docs/disks/persistent-disks"
-log warn "for additional instructions."
-log warn "====================================================================="
-try_mount
-exit $?
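
Typical usage of this (deprecated) helper, overriding the defaults; the device and mountpoint are illustrative:

    # Mount /dev/sdb at /mnt/data, formatting it as ext4 only if no
    # filesystem signature is detected on the device.
    /usr/share/google/safe_format_and_mount \
      -m "mkfs.ext4 -F" -o "discard,defaults" /dev/sdb /mnt/data
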
diff --git a/google-startup-scripts/usr/share/google/set-hostname b/google-startup-scripts/usr/share/google/set-hostname
deleted file mode 100755
index 9b71e4d..0000000
--- a/google-startup-scripts/usr/share/google/set-hostname
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Deal with a new hostname assignment.
-
-if [ -n "$new_host_name" ] && [ -n "$new_ip_address" ]; then
- # Delete entries with new_host_name or new_ip_address in /etc/hosts.
- sed -i '/Added by Google/d' /etc/hosts
-
- # Add an entry for our new_host_name/new_ip_address in /etc/hosts.
- echo "${new_ip_address} ${new_host_name} ${new_host_name%%.*} # Added by Google" >> /etc/hosts
-fi
-
-# The /sbin/dhclient-script in both Ubuntu and CentOS has some problems for us:
-# 1) BOUND doesn't always set the hostname (e.g. if old_host_name is unset
-# in Precise Pangolin)
-# 2) Using an overly long FQDN as the hostname causes some tools to break
-# in some distros (e.g. ssh-keygen), and the hostname tool complains when
-# given an FQDN longer than 64 bytes.
-#
-# As a result, we set the host name in all circumstances here, to the truncated
-# unqualified domain name.
-
-if [ -n "$new_host_name" ]; then
- hostname ${new_host_name%%.*}
-
- # Let syslogd know we've changed the hostname.
- pkill -HUP syslogd
-fi
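
new_host_name and new_ip_address come from dhclient's hook environment; simulating them shows the effect (run as root; the values are illustrative):

    new_ip_address=10.240.0.2 new_host_name=myvm.c.project.internal \
      bash /usr/share/google/set-hostname
    grep 'Added by Google' /etc/hosts
    # 10.240.0.2 myvm.c.project.internal myvm # Added by Google
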
diff --git a/google-startup-scripts/usr/share/google/set-interrupts b/google-startup-scripts/usr/share/google/set-interrupts
deleted file mode 100755
index 36ccaec..0000000
--- a/google-startup-scripts/usr/share/google/set-interrupts
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/bin/bash
-
-total_cpus=`nproc`
-
-config_nvme()
-{
- current_cpu=0
- for dev in /sys/bus/pci/drivers/nvme/*
- do
- if [ ! -d $dev ]
- then
- continue
- fi
- for irq_info in $dev/msi_irqs/*
- do
- if [ ! -f $irq_info ]
- then
- continue
- fi
- current_cpu=$((current_cpu % total_cpus))
- cpu_mask=`printf "%x" $((1<<current_cpu))`
- irq=$(basename $irq_info)
- echo Setting IRQ $irq smp_affinity to $cpu_mask
- echo $cpu_mask > /proc/irq/$irq/smp_affinity
- current_cpu=$((current_cpu+1))
- done
- done
-}
-
-config_scsi()
-{
- irqs=()
- for device in /sys/bus/virtio/drivers/virtio_scsi/virtio*
- do
- ssd=0
- for target_path in $device/host*/target*/*
- do
- if [ ! -f $target_path/model ]
- then
- continue
- fi
- model=$(cat $target_path/model)
- if [[ $model =~ .*EphemeralDisk.* ]]
- then
- ssd=1
- for queue_path in $target_path/block/sd*/queue
- do
- echo noop > $queue_path/scheduler
- echo 0 > $queue_path/add_random
- echo 512 > $queue_path/nr_requests
- echo 0 > $queue_path/rotational
- echo 0 > $queue_path/rq_affinity
- echo 1 > $queue_path/nomerges
- done
- fi
- done
- if [[ $ssd == 1 ]]
- then
- request_queue=$(basename $device)-request
- irq=$(grep "${request_queue}" /proc/interrupts | awk '{print $1}' | sed 's/://')
- irqs+=($irq)
- fi
- done
- irq_count=${#irqs[@]}
- if [ $irq_count != 0 ]
- then
- stride=$((total_cpus / irq_count))
- stride=$((stride < 1 ? 1 : stride))
- current_cpu=0
- for irq in "${irqs[@]}"
- do
- current_cpu=$(($current_cpu % $total_cpus))
- cpu_mask=`printf "%x" $((1<<$current_cpu))`
- echo Setting IRQ $irq smp_affinity to $cpu_mask
- echo $cpu_mask > /proc/irq/$irq/smp_affinity
- current_cpu=$((current_cpu+stride))
- done
- fi
-}
-
-config_nvme
-config_scsi
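
The masks written to smp_affinity are single-CPU bitmasks: bit N selects CPU N, and wrapping by the CPU count round-robins IRQs across processors. A small sketch of the computation:

    #!/bin/bash
    total_cpus=$(nproc)
    for irq_index in 0 1 2 3; do
      current_cpu=$((irq_index % total_cpus))
      cpu_mask=$(printf "%x" $((1 << current_cpu)))
      echo "IRQ slot ${irq_index} -> CPU ${current_cpu}, mask ${cpu_mask}"
    done
    # On a 2-CPU VM the masks cycle 1, 2, 1, 2.
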
diff --git a/google-startup-scripts/usr/share/google/virtionet-irq-affinity b/google-startup-scripts/usr/share/google/virtionet-irq-affinity
deleted file mode 100755
index 6b86ee2..0000000
--- a/google-startup-scripts/usr/share/google/virtionet-irq-affinity
+++ /dev/null
@@ -1,141 +0,0 @@
-#! /bin/bash
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# For a single-queue / no MSI-X virtionet device, sets the IRQ affinities to
-# processor 0. For this virtionet configuration, distributing IRQs to all
-# processors results in comparatively high cpu utilization and comparatively
-# low network bandwidth.
-#
-# For a multi-queue / MSI-X virtionet device, sets the IRQ affinities to the
-# per-IRQ affinity hint. The virtionet driver maps each virtionet TX (RX) queue
-# MSI-X interrupt to a unique single CPU if the number of TX (RX) queues equals
-# the number of online CPUs. The mapping of network MSI-X interrupt vector to
-# CPUs is stored in the virtionet MSI-X interrupt vector affinity hint. This
-# configuration allows network traffic to be spread across the CPUs, giving
-# each CPU a dedicated TX and RX network queue, while ensuring that all packets
-# from a single flow are delivered to the same CPU.
-
-function log() {
- if [[ -x ${PREFIX}/usr/bin/logger ]]; then
- echo $* | ${PREFIX}/usr/bin/logger -t virtionet-irq-affinity -p daemon.info
- else
- echo $* >&2
- fi
-}
-
-function is_decimal_int() {
- [ "${1}" -eq "${1}" ] > /dev/null 2>&1
-}
-
-function set_channels() {
- ethtool -L "${1}" combined "${2}" > /dev/null 2>&1
-}
-
-log "Running $(basename $0)"
-NET_DEVS=/sys/bus/virtio/drivers/virtio_net/virtio*
-
-# Loop through all the virtionet devices and enable multi-queue
-if [ -x /sbin/ethtool ]; then
- for dev in $NET_DEVS; do
- ETH_DEVS=${dev}/net/*
- for eth_dev in $ETH_DEVS; do
- eth_dev=$(basename "$eth_dev")
- if ! errormsg=$(ethtool -l "$eth_dev" 2>&1); then
- log "/sbin/ethtool says that $eth_dev does not support virtionet multiqueue: $errormsg"
- continue
- fi
- num_max_channels=$(ethtool -l "$eth_dev" | grep -m 1 Combined | cut -f2)
- [ "${num_max_channels}" -eq "1" ] && continue
- if is_decimal_int "$num_max_channels" && \
- set_channels "$eth_dev" "$num_max_channels"; then
- log "Set channels for $eth_dev to $num_max_channels"
- else
- log "Could not set channels for $eth_dev to $num_max_channels"
- fi
- done
- done
-else
- log "/sbin/ethtool not found: cannot configure virtionet multiqueue"
-fi
-
-for dev in $NET_DEVS
-do
- dev=$(basename "$dev")
- irq_dir=/proc/irq/*
- for irq in $irq_dir
- do
- smp_affinity="${irq}/smp_affinity"
- [ ! -f "${smp_affinity}" ] && continue
- # Classify this IRQ as virtionet intx, virtionet MSI-X, or non-virtionet
- # If the IRQ type is virtionet intx, a subdirectory with the same name as
- # the device will be present. If the IRQ type is virtionet MSI-X, then
- # a subdirectory of the form <device name>-<input|output>.N will exist.
- # In this case, N is the input (output) queue number, and is specified as
- # a decimal integer ranging from 0 to K - 1 where K is the number of
- # input (output) queues in the virtionet device.
- virtionet_intx_dir="${irq}/${dev}"
- virtionet_msix_dir_regex=".*/${dev}-(input|output)\.[0-9]+$"
- if [ -d "${virtionet_intx_dir}" ]; then
- # All virtionet intx IRQs are delivered to CPU 0
- log "Setting ${smp_affinity} to 01 for device ${dev}"
- echo "01" > ${smp_affinity}
- continue
- fi
- # Not virtionet intx, probe for MSI-X
- virtionet_msix_found=0
- for entry in ${irq}/${dev}*; do
- if [[ "$entry" =~ ${virtionet_msix_dir_regex} ]]; then
- virtionet_msix_found=1
- fi
- done
- affinity_hint="${irq}/affinity_hint"
- [ "$virtionet_msix_found" -eq 0 -o ! -f "${affinity_hint}" ] && continue
-
- # The affinity hint file contains a CPU mask, consisting of
- # groups of up to 8 hexadecimal digits, separated by a comma. Each bit
- # position in the CPU mask hex value specifies whether this interrupt
- # should be delivered to the corresponding CPU. For example, if bits 0
- # and 3 are set in the affinity hint CPU mask hex value, then the
- # interrupt should be delivered to CPUs 0 and 3. The virtionet device
- # driver should set only a single bit in the affinity hint per MSI-X
- # interrupt, ensuring each TX (RX) queue is used only by a single CPU.
- # The virtionet driver will only specify an affinity hint if the number of
- # TX (RX) queues equals the number of online CPUs. If no affinity hint is
- # specified for an IRQ, the affinity hint file will contain all zeros.
- affinity_cpumask=$(cat "${affinity_hint}")
- affinity_hint_enabled=0
- # Parse the affinity hint, skip if mask is invalid or is empty (all-zeros)
- OIFS=${IFS}
- IFS=","
- for cpu_bitmap in ${affinity_cpumask}; do
- bitmap_val=$(printf "%d" "0x${cpu_bitmap}" 2>/dev/null)
- if [ "$?" -ne 0 ]; then
- log "Invalid affinity hint ${affinity_hint}: ${affinity_cpumask}"
- affinity_hint_enabled=0
- break
- elif [ "${bitmap_val}" -ne 0 ]; then
- affinity_hint_enabled=1
- fi
- done
- IFS=${OIFS}
- if [ "${affinity_hint_enabled}" -eq 0 ]; then
- log "Cannot set IRQ affinity ${smp_affinity}, affinity hint disabled"
- else
- # Set the IRQ CPU affinity to the virtionet-initialized affinity hint
- log "Setting ${smp_affinity} to ${affinity_cpumask} for device ${dev}"
- echo "${affinity_cpumask}" > "${smp_affinity}"
- fi
- done
-done
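
A minimal sketch of the hint validation loop above: split the mask on commas and accept it only if every group parses as hex and at least one is non-zero:

    #!/bin/bash
    mask_enabled() {
      local group val enabled=0
      local OIFS=${IFS}; IFS=","
      for group in $1; do
        if ! val=$(printf "%d" "0x${group}" 2>/dev/null); then
          enabled=0; break             # invalid hex: treat the hint as disabled
        elif [ "${val}" -ne 0 ]; then
          enabled=1                    # at least one CPU bit is set
        fi
      done
      IFS=${OIFS}
      return $((1 - enabled))
    }
    mask_enabled "00000000,00000005" && echo "hint usable"    # hint usable
    mask_enabled "00000000,00000000" || echo "hint disabled"  # hint disabled
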
diff --git a/legacy/README.md b/legacy/README.md
deleted file mode 100644
index 524aeb2..0000000
--- a/legacy/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-## Legacy packages
-
-gcimagebundle is deprecated and is provided here as is with no further
-support or maintenance. See [replacement
-instructions](https://cloud.google.com/compute/docs/creating-custom-image#export_an_image_to_google_cloud_storage).
diff --git a/legacy/gcimagebundle/LICENSE b/legacy/gcimagebundle/LICENSE
deleted file mode 100644
index 04cb0d7..0000000
--- a/legacy/gcimagebundle/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2013 Google Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/legacy/gcimagebundle/MANIFEST.in b/legacy/gcimagebundle/MANIFEST.in
deleted file mode 100644
index 6bbb29c..0000000
--- a/legacy/gcimagebundle/MANIFEST.in
+++ /dev/null
@@ -1,4 +0,0 @@
-include *.md
-include distribute_setup.py
-include LICENSE
-include VERSION
diff --git a/legacy/gcimagebundle/README b/legacy/gcimagebundle/README
deleted file mode 100644
index 13afc26..0000000
--- a/legacy/gcimagebundle/README
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-Image bundling tool for root file system.
-
-Note: This tool is deprecated. Please see alternate instructions at
-https://cloud.google.com/compute/docs/creating-custom-image#export_an_image_to_google_cloud_storage
-
-To build a root filesystem tar:
-$ sudo gcimagebundle -d /dev/sda -r / -o /tmp \
- --loglevel=DEBUG --log_file=/tmp/gcimagebundle.log
-
-This will output the image tar in the output directory
-specified with -o option.
-
-Note that this is copied out file by file into the default Google image.
-
-To run the unit tests:
-$ sudo python setup.py test
diff --git a/legacy/gcimagebundle/README.md b/legacy/gcimagebundle/README.md
deleted file mode 100644
index e3ee03d..0000000
--- a/legacy/gcimagebundle/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-Image Bundle
-============
-
-_Image Bundle is deprecated. Please see alternate instructions for [Exporting an image to Google Cloud Storage](https://cloud.google.com/compute/docs/creating-custom-image#export_an_image_to_google_cloud_storage)._
-
-Image Bundle is a Python package that allows users to create an image from the current state of the running virtual machine. Image Bundle creates the image with the recommended packaging format and also allows you to run unit tests to verify that Image Bundle works properly on your operating system. See [Custom Images](https://cloud.google.com/compute/docs/creating-custom-image) for more information.
-
-### Installation
-
- $ sudo python setup.py install
-
-### Usage
-
-To build a root filesystem tar:
-
- $ sudo gcimagebundle -d /dev/sda -r / -o /tmp \
- --loglevel=DEBUG --log_file=/tmp/image_bundle.log
-
-This will output the image tar in the output directory specified with -o option.
-
-For details on all the parameters, use:
-
- $ sudo gcimagebundle --help
-
-### Unit Tests
-
-Image Bundle includes unit tests that should be run if you make any changes. These tests perform mount operations, so root access is required.
-
- $ sudo python setup.py test
-
-### Packaging
-
-Since Image Bundle uses setuptools, it can be packaged as a DEB or RPM.
-
-Install the required dependencies:
-
- # For Debian based distributions
- $ sudo apt-get install python-stdeb rpm
- # For Red-Hat based distributions
- $ sudo yum install rpm-build
-
-DEB package:
-
- $ python setup.py --command-packages=stdeb.command bdist_deb
-
-RPM package:
-
- $ python setup.py bdist_rpm
diff --git a/legacy/gcimagebundle/VERSION b/legacy/gcimagebundle/VERSION
deleted file mode 100644
index 3a3cd8c..0000000
--- a/legacy/gcimagebundle/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-1.3.1
diff --git a/legacy/gcimagebundle/distribute_setup.py b/legacy/gcimagebundle/distribute_setup.py
deleted file mode 100644
index 3553b21..0000000
--- a/legacy/gcimagebundle/distribute_setup.py
+++ /dev/null
@@ -1,556 +0,0 @@
-#!python
-"""Bootstrap distribute installation
-
-If you want to use setuptools in your package's setup.py, just include this
-file in the same directory with it, and add this to the top of your setup.py::
-
- from distribute_setup import use_setuptools
- use_setuptools()
-
-If you want to require a specific version of setuptools, set a download
-mirror, or use an alternate download directory, you can do so by supplying
-the appropriate options to ``use_setuptools()``.
-
-This file can also be run as a script to install or upgrade setuptools.
-"""
-import os
-import shutil
-import sys
-import time
-import fnmatch
-import tempfile
-import tarfile
-import optparse
-
-from distutils import log
-
-try:
- from site import USER_SITE
-except ImportError:
- USER_SITE = None
-
-try:
- import subprocess
-
- def _python_cmd(*args):
- args = (sys.executable,) + args
- return subprocess.call(args) == 0
-
-except ImportError:
- # will be used for python 2.3
- def _python_cmd(*args):
- args = (sys.executable,) + args
- # quoting arguments if windows
- if sys.platform == 'win32':
- def quote(arg):
- if ' ' in arg:
- return '"%s"' % arg
- return arg
- args = [quote(arg) for arg in args]
- return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
-
-DEFAULT_VERSION = "0.6.49"
-DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
-SETUPTOOLS_FAKED_VERSION = "0.6c11"
-
-SETUPTOOLS_PKG_INFO = """\
-Metadata-Version: 1.0
-Name: setuptools
-Version: %s
-Summary: xxxx
-Home-page: xxx
-Author: xxx
-Author-email: xxx
-License: xxx
-Description: xxx
-""" % SETUPTOOLS_FAKED_VERSION
-
-
-def _install(tarball, install_args=()):
- # extracting the tarball
- tmpdir = tempfile.mkdtemp()
- log.warn('Extracting in %s', tmpdir)
- old_wd = os.getcwd()
- try:
- os.chdir(tmpdir)
- tar = tarfile.open(tarball)
- _extractall(tar)
- tar.close()
-
- # going in the directory
- subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
- os.chdir(subdir)
- log.warn('Now working in %s', subdir)
-
- # installing
- log.warn('Installing Distribute')
- if not _python_cmd('setup.py', 'install', *install_args):
- log.warn('Something went wrong during the installation.')
- log.warn('See the error message above.')
- # exitcode will be 2
- return 2
- finally:
- os.chdir(old_wd)
- shutil.rmtree(tmpdir)
-
-
-def _build_egg(egg, tarball, to_dir):
- # extracting the tarball
- tmpdir = tempfile.mkdtemp()
- log.warn('Extracting in %s', tmpdir)
- old_wd = os.getcwd()
- try:
- os.chdir(tmpdir)
- tar = tarfile.open(tarball)
- _extractall(tar)
- tar.close()
-
- # going in the directory
- subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
- os.chdir(subdir)
- log.warn('Now working in %s', subdir)
-
- # building an egg
- log.warn('Building a Distribute egg in %s', to_dir)
- _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
-
- finally:
- os.chdir(old_wd)
- shutil.rmtree(tmpdir)
- # returning the result
- log.warn(egg)
- if not os.path.exists(egg):
- raise IOError('Could not build the egg.')
-
-
-def _do_download(version, download_base, to_dir, download_delay):
- egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
- % (version, sys.version_info[0], sys.version_info[1]))
- if not os.path.exists(egg):
- tarball = download_setuptools(version, download_base,
- to_dir, download_delay)
- _build_egg(egg, tarball, to_dir)
- sys.path.insert(0, egg)
- import setuptools
- setuptools.bootstrap_install_from = egg
-
-
-def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
- to_dir=os.curdir, download_delay=15, no_fake=True):
- # making sure we use the absolute path
- to_dir = os.path.abspath(to_dir)
- was_imported = 'pkg_resources' in sys.modules or \
- 'setuptools' in sys.modules
- try:
- try:
- import pkg_resources
-
- # Setuptools 0.7b and later is a suitable (and preferable)
- # substitute for any Distribute version.
- try:
- pkg_resources.require("setuptools>=0.7b")
- return
- except (pkg_resources.DistributionNotFound,
- pkg_resources.VersionConflict):
- pass
-
- if not hasattr(pkg_resources, '_distribute'):
- if not no_fake:
- _fake_setuptools()
- raise ImportError
- except ImportError:
- return _do_download(version, download_base, to_dir, download_delay)
- try:
- pkg_resources.require("distribute>=" + version)
- return
- except pkg_resources.VersionConflict:
- e = sys.exc_info()[1]
- if was_imported:
- sys.stderr.write(
- "The required version of distribute (>=%s) is not available,\n"
- "and can't be installed while this script is running. Please\n"
- "install a more recent version first, using\n"
- "'easy_install -U distribute'."
- "\n\n(Currently using %r)\n" % (version, e.args[0]))
- sys.exit(2)
- else:
- del pkg_resources, sys.modules['pkg_resources'] # reload ok
- return _do_download(version, download_base, to_dir,
- download_delay)
- except pkg_resources.DistributionNotFound:
- return _do_download(version, download_base, to_dir,
- download_delay)
- finally:
- if not no_fake:
- _create_fake_setuptools_pkg_info(to_dir)
-
-
-def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
- to_dir=os.curdir, delay=15):
- """Download distribute from a specified location and return its filename
-
- `version` should be a valid distribute version number that is available
- as an egg for download under the `download_base` URL (which should end
- with a '/'). `to_dir` is the directory where the egg will be downloaded.
- `delay` is the number of seconds to pause before an actual download
- attempt.
- """
- # making sure we use the absolute path
- to_dir = os.path.abspath(to_dir)
- try:
- from urllib.request import urlopen
- except ImportError:
- from urllib2 import urlopen
- tgz_name = "distribute-%s.tar.gz" % version
- url = download_base + tgz_name
- saveto = os.path.join(to_dir, tgz_name)
- src = dst = None
- if not os.path.exists(saveto): # Avoid repeated downloads
- try:
- log.warn("Downloading %s", url)
- src = urlopen(url)
- # Read/write all in one block, so we don't create a corrupt file
- # if the download is interrupted.
- data = src.read()
- dst = open(saveto, "wb")
- dst.write(data)
- finally:
- if src:
- src.close()
- if dst:
- dst.close()
- return os.path.realpath(saveto)
-
-
-def _no_sandbox(function):
- def __no_sandbox(*args, **kw):
- try:
- from setuptools.sandbox import DirectorySandbox
- if not hasattr(DirectorySandbox, '_old'):
- def violation(*args):
- pass
- DirectorySandbox._old = DirectorySandbox._violation
- DirectorySandbox._violation = violation
- patched = True
- else:
- patched = False
- except ImportError:
- patched = False
-
- try:
- return function(*args, **kw)
- finally:
- if patched:
- DirectorySandbox._violation = DirectorySandbox._old
- del DirectorySandbox._old
-
- return __no_sandbox
-
-
-def _patch_file(path, content):
- """Will backup the file then patch it"""
- f = open(path)
- existing_content = f.read()
- f.close()
- if existing_content == content:
- # already patched
- log.warn('Already patched.')
- return False
- log.warn('Patching...')
- _rename_path(path)
- f = open(path, 'w')
- try:
- f.write(content)
- finally:
- f.close()
- return True
-
-_patch_file = _no_sandbox(_patch_file)
-
-
-def _same_content(path, content):
- f = open(path)
- existing_content = f.read()
- f.close()
- return existing_content == content
-
-
-def _rename_path(path):
- new_name = path + '.OLD.%s' % time.time()
- log.warn('Renaming %s to %s', path, new_name)
- os.rename(path, new_name)
- return new_name
-
-
-def _remove_flat_installation(placeholder):
- if not os.path.isdir(placeholder):
- log.warn('Unknown installation at %s', placeholder)
- return False
- found = False
- for file in os.listdir(placeholder):
- if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
- found = True
- break
- if not found:
- log.warn('Could not locate setuptools*.egg-info')
- return
-
- log.warn('Moving elements out of the way...')
- pkg_info = os.path.join(placeholder, file)
- if os.path.isdir(pkg_info):
- patched = _patch_egg_dir(pkg_info)
- else:
- patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
-
- if not patched:
- log.warn('%s already patched.', pkg_info)
- return False
- # now let's move the files out of the way
- for element in ('setuptools', 'pkg_resources.py', 'site.py'):
- element = os.path.join(placeholder, element)
- if os.path.exists(element):
- _rename_path(element)
- else:
- log.warn('Could not find the %s element of the '
- 'Setuptools distribution', element)
- return True
-
-_remove_flat_installation = _no_sandbox(_remove_flat_installation)
-
-
-def _after_install(dist):
- log.warn('After install bootstrap.')
- placeholder = dist.get_command_obj('install').install_purelib
- _create_fake_setuptools_pkg_info(placeholder)
-
-
-def _create_fake_setuptools_pkg_info(placeholder):
- if not placeholder or not os.path.exists(placeholder):
- log.warn('Could not find the install location')
- return
- pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
- setuptools_file = 'setuptools-%s-py%s.egg-info' % \
- (SETUPTOOLS_FAKED_VERSION, pyver)
- pkg_info = os.path.join(placeholder, setuptools_file)
- if os.path.exists(pkg_info):
- log.warn('%s already exists', pkg_info)
- return
-
- log.warn('Creating %s', pkg_info)
- try:
- f = open(pkg_info, 'w')
- except EnvironmentError:
- log.warn("Don't have permissions to write %s, skipping", pkg_info)
- return
- try:
- f.write(SETUPTOOLS_PKG_INFO)
- finally:
- f.close()
-
- pth_file = os.path.join(placeholder, 'setuptools.pth')
- log.warn('Creating %s', pth_file)
- f = open(pth_file, 'w')
- try:
- f.write(os.path.join(os.curdir, setuptools_file))
- finally:
- f.close()
-
-_create_fake_setuptools_pkg_info = _no_sandbox(
- _create_fake_setuptools_pkg_info
-)
-
-
-def _patch_egg_dir(path):
- # let's check if it's already patched
- pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
- if os.path.exists(pkg_info):
- if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
- log.warn('%s already patched.', pkg_info)
- return False
- _rename_path(path)
- os.mkdir(path)
- os.mkdir(os.path.join(path, 'EGG-INFO'))
- pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
- f = open(pkg_info, 'w')
- try:
- f.write(SETUPTOOLS_PKG_INFO)
- finally:
- f.close()
- return True
-
-_patch_egg_dir = _no_sandbox(_patch_egg_dir)
-
-
-def _before_install():
- log.warn('Before install bootstrap.')
- _fake_setuptools()
-
-
-def _under_prefix(location):
- if 'install' not in sys.argv:
- return True
- args = sys.argv[sys.argv.index('install') + 1:]
- for index, arg in enumerate(args):
- for option in ('--root', '--prefix'):
- if arg.startswith('%s=' % option):
- top_dir = arg.split('%s=' % option)[-1]
- return location.startswith(top_dir)
- elif arg == option:
- if len(args) > index + 1:
- top_dir = args[index + 1]
- return location.startswith(top_dir)
- if arg == '--user' and USER_SITE is not None:
- return location.startswith(USER_SITE)
- return True
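For illustration, the check behaves as follows on a typical command line (paths are made up; this assumes the module globals such as USER_SITE are loaded):

    import sys

    sys.argv = ['setup.py', 'install', '--prefix=/opt/py']
    _under_prefix('/opt/py/lib/python2.7/site-packages')  # True: under prefix
    _under_prefix('/usr/lib/python2.7/site-packages')     # False: left alone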
-
-
-def _fake_setuptools():
- log.warn('Scanning installed packages')
- try:
- import pkg_resources
- except ImportError:
- # we're cool
- log.warn('Setuptools or Distribute does not seem to be installed.')
- return
- ws = pkg_resources.working_set
- try:
- setuptools_dist = ws.find(
- pkg_resources.Requirement.parse('setuptools', replacement=False)
- )
- except TypeError:
- # old distribute API
- setuptools_dist = ws.find(
- pkg_resources.Requirement.parse('setuptools')
- )
-
- if setuptools_dist is None:
- log.warn('No setuptools distribution found')
- return
- # detecting if it was already faked
- setuptools_location = setuptools_dist.location
- log.warn('Setuptools installation detected at %s', setuptools_location)
-
- # If --root or --prefix was provided and setuptools is not located
- # under them, we don't patch it.
- if not _under_prefix(setuptools_location):
- log.warn('Not patching, --root or --prefix is installing Distribute'
- ' in another location')
- return
-
- # let's see if it's an egg
- if not setuptools_location.endswith('.egg'):
- log.warn('Non-egg installation')
- res = _remove_flat_installation(setuptools_location)
- if not res:
- return
- else:
- log.warn('Egg installation')
- pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
- if (os.path.exists(pkg_info) and
- _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
- log.warn('Already patched.')
- return
- log.warn('Patching...')
- # let's create a fake egg replacing setuptools one
- res = _patch_egg_dir(setuptools_location)
- if not res:
- return
- log.warn('Patching complete.')
- _relaunch()
-
-
-def _relaunch():
- log.warn('Relaunching...')
- # we have to relaunch the process
- # only relaunch for the command lines pip uses, to avoid a relaunch bug
- _cmd1 = ['-c', 'install', '--single-version-externally-managed']
- _cmd2 = ['-c', 'install', '--record']
- if sys.argv[:3] == _cmd1 or sys.argv[:3] == _cmd2:
- sys.argv[0] = 'setup.py'
- args = [sys.executable] + sys.argv
- sys.exit(subprocess.call(args))
-
-
-def _extractall(self, path=".", members=None):
- """Extract all members from the archive to the current working
- directory and set owner, modification time and permissions on
- directories afterwards. `path' specifies a different directory
- to extract to. `members' is optional and must be a subset of the
- list returned by getmembers().
- """
- import copy
- import operator
- from tarfile import ExtractError
- directories = []
-
- if members is None:
- members = self
-
- for tarinfo in members:
- if tarinfo.isdir():
- # Extract directories with a safe mode.
- directories.append(tarinfo)
- tarinfo = copy.copy(tarinfo)
- tarinfo.mode = 448 # decimal for oct 0700
- self.extract(tarinfo, path)
-
- # Reverse sort directories.
- if sys.version_info < (2, 4):
- def sorter(dir1, dir2):
- return cmp(dir1.name, dir2.name)
- directories.sort(sorter)
- directories.reverse()
- else:
- directories.sort(key=operator.attrgetter('name'), reverse=True)
-
- # Set correct owner, mtime and filemode on directories.
- for tarinfo in directories:
- dirpath = os.path.join(path, tarinfo.name)
- try:
- self.chown(tarinfo, dirpath)
- self.utime(tarinfo, dirpath)
- self.chmod(tarinfo, dirpath)
- except ExtractError:
- e = sys.exc_info()[1]
- if self.errorlevel > 1:
- raise
- else:
- self._dbg(1, "tarfile: %s" % e)
-
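_extractall backports TarFile.extractall() for Python versions that predate it: files are extracted first with directories forced to mode 0700, and directory owner, mtime and mode are fixed up afterwards, deepest first. On any modern Python the standard library call is equivalent (a sketch; the archive path is illustrative and the archive is assumed trusted):

    import tarfile

    with tarfile.open('distribute-x.y.z.tar.gz') as archive:
        # extractall() also defers directory metadata until the contents
        # are written, exactly as the backport above does.
        archive.extractall(path='.')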
-
-def _build_install_args(options):
- """
- Build the arguments to 'python setup.py install' for the distribute package
- """
- install_args = []
- if options.user_install:
- if sys.version_info < (2, 6):
- log.warn("--user requires Python 2.6 or later")
- raise SystemExit(1)
- install_args.append('--user')
- return install_args
-
-def _parse_args():
- """
- Parse the command line for options
- """
- parser = optparse.OptionParser()
- parser.add_option(
- '--user', dest='user_install', action='store_true', default=False,
- help='install in user site package (requires Python 2.6 or later)')
- parser.add_option(
- '--download-base', dest='download_base', metavar="URL",
- default=DEFAULT_URL,
- help='alternative URL from which to download the distribute package')
- options, args = parser.parse_args()
- # positional arguments are ignored
- return options
-
-def main(version=DEFAULT_VERSION):
- """Install or upgrade setuptools and EasyInstall"""
- options = _parse_args()
- tarball = download_setuptools(download_base=options.download_base)
- return _install(tarball, _build_install_args(options))
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/legacy/gcimagebundle/gcimagebundle b/legacy/gcimagebundle/gcimagebundle
deleted file mode 100755
index 3ab7ec1..0000000
--- a/legacy/gcimagebundle/gcimagebundle
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Wrapper for gcimagebundle main."""
-
-from gcimagebundlelib import imagebundle
-
-
-def main():
- imagebundle.main()
-
-
-if __name__ == '__main__':
- main()
diff --git a/legacy/gcimagebundle/gcimagebundlelib/__init__.py b/legacy/gcimagebundle/gcimagebundlelib/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/__init__.py
+++ /dev/null
diff --git a/legacy/gcimagebundle/gcimagebundlelib/block_disk.py b/legacy/gcimagebundle/gcimagebundlelib/block_disk.py
deleted file mode 100644
index a860b89..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/block_disk.py
+++ /dev/null
@@ -1,389 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module to create raw disk images.
-
-Stores a copy of directories/files in a file mounted as a partitioned block
-device.
-"""
-
-
-
-import hashlib
-import logging
-import os
-import re
-import tempfile
-
-from gcimagebundlelib import exclude_spec
-from gcimagebundlelib import fs_copy
-from gcimagebundlelib import utils
-
-
-class RawDiskError(Exception):
- """Error occured during raw disk creation."""
-
-
-class InvalidRawDiskError(Exception):
- """Error when verification fails before copying."""
-
-
-class FsRawDisk(fs_copy.FsCopy):
- """Creates a raw disk copy of OS image and bundles it into gzipped tar."""
-
- def __init__(self, fs_size, fs_type):
- """Constructor for FsRawDisk class.
-
- Args:
- fs_size: Size of the raw disk.
- fs_type: Filesystem type of the raw disk.
- """
- super(FsRawDisk, self).__init__()
- self._fs_size = fs_size
- self._fs_type = fs_type
-
- def _ResizeFile(self, file_path, file_size):
- logging.debug('Resizing %s to %s', file_path, file_size)
- with open(file_path, 'a') as disk_file:
- disk_file.truncate(file_size)
-
- def _InitializeDiskFileFromDevice(self, file_path):
- """Initializes disk file from the device specified in self._disk.
-
- It preserves whatever may be there on the device prior to the start of the
- first partition.
-
- At the moment this method supports devices with a single partition only.
-
- Args:
- file_path: The path where the disk file should be created.
-
- Returns:
- A tuple with partition_start, uuid. partition_start is the location
- where the first partition on the disk starts and uuid is the filesystem
- UUID to use for the first partition.
-
- Raises:
- RawDiskError: If there is more than one partition on the disk device.
- """
- # Find the disk size
- disk_size = utils.GetDiskSize(self._disk)
- logging.debug('Size of disk is %s', disk_size)
- # Make the disk file big enough to hold the disk
- self._ResizeFile(file_path, disk_size)
- # Find the location where the first partition starts
- partition_start = utils.GetPartitionStart(self._disk, 1)
- logging.debug('First partition starts at %s', partition_start)
- # Copy all the bytes as is from the start of the disk to the start of
- # first partition
- utils.CopyBytes(self._disk, file_path, partition_start)
- # Verify there is only 1 partition on the disk
- with utils.LoadDiskImage(file_path) as devices:
- # For now we only support disks with a single partition.
- if len(devices) == 0:
- raise RawDiskError(
- 'Device %s should be a disk not a partition.' % self._disk)
- elif len(devices) != 1:
- raise RawDiskError(
- 'Device %s has more than 1 partition. Only devices '
- 'with a single partition are supported.' % self._disk)
- # Remove the first partition from the file we are creating. We will
- # recreate a partition that will fit inside _fs_size later.
- utils.RemovePartition(file_path, 1)
- # Resize the disk.raw file down to self._fs_size
- # We do this after removing the first partition to ensure that an
- # existing partition doesn't fall outside the boundary of the disk device.
- self._ResizeFile(file_path, self._fs_size)
- # Get UUID of the first partition on the disk
- # TODO(user): This is very hacky and relies on the disk path being
- # similar to /dev/sda etc which is bad. Need to fix it.
- uuid = utils.GetUUID(self._disk + '1')
- return partition_start, uuid
-
- def Bundleup(self):
- """Creates a raw disk copy of OS image and bundles it into gzipped tar.
-
- Returns:
- The size of the generated raw disk and the SHA1 digest of the tar archive.
-
- Raises:
- RawDiskError: If number of partitions in a created image doesn't match
- expected count.
- """
-
- # Create sparse file with specified size
- disk_file_path = os.path.join(self._scratch_dir, 'disk.raw')
- with open(disk_file_path, 'wb') as _:
- pass
- self._excludes.append(exclude_spec.ExcludeSpec(disk_file_path))
-
- logging.info('Initializing disk file')
- partition_start = None
- uuid = None
- if self._disk:
- # If a disk device has been provided then preserve whatever is there on
- # the disk before the first partition in case there is an MBR present.
- partition_start, uuid = self._InitializeDiskFileFromDevice(disk_file_path)
- else:
- # User didn't specify a disk device. Initialize a device with a simple
- # partition table.
- self._ResizeFile(disk_file_path, self._fs_size)
- # User didn't specify a disk to copy. Create a new partition table
- utils.MakePartitionTable(disk_file_path)
- # Pass 1MB as start to avoid 'Warning: The resulting partition is not
- # properly aligned for best performance.' from parted.
- partition_start = 1024 * 1024
-
- # Create a new partition starting at partition_start of size
- # self._fs_size - partition_start
- utils.MakePartition(disk_file_path, 'primary', 'ext2', partition_start,
- self._fs_size - partition_start)
- with utils.LoadDiskImage(disk_file_path) as devices:
- # For now we only support disks with a single partition.
- if len(devices) != 1:
- raise RawDiskError(devices)
- # List contents of /dev/mapper to help with debugging. Contents will
- # be listed in debug log only
- utils.RunCommand(['ls', '/dev/mapper'])
- logging.info('Making filesystem')
- uuid = utils.MakeFileSystem(devices[0], self._fs_type, uuid)
- with utils.LoadDiskImage(disk_file_path) as devices:
- if uuid is None:
- raise Exception('Could not get uuid from MakeFileSystem')
- mount_point = tempfile.mkdtemp(dir=self._scratch_dir)
- with utils.MountFileSystem(devices[0], mount_point, self._fs_type):
- logging.info('Copying contents')
- self._CopySourceFiles(mount_point)
- self._CopyPlatformSpecialFiles(mount_point)
- self._ProcessOverwriteList(mount_point)
- self._CleanupNetwork(mount_point)
- self._UpdateFstab(mount_point, uuid)
-
- tar_entries = []
-
- manifest_file_path = os.path.join(self._scratch_dir, 'manifest.json')
- manifest_created = self._manifest.CreateIfNeeded(manifest_file_path)
- if manifest_created:
- tar_entries.append(manifest_file_path)
-
- tar_entries.append(disk_file_path)
- logging.info('Creating tar.gz archive')
- utils.TarAndGzipFile(tar_entries,
- self._output_tarfile)
- for tar_entry in tar_entries:
- os.remove(tar_entry)
-
- # TODO(user): It would be better to compute tar.gz file hash during
- # archiving.
- h = hashlib.sha1()
- with open(self._output_tarfile, 'rb') as tar_file:
- for chunk in iter(lambda: tar_file.read(8192), ''):
- h.update(chunk)
- return (self._fs_size, h.hexdigest())
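The loop above streams the tarball through SHA1 in 8 KB chunks so the archive never has to fit in memory (the '' sentinel is the Python 2 spelling; a binary read on Python 3 needs b''). The same idiom in isolation:

    import hashlib

    def sha1_of_file(path, chunk_size=8192):
        """Hash a file incrementally without loading it whole."""
        digest = hashlib.sha1()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(chunk_size), b''):
                digest.update(chunk)
        return digest.hexdigest()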
-
- def _CopySourceFiles(self, mount_point):
- """Copies all source files/directories to a mounted raw disk.
-
- There are several cases which must be handled separately:
- 1. src=dir1 and dest is empty. In this case we simply copy the content of
- dir1 to mount_point.
- 2. src=dir1 and dest=dir2. In this case dir1 is copied to mount_point
- under a new name dir2, so its content would be copied under
- mount_point/dir2.
- 3. src=file1/dir1 and dest=file2/dir2 and is_recursive=False. file1/dir1
- is copied to mount_point/file2 or mount_point/dir2.
-
- Args:
- mount_point: A path to a mounted raw disk.
- """
- for (src, dest, is_recursive) in self._srcs:
- # Generate a list of files/directories excluded from copying to raw disk.
- # rsync expects them to be relative to src directory so we need to
- # regenerate this list for every src separately.
- with tempfile.NamedTemporaryFile(dir=self._scratch_dir) as rsync_file:
- for spec in self._excludes:
- rsync_file.write(spec.GetRsyncSpec(src))
-
- # Make sure that the rsync utility sees all the content of rsync_file,
- # which otherwise may still be buffered.
- rsync_file.flush()
- if is_recursive:
- # If a directory ends with /, rsync copies the content of the
- # directory; otherwise it also copies the directory itself.
- src = src.rstrip('/')
- if not dest:
- src += '/'
- utils.Rsync(src, mount_point, rsync_file.name,
- self._ignore_hard_links, recursive=True, xattrs=True)
- if dest:
- os.rename(os.path.join(mount_point, os.path.basename(src)),
- os.path.join(mount_point, dest))
- else:
- utils.Rsync(src, os.path.join(mount_point, dest), rsync_file.name,
- self._ignore_hard_links, recursive=False, xattrs=True)
-
- def _CopyPlatformSpecialFiles(self, mount_point):
- """Copies platform special files to a mounted raw disk.
-
- Args:
- mount_point: A path to a mounted raw disk.
- """
- if self._platform:
- special_files = self._platform.GetPlatformSpecialFiles(self._scratch_dir)
- for (src, dest) in special_files:
- # Ensure we don't use extended attributes here, so that copying /selinux
- # on Linux doesn't try and fail to preserve the SELinux context. That
- # doesn't work and causes rsync to return a nonzero status code.
- utils.Rsync(src, os.path.join(mount_point, dest), None,
- self._ignore_hard_links, recursive=False, xattrs=False)
-
- def _ProcessOverwriteList(self, mount_point):
- """Overwrites a set of files/directories requested by platform.
-
- Args:
- mount_point: A path to a mounted raw disk.
- """
- for file_name in self._overwrite_list:
- file_path = os.path.join(mount_point, file_name)
- if os.path.exists(file_path):
- if os.path.isdir(file_path):
- # TODO(user): platform.Overwrite is expected to overwrite the
- # directory in place from what I can tell. In the case of a file it
- # creates a new file which must be copied to the mounted raw disk, so
- # there is some inconsistency that would need to be addressed if and
- # when we encounter a platform that wants to overwrite a directory.
- self._platform.Overwrite(file_path, file_name, self._scratch_dir)
- logging.info('rawdisk: modifying directory %s', file_path)
- else:
- new_file = self._platform.Overwrite(file_path, file_name,
- self._scratch_dir)
- logging.info('rawdisk: modifying %s from %s', file_path, new_file)
- utils.Rsync(new_file, file_path, None, self._ignore_hard_links,
- recursive=False, xattrs=True)
-
- def _CleanupNetwork(self, mount_point):
- """Remove any record of our current MAC address."""
- net_rules_path = os.path.join(
- mount_point,
- 'lib/udev/rules.d/75-persistent-net-generator.rules')
- if os.path.exists(net_rules_path):
- os.remove(net_rules_path)
-
- def _UpdateFstab(self, mount_point, uuid):
- """Update /etc/fstab with the new root fs UUID."""
- fstab_path = os.path.join(mount_point, 'etc/fstab')
- if not os.path.exists(fstab_path):
- logging.warning('etc/fstab does not exist. Not updating fstab uuid')
- return
-
- f = open(fstab_path, 'r')
- lines = f.readlines()
- f.close()
-
- def UpdateUUID(line):
- """Replace the UUID on the entry for /."""
- g = re.match(r'UUID=\S+\s+/\s+(.*)', line)
- if not g:
- return line
- return 'UUID=%s / %s\n' % (uuid, g.group(1))
-
- logging.debug('Original /etc/fstab contents:\n%s', lines)
- updated_lines = map(UpdateUUID, lines)
- if lines == updated_lines:
- logging.debug('No changes required to /etc/fstab')
- return
- logging.debug('Updated /etc/fstab contents:\n%s', updated_lines)
- f = open(fstab_path, 'w')
- f.write(''.join(updated_lines))
- f.close()
-
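The fstab edit above hinges on a single regex: match the entry whose mount point is /, keep everything after the mount point, and splice in the new UUID. The same transform in isolation (the line and UUIDs are made up):

    import re

    def update_root_uuid(line, new_uuid):
        # Only the root ('/') entry is rewritten; all others pass through.
        m = re.match(r'UUID=\S+\s+/\s+(.*)', line)
        if not m:
            return line
        return 'UUID=%s / %s\n' % (new_uuid, m.group(1))

    update_root_uuid('UUID=old-uuid / ext4 defaults 0 1', 'new-uuid')
    # -> 'UUID=new-uuid / ext4 defaults 0 1\n'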
-
-class RootFsRaw(FsRawDisk):
- """Block disk copy of the root file system.
-
- Takes care of additional checks for a root file system.
- """
-
- def __init__(
- self, fs_size, fs_type, skip_disk_space_check, statvfs=os.statvfs):
- # statvfs parameter is for unit test to mock out os.statvfs call.
- super(RootFsRaw, self).__init__(fs_size, fs_type)
- self._skip_disk_space_check = skip_disk_space_check
- self._statvfs = statvfs
-
- def _Verify(self):
- super(RootFsRaw, self)._Verify()
- # exactly one file system to bundle up
- if len(self._srcs) != 1:
- raise InvalidRawDiskError('Root filesystems must have exactly one src.')
- # check that destination field is empty.
- if self._srcs[0][1]:
- raise InvalidRawDiskError('Root filesystems must be copied as /')
- if (not self._skip_disk_space_check and
- self._srcs[0][0] == '/'):
- self._VerifyDiskSpace()
-
- def _VerifyDiskSpace(self):
- """Verify that there is enough free disk space to generate the image file"""
- # We use a very quick and simplistic check,
- # DiskSpaceNeeded = disk.raw + image.tar.gz + LogFile
- # disk.raw = PartitionTable + AllFilesCopied
- # AllFilesCopied = RootDiskSize - RootDiskFreeSize - ExcludedFiles
- # We ignore LogFile, PartitionTable, and ExcludedFiles.
- # Some empirical experience showed that the compression ratio of the
- # tar.gz file is about 1/3. To be conservative, we assume image.tar.gz is
- # about 40% of disk.raw file.
- # As a result, DiskSpaceNeeded=1.4*(RootDiskSize - RootDiskFreeSize)
- # TODO(user): Make this check more accurate because ignoring ExcludedFiles
- # can result in significant overestimation of disk
- # space needed if the user has large disk space used in /tmp, for example.
- root_fs = self._statvfs(self._srcs[0][0])
- disk_space_needed = long(1.4 * root_fs.f_bsize * (root_fs.f_blocks -
- root_fs.f_bfree))
- logging.info(("Root disk on %s: f_bsize=%d f_blocks=%d f_bfree=%d. "
- "Estimated space needed is %d (may be overestimated)."),
- self._srcs[0][0],
- root_fs.f_bsize,
- root_fs.f_blocks,
- root_fs.f_bfree,
- disk_space_needed)
-
- # self._scratch_dir is where we will put the disk.raw and *.tar.gz file.
- scratch_fs = self._statvfs(self._scratch_dir)
- free_space = scratch_fs.f_bsize * scratch_fs.f_bfree
- logging.info("Free disk space for %s is %d bytes.",
- self._scratch_dir,
- free_space)
-
- if disk_space_needed > free_space:
- errorMessage = ("The operation may require up to %d bytes of disk space. "
- "However, the free disk space for %s is %d bytes. Please consider "
- "freeing more disk space. Note that the disk space required may "
- "be overestimated because it does not exclude temporary files that "
- "will not be copied. You may use --skip_disk_space_check to disable "
- "this check.") % (disk_space_needed, self._scratch_dir, free_space)
- raise InvalidRawDiskError(errorMessage)
- if disk_space_needed > self._fs_size:
- errorMessage = ("The root disk files to be copied may require up to %d "
- "bytes. However, the limit on the image disk file is %d bytes. "
- "Please consider deleting unused files from root disk, "
- "or increasing the image disk file limit with --fssize option. "
- "Note that the disk space required may "
- "be overestimated because it does not exclude temporary files that "
- "will not be copied. You may use --skip_disk_space_check to disable "
- "this check.") % (disk_space_needed, self._fs_size)
- raise InvalidRawDiskError(errorMessage)
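The estimate boils down to two statvfs() calls. A standalone sketch of the same arithmetic (the 1.4 factor is the module's own empirical compression margin):

    import os

    def estimate_bundle_space(root='/', scratch='/tmp', margin=1.4):
        """Return (estimated bytes needed, bytes free in scratch)."""
        root_fs = os.statvfs(root)
        used = root_fs.f_bsize * (root_fs.f_blocks - root_fs.f_bfree)
        scratch_fs = os.statvfs(scratch)
        return int(margin * used), scratch_fs.f_bsize * scratch_fs.f_bfree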
-
-
-
diff --git a/legacy/gcimagebundle/gcimagebundlelib/centos.py b/legacy/gcimagebundle/gcimagebundlelib/centos.py
deleted file mode 100644
index 1a082de..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/centos.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Centos specific platform info."""
-
-
-
-import os
-import platform
-import re
-
-from gcimagebundlelib import linux
-
-
-class Centos(linux.LinuxPlatform):
- """Centos specific information."""
-
- @staticmethod
- def IsThisPlatform(root='/'):
- release_file = root + '/etc/redhat-release'
- if os.path.exists(release_file):
- (_, _, flavor, _) = Centos.ParseRedhatRelease(release_file)
- if flavor and flavor.lower() == 'centos':
- return True
- return False
-
- @staticmethod
- def ParseRedhatRelease(release_file='/etc/redhat-release'):
- """Parses the /etc/redhat-release file."""
- f = open(release_file)
- lines = f.readlines()
- f.close()
- if not lines:
- return (None, None, None, None)
- line0 = lines[0]
- # Matches both CentOS 6 and CentOS 7 formats.
- # CentOS 6: CentOS release 6.5 (Final)
- # CentOS 7: CentOS Linux release 7.0.1406 (Core)
- g = re.match(r'(\S+)( Linux)? release (\d+(\.\d+)+) \(([^)]*)\)', line0)
- if not g:
- return (None, None, None, None)
- (osname, version, label) = (g.group(1), g.group(3), g.group(5))
- return (osname, label, osname, version)
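The release regex accepts both of the formats quoted in the comment, for example:

    import re

    pattern = r'(\S+)( Linux)? release (\d+(\.\d+)+) \(([^)]*)\)'
    for line in ('CentOS release 6.5 (Final)',
                 'CentOS Linux release 7.0.1406 (Core)'):
        m = re.match(pattern, line)
        print('%s %s %s' % (m.group(1), m.group(3), m.group(5)))
    # CentOS 6.5 Final
    # CentOS 7.0.1406 Core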
-
- def __init__(self):
- super(Centos, self).__init__()
- (self.distribution_codename, _, self.distribution,
- self.distribution_version) = Centos.ParseRedhatRelease()
-
- def GetPreferredFilesystemType(self):
- (_, version, _) = platform.linux_distribution()
- if version.startswith('7'):
- return 'xfs'
- return 'ext4'
diff --git a/legacy/gcimagebundle/gcimagebundlelib/debian.py b/legacy/gcimagebundle/gcimagebundlelib/debian.py
deleted file mode 100644
index 957e3a7..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/debian.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Debian Linux specific platform info."""
-
-
-
-import platform
-
-from gcimagebundlelib import linux
-
-
-class Debian(linux.LinuxPlatform):
- """Debian Linux specific information."""
-
- @staticmethod
- def IsThisPlatform(root='/'):
- (distribution, _, _) = platform.linux_distribution()
- if distribution and distribution.lower() == 'debian':
- return True
- return False
-
- def __init__(self):
- super(Debian, self).__init__()
diff --git a/legacy/gcimagebundle/gcimagebundlelib/exclude_spec.py b/legacy/gcimagebundle/gcimagebundlelib/exclude_spec.py
deleted file mode 100644
index b5bc237..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/exclude_spec.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Exclude file specification."""
-
-
-
-import logging
-import os
-
-
-class ExcludeSpec(object):
- """Specifies how exclusion of a path should be handled."""
-
- def __init__(self, path, preserve_file=False, preserve_dir=False,
- preserve_subdir=False):
- self.path = path
- self.preserve_dir = preserve_dir
- self.preserve_file = False
- self.preserve_subdir = False
- # Preserve files and subdirs only if dir is preserved.
- if preserve_file and preserve_dir:
- self.preserve_file = True
- if preserve_subdir and preserve_dir:
- self.preserve_subdir = True
-
- def ShouldExclude(self, filename):
- prefix = os.path.commonprefix([filename, self.path])
- if prefix == self.path:
- if ((self.preserve_dir and filename == self.path) or
- (self.preserve_subdir and os.path.isdir(filename)) or
- (self.preserve_file and os.path.isfile(filename))):
- logging.warning('preserving %s', filename)
- return False
- return True
- return False
-
- def GetSpec(self):
- return '(%s, %d:%d:%d)' % (self.path, self.preserve_file, self.preserve_dir,
- self.preserve_subdir)
-
- def GetRsyncSpec(self, src):
- """Returns exclude spec in a format required by rsync.
-
- Args:
- src: source directory path passed to rsync. rsync expects exclude-spec to
- be relative to src directory.
-
- Returns:
- A string of exclude filters in rsync exclude-from file format.
- """
- spec = ''
- prefix = os.path.commonprefix([src, self.path])
- if prefix == src:
- relative_path = os.path.join('/', self.path[len(prefix):])
- if self.preserve_dir:
- spec += '+ %s\n' % relative_path
- if self.preserve_file or self.preserve_subdir:
- if os.path.isdir(self.path):
- for f in os.listdir(self.path):
- file_path = os.path.join(self.path, f)
- relative_file_path = os.path.join(relative_path, f)
- if self.preserve_file and os.path.isfile(file_path):
- spec += '+ %s\n' % relative_file_path
- if self.preserve_subdir and os.path.isdir(file_path):
- spec += '+ %s\n' % relative_file_path
- else:
- spec += '- %s\n' % relative_path
- spec += '- %s\n' % os.path.join(relative_path, '**')
- return spec
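The returned string is in rsync's exclude-from filter syntax: a '+' line re-includes a path, a '-' line excludes it, and '**' matches across directory levels. For example, ExcludeSpec('/tmp', preserve_dir=True).GetRsyncSpec('/') yields

    + /tmp
    - /tmp/**

(keep the /tmp directory itself, drop everything inside it), while a plain ExcludeSpec('/tmp') yields '- /tmp' followed by '- /tmp/**'.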
diff --git a/legacy/gcimagebundle/gcimagebundlelib/fedora.py b/legacy/gcimagebundle/gcimagebundlelib/fedora.py
deleted file mode 100644
index 21d098b..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/fedora.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Fedora specific platform info."""
-
-
-
-import os
-import re
-
-from gcimagebundlelib import linux
-
-
-class Fedora(linux.LinuxPlatform):
- """Fedora specific information."""
-
- @staticmethod
- def IsThisPlatform(root='/'):
- release_file = root + '/etc/redhat-release'
- if os.path.exists(release_file):
- (_, _, flavor, _) = Fedora.ParseRedhatRelease(release_file)
- if flavor and flavor.lower() == 'fedora':
- return True
- return False
-
- @staticmethod
- def ParseRedhatRelease(release_file='/etc/redhat-release'):
- """Parses the /etc/redhat-release file."""
- f = open(release_file)
- lines = f.readlines()
- f.close()
- if not lines:
- return (None, None, None, None)
- line0 = lines[0]
- g = re.match(r'(\S+) release (\d+) \(([^)]*)\)', line0)
- if not g:
- return (None, None, None, None)
- (osname, version, label) = (g.group(1), g.group(2), g.group(3))
- return (osname, label, osname, version)
-
- def __init__(self):
- super(Fedora, self).__init__()
- (self.distribution_codename, _, self.distribution,
- self.distribution_version) = Fedora.ParseRedhatRelease()
diff --git a/legacy/gcimagebundle/gcimagebundlelib/fs_copy.py b/legacy/gcimagebundle/gcimagebundlelib/fs_copy.py
deleted file mode 100644
index e9adc91..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/fs_copy.py
+++ /dev/null
@@ -1,180 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Creates a copy of specified directories\files."""
-
-
-
-import logging
-import os
-import re
-
-from gcimagebundlelib import manifest
-from gcimagebundlelib import utils
-
-
-class FsCopyError(Exception):
- """Error occured in fs copy operation."""
-
-
-class InvalidFsCopyError(Exception):
- """Error when verification fails before fs copying."""
-
-
-class FsCopy(object):
- """Specifies which files/directories must be copied."""
-
- def __init__(self):
- # Populate the required parameters with None so we can verify.
- self._output_tarfile = None
- self._srcs = []
- self._excludes = []
- self._key = None
- self._recursive = True
- self._fs_size = 0
- self._ignore_hard_links = False
- self._platform = None
- self._overwrite_list = []
- self._scratch_dir = '/tmp'
- self._disk = None
- self._manifest = manifest.ImageManifest(is_gce_instance=utils.IsRunningOnGCE())
-
- def SetTarfile(self, tar_file):
- """Sets tar file which will contain file system copy.
-
- Args:
- tar_file: path to a tar file.
- """
- self._output_tarfile = tar_file
-
- def AddDisk(self, disk):
- """Adds the disk which should be bundled.
-
- Args:
- disk: The block disk that needs to be bundled.
- """
- self._disk = disk
-
- def AddSource(self, src, arcname='', recursive=True):
- """Adds a source to be copied to the tar file.
-
- Args:
- src: path to directory/file to be copied.
- arcname: name of src in the tar archive. If arcname is empty, then instead
- of copying src itself only its content is copied.
- recursive: specifies if src directory should be copied recursively.
-
- Raises:
- ValueError: If src path doesn't exist.
- """
- if not os.path.exists(src):
- raise ValueError('invalid path: %s' % src)
- # Note that there is a fundamental asymmetry here as
- # abspath('/') => '/' while abspath('/usr/') => '/usr'.
- # This creates some subtleties elsewhere in the code.
- self._srcs.append((os.path.abspath(src), arcname, recursive))
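The asymmetry noted in the comment is easy to see directly:

    import os.path

    os.path.abspath('/')      # '/'
    os.path.abspath('/usr/')  # '/usr'  (trailing slash dropped)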
-
- def AppendExcludes(self, excludes):
- """Adds a file/directory to be excluded from file copy.
-
- Args:
- excludes: A list of ExcludeSpec objects.
- """
- self._excludes.extend(excludes)
-
- def SetKey(self, key):
- """Sets a key to use to sign the archive digest.
-
- Args:
- key: key to use to sign the archive digest.
- """
- # The key is ignored for now.
- # TODO(user): sign the digest with the key
- self._key = key
-
- def SetPlatform(self, platform):
- """Sets the OS platform which is used to create an image.
-
- Args:
- platform: OS platform specific settings.
- """
- self._platform = platform
- logging.warning('overwrite list = %s',
- ' '.join(platform.GetOverwriteList()))
- self._overwrite_list = [re.sub('^/', '', x)
- for x in platform.GetOverwriteList()]
-
- def _SetManifest(self, image_manifest):
- """For test only, allows to set a test manifest object."""
- self._manifest = image_manifest
-
- def SetScratchDirectory(self, directory):
- """Sets a directory used for storing intermediate results.
-
- Args:
- directory: scratch directory path.
- """
- self._scratch_dir = directory
-
- def IgnoreHardLinks(self):
- """Requests that hard links should not be copied as hard links."""
-
- # TODO(user): I don't see a reason for this option to exist. Currently
- # there is a difference in how this option is interpreted between FsTarball
- # and FsRawDisk. FsTarball only copies one hard link to an inode and ignores
- # the rest of them. FsRawDisk copies the content of a file that hard link is
- # pointing to instead of recreating a hard link. Either option seems useless
- # for creating a copy of a file system.
- self._ignore_hard_links = True
-
- def Verify(self):
- """Verify if we have all the components to build a tar."""
- self._Verify()
-
- def Bundleup(self):
- """Creates the tar image based on set parameters.
-
- Returns:
- A tuple of (size, SHA1 digest) for the tar archive.
- """
- return (0, None)
-
- def _Verify(self):
- """Verifies the tar attributes. Raises InvalidTarballError.
-
- Raises:
- InvalidFsCopyError: If not all required parameters are set.
- FsCopyError: If source file does not exist.
- """
- if not self._output_tarfile or not self._srcs or not self._key:
- raise InvalidFsCopyError('Incomplete copy spec')
- for (src, _, _) in self._srcs:
- if not os.path.exists(src):
- raise FsCopyError('%s does not exist' % src)
-
- def _ShouldExclude(self, filename):
- """"Checks if a file/directory are excluded from a copy.
-
- Args:
- filename: a file/directory path.
-
- Returns:
- True if a file/directory shouldn't be copied, False otherwise.
- """
- for spec in self._excludes:
- if spec.ShouldExclude(filename):
- logging.info('tarfile: Excluded %s', filename)
- return True
- return False
diff --git a/legacy/gcimagebundle/gcimagebundlelib/gcel.py b/legacy/gcimagebundle/gcimagebundlelib/gcel.py
deleted file mode 100644
index 2622cf7..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/gcel.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""GCE Linux specific platform info."""
-
-
-
-import csv
-import os
-
-from gcimagebundlelib import linux
-
-
-class Gcel(linux.LinuxPlatform):
- """GCE Linux specific information."""
-
- @staticmethod
- def IsThisPlatform(root='/'):
- release_file = root + '/etc/lsb-release'
- if os.path.exists(release_file):
- (flavor, _, _, _) = Gcel.ParseLsbRelease(release_file)
- if flavor and flavor.lower() == 'gcel':
- return True
- return False
-
- @staticmethod
- def ParseLsbRelease(release_file='/etc/lsb-release'):
- """Parses the /etc/lsb-releases file.
-
- Returns:
- A 4-tuple containing id, release, codename, and description
- """
- release_info = {}
- for line in csv.reader(open(release_file), delimiter='='):
- if len(line) > 1:
- release_info[line[0]] = line[1]
- return (release_info.get('DISTRIB_ID', None),
- release_info.get('DISTRIB_RELEASE', None),
- release_info.get('DISTRIB_CODENAME', None),
- release_info.get('DISTRIB_DESCRIPTION', None))
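csv.reader with '=' as the delimiter also strips optional double quotes around values. A made-up /etc/lsb-release parsed the same way (Python 3 spelling):

    import csv
    import io

    sample = io.StringIO(
        'DISTRIB_ID=Ubuntu\n'
        'DISTRIB_RELEASE=12.04\n'
        'DISTRIB_DESCRIPTION="Ubuntu 12.04 LTS"\n')
    info = {}
    for row in csv.reader(sample, delimiter='='):
        if len(row) > 1:
            info[row[0]] = row[1]
    info['DISTRIB_DESCRIPTION']  # 'Ubuntu 12.04 LTS' (quotes removed)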
-
- def __init__(self):
- super(Gcel, self).__init__()
- (self.distribution, self.distribution_version,
- self.distribution_codename, _) = Gcel.ParseLsbRelease()
diff --git a/legacy/gcimagebundle/gcimagebundlelib/imagebundle.py b/legacy/gcimagebundle/gcimagebundlelib/imagebundle.py
deleted file mode 100755
index f275c3c..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/imagebundle.py
+++ /dev/null
@@ -1,265 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Tool to bundle root filesystem to a tarball.
-
-Creates a tar bundle and a Manifest, which can be uploaded to the image store.
-"""
-
-
-
-import logging
-from optparse import OptionParser
-import os
-import shutil
-import subprocess
-import tempfile
-import time
-
-from gcimagebundlelib import block_disk
-from gcimagebundlelib import exclude_spec
-from gcimagebundlelib import platform_factory
-from gcimagebundlelib import utils
-
-def SetupArgsParser():
- """Sets up the command line flags."""
- parser = OptionParser()
- parser.add_option('-d', '--disk', dest='disk',
- default='/dev/sda',
- help='Disk to bundle.')
- parser.add_option('-r', '--root', dest='root_directory',
- default='/', metavar='ROOT',
- help='Root of the file system to bundle.'
- ' Recursively bundles all subdirectories.')
- parser.add_option('-e', '--excludes', dest='excludes',
- help='Comma-separated list of subdirectories to exclude.'
- ' The defaults are platform specific.')
- parser.add_option('-o', '--output_directory', dest='output_directory',
- default='/tmp/', metavar='DIR',
- help='Output directory for image.')
- parser.add_option('--output_file_name', dest='output_file_name',
- default=None, metavar='FILENAME',
- help=('Output filename for the image. Default is a digest'
- ' of the image bytes.'))
- parser.add_option('--include_mounts', dest='include_mounts',
- help='Don\'t ignore mounted filesystems under ROOT.',
- action='store_true', default=False)
- parser.add_option('-v', '--version',
- action='store_true', dest='display_version', default=False,
- help='Print the tool version.')
- parser.add_option('--loglevel', dest='log_level',
- help='Debug logging level.', default='INFO',
- choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'])
- parser.add_option('--log_file', dest='log_file',
- help='Output file for log messages.')
- parser.add_option('-k', '--key', dest='key', default='nebula',
- help='Public key used for signing the image.')
- parser.add_option('--nocleanup', dest='cleanup',
- action='store_false', default=True,
- help='Do not clean up temporary and log files.')
- #TODO(user): Get dehumanize.
- parser.add_option('--fssize', dest='fs_size', default=10*1024*1024*1024,
- type='int', help='File system size in bytes')
- parser.add_option('-b', '--bucket', dest='bucket',
- help='Destination storage bucket')
- parser.add_option('-f', '--filesystem', dest='file_system',
- default=None,
- help='File system type for the image.')
- parser.add_option('--skip_disk_space_check', dest='skip_disk_space_check',
- default=False, action='store_true',
- help='Skip the disk space requirement check.')
-
- return parser
-
-
-def VerifyArgs(parser, options):
- """Verifies that commandline flags are consistent."""
- if not options.output_directory:
- parser.error('output bundle directory must be specified.')
- if not os.path.exists(options.output_directory):
- parser.error('output bundle directory does not exist.')
-
- # TODO(user): add more verification as needed
-
-def EnsureSuperUser():
- """Ensures that current user has super user privileges."""
- if os.getuid() != 0:
- logging.warning('Tool must be run as root.')
- exit(-1)
-
-
-def GetLogLevel(options):
- """Log Level string to logging.LogLevel mapping."""
- level = {
- 'DEBUG': logging.DEBUG,
- 'INFO': logging.INFO,
- 'WARNING': logging.WARNING,
- 'ERROR': logging.ERROR,
- 'CRITICAL': logging.CRITICAL
- }
- if options.log_level in level:
- return level[options.log_level]
- print 'Invalid logging level. Defaulting to INFO.'
- return logging.INFO
-
-
-def SetupLogging(options, log_dir='/tmp'):
- """Set up logging.
-
- All messages above INFO level are also logged to console.
-
- Args:
- options: collection of command line options.
- log_dir: directory used to generate log files.
- """
- if options.log_file:
- logfile = options.log_file
- else:
- logfile = tempfile.mktemp(dir=log_dir, prefix='bundle_log_')
- print 'Starting logging in %s' % logfile
- logging.basicConfig(filename=logfile,
- level=GetLogLevel(options),
- format='%(asctime)s %(levelname)s:%(name)s:%(message)s')
- # Use GMT timestamp in logging.
- logging.Formatter.converter = time.gmtime
- console = logging.StreamHandler()
- console.setLevel(GetLogLevel(options))
- logging.getLogger().addHandler(console)
-
-
-def PrintVersionInfo():
- #TODO: Should read from the VERSION file instead.
- print 'version 1.3.1'
-
-
-def GetTargetFilesystem(options, guest_platform):
- if options.file_system:
- return options.file_system
- else:
- return guest_platform.GetPreferredFilesystemType()
-
-
-def main():
- parser = SetupArgsParser()
- (options, _) = parser.parse_args()
- if options.display_version:
- PrintVersionInfo()
- return 0
- EnsureSuperUser()
- VerifyArgs(parser, options)
-
- scratch_dir = tempfile.mkdtemp(dir=options.output_directory)
- SetupLogging(options, scratch_dir)
- logging.warn('============================================================\n'
- 'Warning: gcimagebundle is deprecated. See\n'
- 'https://cloud.google.com/compute/docs/creating-custom-image'
- '#export_an_image_to_google_cloud_storage\n'
- 'for updated instructions.\n'
- '============================================================')
- try:
- guest_platform = platform_factory.PlatformFactory(
- options.root_directory).GetPlatform()
- except platform_factory.UnknownPlatformException:
- logging.critical('Platform is not supported.'
- ' Platform rules can be added to platform_factory.py.')
- return -1
-
- temp_file_name = tempfile.mktemp(dir=scratch_dir, suffix='.tar.gz')
-
- file_system = GetTargetFilesystem(options, guest_platform)
- logging.info('File System: %s', file_system)
- logging.info('Disk Size: %s bytes', options.fs_size)
- bundle = block_disk.RootFsRaw(
- options.fs_size, file_system, options.skip_disk_space_check)
- bundle.SetTarfile(temp_file_name)
- if options.disk:
- readlink_command = ['readlink', '-f', options.disk]
- final_path = utils.RunCommand(readlink_command).strip()
- logging.info('Resolved %s to %s', options.disk, final_path)
- bundle.AddDisk(final_path)
- # TODO(user): Find the location where the first partition of the disk
- # is mounted and add it as the source instead of relying on the source
- # param flag
- bundle.AddSource(options.root_directory)
- bundle.SetKey(options.key)
- bundle.SetScratchDirectory(scratch_dir)
-
- # Merge the platform specific exclude list, mount points,
- # and user specified excludes
- excludes = guest_platform.GetExcludeList()
- if options.excludes:
- excludes.extend([exclude_spec.ExcludeSpec(x) for x in
- options.excludes.split(',')])
- logging.info('exclude list: %s', ' '.join([x.GetSpec() for x in excludes]))
- bundle.AppendExcludes(excludes)
- if not options.include_mounts:
- mount_points = utils.GetMounts(options.root_directory)
- logging.info('ignoring mounts %s', ' '.join(mount_points))
- bundle.AppendExcludes([exclude_spec.ExcludeSpec(x, preserve_dir=True)
- for x in mount_points])
- bundle.SetPlatform(guest_platform)
-
- # Verify that bundle attributes are correct and create tar bundle.
- bundle.Verify()
- (fs_size, digest) = bundle.Bundleup()
- if not digest:
- logging.critical('Could not get digest for the bundle.'
- ' The bundle may not be created correctly')
- return -1
- if fs_size > options.fs_size:
- logging.critical('Size of tar %d exceeds the file system size %d.', fs_size,
- options.fs_size)
- return -1
-
- if options.output_file_name:
- output_file = os.path.join(
- options.output_directory, options.output_file_name)
- else:
- output_file = os.path.join(
- options.output_directory, '%s.image.tar.gz' % digest)
-
- os.rename(temp_file_name, output_file)
- logging.info('Created tar.gz file at %s', output_file)
-
- if options.bucket:
- bucket = options.bucket
- if bucket.startswith('gs://'):
- output_bucket = '%s/%s' % (
- bucket, os.path.basename(output_file))
- else:
- output_bucket = 'gs://%s/%s' % (
- bucket, os.path.basename(output_file))
-
- # /usr/local/bin not in redhat root PATH by default
- if '/usr/local/bin' not in os.environ['PATH']:
- os.environ['PATH'] += ':/usr/local/bin'
-
- # TODO: Consider using boto library directly.
- cmd = ['gsutil', 'cp', output_file, output_bucket]
- retcode = subprocess.call(cmd)
- if retcode != 0:
- logging.critical('Failed to copy image to bucket. '
- 'gsutil returned %d. To retry, run the command: %s',
- retcode, ' '.join(cmd))
- return -1
- logging.info('Uploaded image to %s', output_bucket)
-
- # If we've uploaded, then we can remove the local file.
- os.remove(output_file)
-
- if options.cleanup:
- shutil.rmtree(scratch_dir)
diff --git a/legacy/gcimagebundle/gcimagebundlelib/linux.py b/legacy/gcimagebundle/gcimagebundlelib/linux.py
deleted file mode 100644
index ff8c1d4..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/linux.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Base class for Linux platform specific information."""
-
-
-
-import os
-import platform
-import stat
-
-from gcimagebundlelib import exclude_spec
-from gcimagebundlelib import os_platform
-
-
-class LinuxPlatform(os_platform.Platform):
- """Base class for all Linux flavors."""
- EXCLUDE_LIST = [
- exclude_spec.ExcludeSpec('/etc/ssh/.host_key_regenerated'),
- exclude_spec.ExcludeSpec('/dev', preserve_dir=True),
- exclude_spec.ExcludeSpec('/proc', preserve_dir=True),
- exclude_spec.ExcludeSpec('/run',
- preserve_dir=True, preserve_subdir=True),
- exclude_spec.ExcludeSpec('/selinux'),
- exclude_spec.ExcludeSpec('/tmp', preserve_dir=True),
- exclude_spec.ExcludeSpec('/sys', preserve_dir=True),
- exclude_spec.ExcludeSpec('/var/lib/google/per-instance',
- preserve_dir=True),
- exclude_spec.ExcludeSpec('/var/lock',
- preserve_dir=True, preserve_subdir=True),
- exclude_spec.ExcludeSpec('/var/log',
- preserve_dir=True, preserve_subdir=True),
- exclude_spec.ExcludeSpec('/var/run',
- preserve_dir=True, preserve_subdir=True)]
-
- def __init__(self):
- """Populate the uname -a information."""
- super(LinuxPlatform, self).__init__()
- (self.name, self.hostname, self.release, self.version, self.architecture,
- self.processor) = platform.uname()
- (self.distribution, self.distribution_version,
- self.distribution_codename) = platform.dist()
-
- def GetPlatformDetails(self):
- return ' '.join([self.name, self.hostname, self.release, self.version,
- self.architecture, self.processor, self.distribution,
- self.distribution_version, self.distribution_codename])
-
- def GetName(self):
- return self.GetOs()
-
- def GetProcessor(self):
- return platform.processor()
-
- def GetArchitecture(self):
- if self.architecture:
- return self.architecture
- return ''
-
- def GetOs(self):
- if self.distribution:
- if self.distribution_codename:
- return '%s (%s)' % (self.distribution, self.distribution_codename)
- else:
- return self.distribution
- if self.name:
- return self.name
- return 'Linux'
-
- def IsLinux(self):
- return True
-
- # Linux specific methods
- def GetKernelVersion(self):
- return self.release
-
- # distribution specific methods
- # if platforms module does not do a good job override these.
- def GetDistribution(self):
- return self.distribution
-
- def GetDistributionCodeName(self):
- return self.distribution_codename
-
- def GetDistributionVersion(self):
- return self.distribution_version
-
- def GetPlatformSpecialFiles(self, tmpdir='/tmp'):
- """Creates any platform specific special files."""
- retval = []
- console_dev = os.makedev(5, 1)
- os.mknod(tmpdir + 'console', stat.S_IFCHR |
- stat.S_IRUSR | stat.S_IWUSR, console_dev)
- retval.append((tmpdir + 'console', 'dev/console'))
- null_dev = os.makedev(1, 3)
- os.mknod(tmpdir + 'null', stat.S_IFCHR |
- stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP |
- stat.S_IROTH | stat.S_IWOTH, null_dev)
- retval.append((tmpdir + 'null', 'dev/null'))
- tty_dev = os.makedev(5, 0)
- os.mknod(tmpdir + 'tty', stat.S_IFCHR |
- stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP |
- stat.S_IROTH | stat.S_IWOTH, tty_dev)
- retval.append((tmpdir + 'tty', 'dev/tty'))
- zero_dev = os.makedev(1, 5)
- os.mknod(tmpdir + 'zero', stat.S_IFCHR |
- stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP |
- stat.S_IROTH | stat.S_IWOTH, zero_dev)
- retval.append((tmpdir + 'zero', 'dev/zero'))
- # /selinux is deprecated in favor of /sys/fs/selinux, but preserve it on
- # those OSes where it's present.
- if os.path.isdir('/selinux'):
- os.mkdir(tmpdir + 'selinux', 0755)
- retval.append((tmpdir + 'selinux', 'selinux'))
- return retval
-
- def Overwrite(self, filename, arcname, tmpdir='/tmp'):
- """Overwrites specified file if needed for the Linux platform."""
- pass
-
- def GetPreferredFilesystemType(self):
- """Return the optimal filesystem supported for the platform."""
- return 'ext4'
diff --git a/legacy/gcimagebundle/gcimagebundlelib/manifest.py b/legacy/gcimagebundle/gcimagebundlelib/manifest.py
deleted file mode 100755
index 2e83d9e..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/manifest.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Image manifest."""
-
-
-import json
-from gcimagebundlelib import utils
-
-
-class ImageManifest(object):
- """Retrieves metadata from the instance and stores it in manifest.json.
-
- The image manifest is a JSON file that is bundled along side the disk.
-
- Included Metadata
- - Licenses
- """
-
- def __init__(self, http=utils.Http(), is_gce_instance=True):
- self._http = http
- self._licenses = []
- self._is_gce_instance = is_gce_instance
-
- def CreateIfNeeded(self, file_path):
- """Creates the manifest file to the specified path if it's needed.
-
- Args:
- file_path: Location of where the manifest should be written to.
-
- Returns:
- True Manifest was written to file_path.
- False Manifest was not created.
- """
- if self._is_gce_instance:
- self._LoadLicenses()
- if self._IsManifestNeeded():
- with open(file_path, 'w') as manifest_file:
- self._WriteToFile(manifest_file)
- return True
- return False
-
- def _LoadLicenses(self):
- """Loads the licenses from the metadata server if they exist."""
- response = self._http.GetMetadata('instance/', recursive=True)
- instance_metadata = json.loads(response)
- if 'licenses' in instance_metadata:
- for license_obj in instance_metadata['licenses']:
- self._licenses.append(license_obj['id'])
-
- def _ToJson(self):
- """Formats the image metadata as a JSON object."""
- return json.dumps(
- {
- 'licenses': self._licenses
- })
-
- def _IsManifestNeeded(self):
- """Determines if a manifest should be bundled with the disk."""
- return bool(self._licenses)
-
- def _WriteToFile(self, file_obj):
- """Writes the manifest data to the file handle."""
- manifest_json = self._ToJson()
- file_obj.write(manifest_json)
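The manifest on disk is therefore a single JSON object; with one license attached it would look like this (the id value is illustrative):

    {"licenses": ["1234567890"]}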
diff --git a/legacy/gcimagebundle/gcimagebundlelib/opensuse.py b/legacy/gcimagebundle/gcimagebundlelib/opensuse.py
deleted file mode 100644
index 9f709ff..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/opensuse.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2013 SUSE LLC All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""openSUSE platform info."""
-
-from gcimagebundlelib import suse
-
-class OpenSUSE(suse.SUSE):
- """openSUSE platform info."""
-
- @staticmethod
- def IsThisPlatform(root='/'):
- return 'openSUSE' == suse.SUSE().distribution
-
- def __init__(self):
- super(OpenSUSE, self).__init__()
-
diff --git a/legacy/gcimagebundle/gcimagebundlelib/os_platform.py b/legacy/gcimagebundle/gcimagebundlelib/os_platform.py
deleted file mode 100644
index 65e6e7c..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/os_platform.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Base class for platform specific information."""
-
-
-class Platform(object):
- """Base class for platform information."""
- EXCLUDE_LIST = []
- OVERWRITE_LIST = []
-
- @staticmethod
- def IsThisPlatform(root='/'):
- return False
-
- def __init__(self):
- pass
-
- def GetName(self):
- """Generic name for the platform."""
- return 'Unknown'
-
- def GetProcessor(self):
- """Real processor."""
- return ''
-
- def GetArchitecture(self):
- """Returns machine architecture."""
- return ''
-
- def GetExcludeList(self):
- """Returns the default exclude list of the platform."""
- return self.__class__.EXCLUDE_LIST
-
- def GetOs(self):
- """Returns the name of OS."""
- return 'Unknown'
-
- def IsLinux(self):
- return False
-
- def IsWindows(self):
- return False
-
- def IsUnix(self):
- return False
-
- def GetOverwriteList(self):
- """Returns list of platform specific files to overwrite."""
- return self.__class__.OVERWRITE_LIST
-
- def Overwrite(self, file_path, file_name, scratch_dir):
- """Called for each file in the OverwriteList."""
- return file_name
-
- def GetPlatformSpecialFiles(self, tmpdir):
-    """Returns a list of platform special files that should be created."""
- return []
diff --git a/legacy/gcimagebundle/gcimagebundlelib/platform_factory.py b/legacy/gcimagebundle/gcimagebundlelib/platform_factory.py
deleted file mode 100644
index da63f0e..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/platform_factory.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Factory that guesses the correct platform and creates it."""
-
-import logging
-
-from gcimagebundlelib import centos
-from gcimagebundlelib import fedora
-from gcimagebundlelib import debian
-from gcimagebundlelib import gcel
-from gcimagebundlelib import opensuse
-from gcimagebundlelib import rhel
-from gcimagebundlelib import sle
-from gcimagebundlelib import ubuntu
-
-
-class UnknownPlatformException(Exception):
- """The platform could not be correctly determined."""
-
-
-class PlatformFactory(object):
- """Guess the platform and create it."""
-
- def __init__(self, root='/'):
- self.__root = root
- self.__registry = {}
- self.__platform_registry = {}
- self.Register('Centos', centos.Centos)
- self.Register('Fedora', fedora.Fedora)
- self.Register('Debian', debian.Debian)
- self.Register('GCEL', gcel.Gcel)
- self.Register('openSUSE', opensuse.OpenSUSE)
- self.Register('Red Hat Enterprise Linux', rhel.RHEL)
- self.Register('SUSE Linux Enterprise', sle.SLE)
- self.Register('Ubuntu', ubuntu.Ubuntu)
-
- def Register(self, name, klass):
- self.__registry[name] = klass
-
- def GetPlatform(self):
- for name in self.__registry:
- if self.__registry[name].IsThisPlatform(self.__root):
- logging.info('found platform %s', name)
- return self.__registry[name]()
- else:
- logging.debug('skipping platform %s %s ', name, self.__registry[name])
- raise UnknownPlatformException('Could not determine host platform.')
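-
-
-# Illustrative sketch (not part of the original module): how the factory is
-# typically driven. The helper name is hypothetical.
-def _ExampleGetPlatform():
-  """Returns the detected platform object, or None if unrecognized."""
-  try:
-    return PlatformFactory(root='/').GetPlatform()
-  except UnknownPlatformException:
-    logging.warning('host platform not recognized')
-    return None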
diff --git a/legacy/gcimagebundle/gcimagebundlelib/rhel.py b/legacy/gcimagebundle/gcimagebundlelib/rhel.py
deleted file mode 100644
index 9ebf1ef..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/rhel.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Red Hat Enterprise Linux specific platform info."""
-
-
-
-import platform
-
-from gcimagebundlelib import linux
-
-
-class RHEL(linux.LinuxPlatform):
- """Red Hat Enterprise Linux specific information."""
-
- @staticmethod
- def IsThisPlatform(root='/'):
- (distribution, _, _) = platform.linux_distribution()
- if distribution == 'Red Hat Enterprise Linux Server':
- return True
- return False
-
- def __init__(self):
- super(RHEL, self).__init__()
-
- def GetPreferredFilesystemType(self):
- (_,version,_) = platform.linux_distribution()
- if version.startswith('7'):
- return 'xfs'
- return 'ext4'
diff --git a/legacy/gcimagebundle/gcimagebundlelib/sle.py b/legacy/gcimagebundle/gcimagebundlelib/sle.py
deleted file mode 100644
index 8b74827..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/sle.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2013 SUSE LLC All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""SUSE Linux Enterprise (SLE) platform info."""
-
-import re
-from gcimagebundlelib import suse
-
-class SLE(suse.SUSE):
- """SLE platform info."""
-
-  @staticmethod
-  def IsThisPlatform(root='/'):
- if re.match(r'SUSE Linux Enterprise', suse.SUSE().distribution):
- return True
- return False
-
- def __init__(self):
- super(SLE, self).__init__()
-
- def GetPreferredFilesystemType(self):
- return 'ext3'
diff --git a/legacy/gcimagebundle/gcimagebundlelib/suse.py b/legacy/gcimagebundle/gcimagebundlelib/suse.py
deleted file mode 100644
index 4911b8b..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/suse.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright 2013 SUSE LLC All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""openSUSE and SUSE generic platform info."""
-
-import os
-import re
-
-from gcimagebundlelib import linux
-
-
-class SUSE(linux.LinuxPlatform):
- """openSUSE and SUSE generic platform info."""
-
- def __init__(self):
- super(SUSE, self).__init__()
- self.distribution_codename = None
- self.ParseOSRelease()
- if not self.distribution:
- self.ParseSUSERelease()
- if not self.distribution:
- self.distribution = ''
-
- def ParseOSRelease(self):
- """Parse the /etc/os-release file."""
- release_file = '/etc/os-release'
- if not os.path.isfile(release_file):
- self.distribution = None
- return
- lines = open(release_file, 'r').readlines()
- for ln in lines:
- if not ln:
- continue
- if re.match(r'^NAME=', ln):
- self.distribution = self.__getData(ln)
- if re.match(r'^VERSION_ID=', ln):
- self.distribution_version = self.__getData(ln)
- if re.match(r'^VERSION=', ln):
- data = self.__getData(ln)
- self.distribution_codename = data.split('(')[-1][:-1]
- return
-
- def ParseSUSERelease(self):
- """Parse /etc/SuSE-release file."""
- release_file = '/etc/SuSE-release'
- if not os.path.isfile(release_file):
- self.distribution = None
- return
- lines = open(release_file, 'r').readlines()
-    prts = lines[0].split()
-    cnt = 0
-    self.distribution = ''
-    # Collect the leading words of the first line up to the version number.
-    while cnt < len(prts):
-      item = prts[cnt]
-      if re.match(r'\d', item):
-        break
-      elif cnt > 0:
-        self.distribution += ' '
-      self.distribution += item
-      cnt += 1
-
- for ln in lines:
- if re.match(r'^VERSION =', ln):
- self.distribution_version = self.__getData(ln)
- if re.match(r'^CODENAME =', ln):
- self.distribution_codename = self.__getData(ln)
- return
-
-  def __getData(self, ln):
-    """Extracts data from a line in a file. Returns the data inside the
-    first pair of double quotes ("a b" yields a b) or, if no double
-    quotes exist, the data after the '=' sign. Leading and trailing
-    whitespace is stripped."""
- if ln.find('"') != -1:
- return ln.split('"')[1]
- else:
- return ln.split('=')[-1].strip()
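-
-
-# Illustrative sketch (not part of the original module): the fields the
-# parsers above populate. The helper name is hypothetical, and the attribute
-# values depend on which release file is present.
-def _ExampleDescribeSuse():
-  """Prints the parsed SUSE distribution fields (sketch)."""
-  p = SUSE()
-  print p.distribution, p.distribution_codename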
diff --git a/legacy/gcimagebundle/gcimagebundlelib/tests/__init__.py b/legacy/gcimagebundle/gcimagebundlelib/tests/__init__.py
deleted file mode 100644
index 42723d7..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/tests/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unit tests module for Image Bundle."""
diff --git a/legacy/gcimagebundle/gcimagebundlelib/tests/block_disk_test.py b/legacy/gcimagebundle/gcimagebundlelib/tests/block_disk_test.py
deleted file mode 100755
index 1cbb384..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/tests/block_disk_test.py
+++ /dev/null
@@ -1,512 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittest for block_disk.py module."""
-
-
-__pychecker__ = 'no-local' # for unittest
-
-from contextlib import closing
-import json
-import logging
-import os
-import random
-import subprocess
-import tarfile
-import tempfile
-import unittest
-import urllib2
-
-from gcimagebundlelib import block_disk
-from gcimagebundlelib import exclude_spec
-from gcimagebundlelib.tests import image_bundle_test_base
-from gcimagebundlelib import utils
-
-
-class FsRawDiskTest(image_bundle_test_base.ImageBundleTest):
- """FsRawDisk Unit Test."""
-
-  _MEGABYTE = 1024 * 1024
-  _GIGABYTE = 1024 * _MEGABYTE
-
- def setUp(self):
- super(FsRawDiskTest, self).setUp()
-    self._fs_size = 10 * FsRawDiskTest._MEGABYTE
- self._bundle = block_disk.FsRawDisk(self._fs_size, 'ext4')
- self._tar_path = self.tmp_path + '/image.tar.gz'
- self._bundle.SetTarfile(self._tar_path)
- self._bundle.AppendExcludes([exclude_spec.ExcludeSpec(self._tar_path)])
- self._bundle.SetKey('key')
- self._bundle._SetManifest(self._manifest)
-
- def _SetupMbrDisk(self, partition_start, partition_end, fs_uuid):
- """Creates a disk with a fake MBR.
-
- Args:
- partition_start: The byte offset where the partition starts.
- partition_end: The byte offset where the partition ends.
- fs_uuid: The UUID of the filesystem to create on the partition.
-
- Returns:
- The path where the disk is located.
- """
- # Create the disk file with the size specified.
- disk_path = os.path.join(self.tmp_root, 'mbrdisk.raw')
- disk_size = partition_end + FsRawDiskTest._MEGABYTE
- with open(disk_path, 'wb') as disk_file:
- disk_file.truncate(disk_size)
-
- # Create a partition table
- utils.MakePartitionTable(disk_path)
-
- # Create the partition
- utils.MakePartition(disk_path, 'primary', 'ext2',
- partition_start, partition_end)
-
- # Create the file system
- with utils.LoadDiskImage(disk_path) as devices:
- utils.MakeFileSystem(devices[0], 'ext4', fs_uuid)
-
- # Write some data after the MBR but before the first partition
- with open(disk_path, 'r+b') as disk_file:
- # Seek to last two bytes of first sector
- disk_file.seek(510)
- # Write MBR signature
- disk_file.write(chr(0x55))
- disk_file.write(chr(0xAA))
- # Write random data on the disk till the point first partition starts
- for _ in range(partition_start - 512):
- # Write a byte
- disk_file.write(chr(random.randint(0, 127)))
-
- return disk_path
-
- def tearDown(self):
- super(FsRawDiskTest, self).tearDown()
-
- def testDiskBundle(self):
- """Tests bundle command when a disk is specified.
-
-    Creates a 20GB source disk and verifies that bundling it into a
-    10MB image file works.
-    """
- # Create a 20GB disk with first partition starting at 1MB
- self._TestDiskBundleHelper(FsRawDiskTest._MEGABYTE,
- FsRawDiskTest._GIGABYTE*20,
- utils.RunCommand(['uuidgen']).strip())
-
- def testDiskBundlePartitionAt2MB(self):
- """Tests bundle command when a disk is specified.
-
- Creates the first partition at 2MB and verifies all data prior to that is
- copied.
- """
- # Create a 20GB disk with first partition starting at 2MB
- self._TestDiskBundleHelper(FsRawDiskTest._MEGABYTE*2,
- FsRawDiskTest._GIGABYTE*20,
- utils.RunCommand(['uuidgen']).strip())
-
- def _TestDiskBundleHelper(self, partition_start, partition_end, fs_uuid):
- disk_path = self._SetupMbrDisk(partition_start, partition_end, fs_uuid)
-
- with utils.LoadDiskImage(disk_path) as devices:
-      # Get the path to the disk.
-      # devices will contain something like /dev/mapper/loop0p1;
-      # we need to extract loop0 from it.
- disk_loop_back_path = '/dev/' + devices[0].split('/')[3][:-2]
-
-      # Create symlinks to the disk and loopback paths.
-      # This is required because the code assumes the first partition
-      # path is the device path with '1' appended. Remove this once that
-      # part of the code is updated.
- symlink_disk = os.path.join(self.tmp_root, 'disk')
- symlink_partition = self.tmp_root + '/disk1'
- utils.RunCommand(['ln', '-s', disk_loop_back_path, symlink_disk])
- utils.RunCommand(['ln', '-s', devices[0], symlink_partition])
-
- # Bundle up
- self._bundle.AddDisk(symlink_disk)
- self._bundle.AddSource(self.tmp_path)
- self._bundle.Verify()
- (_, _) = self._bundle.Bundleup()
- self._VerifyImageHas(self._tar_path,
- ['lost+found', 'test1', 'test2', 'dir1/',
- '/dir1/dir11/', '/dir1/sl1', '/dir1/hl2', 'dir2/',
- '/dir2/dir1', '/dir2/sl2', '/dir2/hl1'])
- self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test1', 2)
- self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test2', 2)
- self._VerifyDiskSize(self._tar_path, self._fs_size)
- self._VerifyNonPartitionContents(self._tar_path,
- disk_path,
- partition_start)
- self._VerifyFilesystemUUID(self._tar_path, fs_uuid)
-
- def testRawDisk(self):
- """Tests the regular operation. No expected error."""
- self._bundle.AddSource(self.tmp_path)
- self._bundle.Verify()
- (_, digest) = self._bundle.Bundleup()
- if not digest:
- self.fail('raw disk failed')
- self._VerifyTarHas(self._tar_path, ['disk.raw'])
- self._VerifyImageHas(self._tar_path,
- ['lost+found', 'test1', 'test2', 'dir1/',
- '/dir1/dir11/', '/dir1/sl1', '/dir1/hl2', 'dir2/',
- '/dir2/dir1', '/dir2/sl2', '/dir2/hl1'])
- self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test1', 2)
- self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test2', 2)
-
- def testRawDiskIgnoresHardlinks(self):
- """Tests if the raw disk ignores hard links if asked."""
- self._bundle.AddSource(self.tmp_path)
- self._bundle.IgnoreHardLinks()
- self._bundle.Verify()
- (_, digest) = self._bundle.Bundleup()
- if not digest:
- self.fail('raw disk failed')
- self._VerifyTarHas(self._tar_path, ['disk.raw'])
- self._VerifyImageHas(self._tar_path,
- ['lost+found', 'test1', 'test2', 'dir1/',
- '/dir1/dir11/', '/dir1/sl1', '/dir1/hl2', 'dir2/',
- '/dir2/dir1', '/dir2/sl2', '/dir2/hl1'])
- self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test1', 1)
- self._VerifyNumberOfHardLinksInRawDisk(self._tar_path, 'test2', 1)
-
- def testRawDiskIgnoresExcludes(self):
- """Tests if the raw disk ignores specified excludes files."""
- self._bundle.AddSource(self.tmp_path)
- self._bundle.AppendExcludes(
- [exclude_spec.ExcludeSpec(self.tmp_path + '/dir1')])
- self._bundle.Verify()
- (_, digest) = self._bundle.Bundleup()
- if not digest:
- self.fail('raw disk failed')
- self._VerifyTarHas(self._tar_path, ['disk.raw'])
- self._VerifyImageHas(self._tar_path,
- ['lost+found', 'test1', 'test2', 'dir2/', '/dir2/dir1',
- '/dir2/sl2', '/dir2/hl1'])
-
- def testRawDiskExcludePreservesSubdirs(self):
- """Tests if excludes preserves subdirs underneath if asked."""
- self._bundle.AddSource(self.tmp_path)
- self._bundle.AppendExcludes(
- [exclude_spec.ExcludeSpec(self.tmp_path + '/dir1',
- preserve_dir=True,
- preserve_subdir=True)])
- self._bundle.Verify()
- (_, digest) = self._bundle.Bundleup()
- if not digest:
- self.fail('raw disk failed')
- self._VerifyTarHas(self._tar_path, ['disk.raw'])
- self._VerifyImageHas(self._tar_path,
- ['lost+found', 'test1', 'test2', 'dir1/',
- '/dir1/dir11', 'dir2/', '/dir2/dir1',
- '/dir2/sl2', '/dir2/hl1'])
-
- def testRawDiskExcludePreservesFiles(self):
- """Tests if excludes preserves the files underneath if asked."""
- self._bundle.AddSource(self.tmp_path)
- self._bundle.AppendExcludes(
- [exclude_spec.ExcludeSpec(self.tmp_path + '/dir1',
- preserve_dir=True,
- preserve_file=True)])
- self._bundle.Verify()
- (_, digest) = self._bundle.Bundleup()
- if not digest:
- self.fail('raw disk failed')
- self._VerifyTarHas(self._tar_path, ['disk.raw'])
- self._VerifyImageHas(self._tar_path,
- ['lost+found', 'test1', 'test2', 'dir1/', '/dir1/hl2',
- '/dir1/sl1', 'dir2/', '/dir2/dir1', '/dir2/sl2',
- '/dir2/hl1'])
-
- def testRawDiskUsesModifiedFiles(self):
- """Tests if the raw disk uses modified files."""
- self._bundle.AddSource(self.tmp_path)
- self._bundle.AppendExcludes(
- [exclude_spec.ExcludeSpec(self.tmp_path + '/dir1')])
- self._bundle.SetPlatform(image_bundle_test_base.MockPlatform(self.tmp_root))
- self._bundle.Verify()
- (_, digest) = self._bundle.Bundleup()
- if not digest:
- self.fail('raw disk failed')
- self._VerifyTarHas(self._tar_path, ['disk.raw'])
- self._VerifyImageHas(self._tar_path,
- ['lost+found', 'test1', 'test2', 'dir2/',
- '/dir2/dir1', '/dir2/sl2', '/dir2/hl1'])
- self._VerifyFileInRawDiskEndsWith(self._tar_path, 'test1',
- 'something extra.')
-
- def testRawDiskGeneratesCorrectDigest(self):
- """Tests if the SHA1 digest generated is accurate."""
- self._bundle.AddSource(self.tmp_path)
- self._bundle.Verify()
- (_, digest) = self._bundle.Bundleup()
- if not digest:
- self.fail('raw disk failed')
- p = subprocess.Popen(['/usr/bin/openssl dgst -sha1 ' + self._tar_path],
- stdout=subprocess.PIPE, shell=True)
- file_digest = p.communicate()[0].split('=')[1].strip()
- self.assertEqual(digest, file_digest)
-
- def testRawDiskHonorsRecursiveOff(self):
- """Tests if raw disk handles recursive off."""
- self._bundle.AppendExcludes([exclude_spec.ExcludeSpec(self._tar_path)])
- self._bundle.AddSource(self.tmp_path + '/dir1',
- arcname='dir1', recursive=False)
- self._bundle.AddSource(self.tmp_path + '/dir2', arcname='dir2')
- self._bundle.Verify()
- (_, digest) = self._bundle.Bundleup()
- if not digest:
- self.fail('raw disk failed')
- self._VerifyTarHas(self._tar_path, ['disk.raw'])
- self._VerifyImageHas(self._tar_path,
- ['lost+found', 'dir1/', 'dir2/', '/dir2/dir1',
- '/dir2/sl2', '/dir2/hl1'])
-
- def testSkipLicenseCheckIfNotOnGCE(self):
- """Tests that no licenses are loaded if gcimagebundle is not run on GCE."""
- class MockHttp(utils.Http):
- def Get(self, request, timeout=None):
-      # If gcimagebundle is not run on GCE, the metadata server is unreachable.
- raise urllib2.URLError("urlopen error timed out")
-
- self._http = MockHttp()
- self._manifest._http = self._http
- self._manifest._is_gce_instance = False
-
- self._bundle.AddSource(self.tmp_path)
- self._bundle.Verify()
- _ = self._bundle.Bundleup()
- self.assertFalse(self._bundle._manifest._IsManifestNeeded())
- self._VerifyTarHas(self._tar_path, ['disk.raw'])
-
- def testNoManifestCreatedWithZeroLicenses(self):
- """Tests that no manifest is created when there are 0 licenses."""
- self._bundle.AddSource(self.tmp_path)
- self._bundle.Verify()
- _ = self._bundle.Bundleup()
- self.assertFalse(self._bundle._manifest._IsManifestNeeded())
- self._VerifyTarHas(self._tar_path, ['disk.raw'])
-
- def testManifestWithOneLicense(self):
- """Tests manifest is populated with 1 license."""
- self._http._instance_response = ('{"hostname":"test",'
- '"licenses":[{"id":"TEST-LICENSE"}]}')
- self._bundle.AddSource(self.tmp_path)
- self._bundle.Verify()
- _ = self._bundle.Bundleup()
- manifest_json = self._bundle._manifest._ToJson()
- manifest_obj = json.loads(manifest_json)
- self.assertTrue(self._bundle._manifest._IsManifestNeeded())
- self.assertEqual(1, len(manifest_obj['licenses']))
- self.assertEqual('TEST-LICENSE', manifest_obj['licenses'][0])
- self._VerifyTarHas(self._tar_path, ['manifest.json', 'disk.raw'])
- self._VerifyFileContentsInTarball(self._tar_path,
- 'manifest.json',
- '{"licenses": ["TEST-LICENSE"]}')
-
- def testManifestWithTwoLicenses(self):
- """Tests manifest is populated with 2 licenses."""
- self._http._instance_response = ('{"hostname":"test",'
- '"licenses":[{"id":"TEST-1"},'
- '{"id":"TEST-2"}]}')
- self._bundle.AddSource(self.tmp_path)
- self._bundle.Verify()
- _ = self._bundle.Bundleup()
- manifest_json = self._bundle._manifest._ToJson()
- manifest_obj = json.loads(manifest_json)
- self.assertTrue(self._bundle._manifest._IsManifestNeeded())
- self.assertEqual(2, len(manifest_obj['licenses']))
- self.assertEqual('TEST-1', manifest_obj['licenses'][0])
- self.assertEqual('TEST-2', manifest_obj['licenses'][1])
- self._VerifyTarHas(self._tar_path, ['manifest.json', 'disk.raw'])
- self._VerifyFileContentsInTarball(self._tar_path,
- 'manifest.json',
- '{"licenses": ["TEST-1", "TEST-2"]}')
-
- def _VerifyFilesystemUUID(self, tar, expected_uuid):
- """Verifies UUID of the first partition on disk matches the value."""
- tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
- tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
- self.assertEqual(subprocess.call(tar_cmd), 0)
-
- created_disk_path = os.path.join(tmp_dir, 'disk.raw')
- with utils.LoadDiskImage(created_disk_path) as devices:
- self.assertEqual(1, len(devices))
- self.assertEqual(expected_uuid, utils.GetUUID(devices[0]))
-
- def _VerifyNonPartitionContents(self, tar, disk_path, partition_start):
- """Verifies that bytes outside the partition are preserved."""
- tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
- tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
- self.assertEqual(subprocess.call(tar_cmd), 0)
- created_disk_path = os.path.join(tmp_dir, 'disk.raw')
-
-    # Verify the first partition in both disks starts at the same offset
- self.assertEqual(partition_start,
- utils.GetPartitionStart(disk_path, 1))
- self.assertEqual(partition_start,
- utils.GetPartitionStart(created_disk_path, 1))
- with open(disk_path, 'r') as source_file:
- with open(created_disk_path, 'r') as created_file:
-        # Seek to byte 510 in both streams and verify that the remaining
-        # bytes up to the partition start are identical.
- source_file.seek(510)
- created_file.seek(510)
- for i in range(partition_start - 510):
- self.assertEqual(source_file.read(1),
- created_file.read(1),
- 'byte at position %s not equal' % (i + 510))
-
- def _VerifyDiskSize(self, tar, expected_size):
- """Verifies that the disk file has the same size as expected."""
- tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
- tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
- self.assertEqual(subprocess.call(tar_cmd), 0)
- disk_path = os.path.join(tmp_dir, 'disk.raw')
- statinfo = os.stat(disk_path)
- self.assertEqual(expected_size, statinfo.st_size)
-
- def _VerifyImageHas(self, tar, expected):
- """Tests if raw disk contains an expected list of files/directories."""
- tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
- tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
- self.assertEqual(subprocess.call(tar_cmd), 0)
- disk_path = os.path.join(tmp_dir, 'disk.raw')
- with utils.LoadDiskImage(disk_path) as devices:
- self.assertEqual(len(devices), 1)
- mnt_dir = tempfile.mkdtemp(dir=self.tmp_root)
- with utils.MountFileSystem(devices[0], mnt_dir, 'ext4'):
- found = []
- for root, dirs, files in os.walk(mnt_dir):
- root = root.replace(mnt_dir, '')
- for f in files:
- found.append(os.path.join(root, f))
- for d in dirs:
- found.append(os.path.join(root, d))
- self._AssertListEqual(expected, found)
-
- def _VerifyFileContentsInTarball(self, tar, file_name, expected_content):
-    """Reads the file from the tarball and verifies its contents."""
- with closing(tarfile.open(tar)) as tar_file:
- content = tar_file.extractfile(file_name).read()
- self.assertEqual(content, expected_content)
-
- def _VerifyFileInRawDiskEndsWith(self, tar, filename, text):
-    """Tests if a file on the raw disk ends with the specified text."""
- tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
- tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
- self.assertEqual(subprocess.call(tar_cmd), 0)
- disk_path = os.path.join(tmp_dir, 'disk.raw')
- with utils.LoadDiskImage(disk_path) as devices:
- self.assertEqual(len(devices), 1)
- mnt_dir = tempfile.mkdtemp(dir=self.tmp_root)
- with utils.MountFileSystem(devices[0], mnt_dir, 'ext4'):
- f = open(os.path.join(mnt_dir, filename), 'r')
- file_content = f.read()
- f.close()
- self.assertTrue(file_content.endswith(text))
-
- def _VerifyNumberOfHardLinksInRawDisk(self, tar, filename, count):
- """Tests if a file on raw disk has a specified number of hard links."""
- tmp_dir = tempfile.mkdtemp(dir=self.tmp_root)
- tar_cmd = ['tar', '-xzf', tar, '-C', tmp_dir]
- self.assertEqual(subprocess.call(tar_cmd), 0)
- disk_path = os.path.join(tmp_dir, 'disk.raw')
- with utils.LoadDiskImage(disk_path) as devices:
- self.assertEqual(len(devices), 1)
- mnt_dir = tempfile.mkdtemp(dir=self.tmp_root)
- with utils.MountFileSystem(devices[0], mnt_dir, 'ext4'):
- self.assertEqual(os.stat(os.path.join(mnt_dir, filename)).st_nlink,
- count)
-
-
-class RootFsRawTest(image_bundle_test_base.ImageBundleTest):
- """RootFsRaw Unit Test."""
-
- def setUp(self):
- super(RootFsRawTest, self).setUp()
- self._bundle = block_disk.RootFsRaw(
-        10 * 1024 * 1024, 'ext4', False, self._MockStatvfs)
- self._tar_path = self.tmp_path + '/image.tar.gz'
- self._bundle.SetTarfile(self._tar_path)
- self._bundle.AppendExcludes([exclude_spec.ExcludeSpec(self._tar_path)])
- self._bundle._SetManifest(self._manifest)
-
- def tearDown(self):
- super(RootFsRawTest, self).tearDown()
-
- def testRootRawDiskVerifiesOneSource(self):
- """Tests that only one root directory is allowed."""
- self._bundle.AddSource(self.tmp_path)
- self._bundle.AddSource(self.tmp_path + '/dir1')
- self._bundle.SetKey('key')
- try:
- self._bundle.Verify()
- except block_disk.InvalidRawDiskError:
- return
- self.fail()
-
- def testRootRawDiskVerifiesRootDestination(self):
- """Tests that destination directory must be /."""
- self._bundle.AddSource(self.tmp_path, arcname='/tmp')
- self._bundle.SetKey('key')
- try:
- self._bundle.Verify()
- except block_disk.InvalidRawDiskError:
- return
- self.fail()
-
- def testRootRawDiskNotEnoughFreeSpace(self):
-    """Tests that verification fails when there is not enough free disk space."""
- self._statvfs_map = {
- "/" : image_bundle_test_base.StatvfsResult(1024, 500, 100),
- "/tmp" : image_bundle_test_base.StatvfsResult(1024, 500, 100)
- }
- self._bundle.AddSource("/")
- self._bundle.SetKey('key')
- try:
- self._bundle.Verify()
- except block_disk.InvalidRawDiskError as e:
- print str(e)
- return
- self.fail()
-
- def testRootFilesExceedDiskSize(self):
-    """Tests that verification fails when source files exceed the raw disk size."""
- self._statvfs_map = {
- "/" : image_bundle_test_base.StatvfsResult(1024, 50000, 20000),
- "/tmp" : image_bundle_test_base.StatvfsResult(1024, 100000, 90000)
- }
- self._bundle.AddSource("/")
- self._bundle.SetKey('key')
- try:
- self._bundle.Verify()
- except block_disk.InvalidRawDiskError as e:
- print str(e)
- return
- self.fail()
-
- def _MockStatvfs(self, file_path):
- return self._statvfs_map[file_path]
-
-def main():
- logging.basicConfig(level=logging.DEBUG)
- unittest.main()
-
-
-if __name__ == '__main__':
- main()
diff --git a/legacy/gcimagebundle/gcimagebundlelib/tests/image_bundle_test_base.py b/legacy/gcimagebundle/gcimagebundlelib/tests/image_bundle_test_base.py
deleted file mode 100755
index 37b7fae..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/tests/image_bundle_test_base.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Base class for image_bundle unittests."""
-
-
-__pychecker__ = 'no-local' # for unittest
-
-
-import os
-import re
-import shutil
-import subprocess
-import tarfile
-import tempfile
-import unittest
-import urllib2
-
-from gcimagebundlelib import manifest
-from gcimagebundlelib.os_platform import Platform
-from gcimagebundlelib import utils
-
-
-class InvalidOverwriteFileException(Exception):
- """Invalid overwrite target was passed to MockPlatform.Overwrite method."""
-
-
-class MockPlatform(Platform):
- """Mock platform for image bundle unit tests."""
- OVERWRITE_LIST = ['test1']
-
- def __init__(self, tmp_root):
- super(MockPlatform, self).__init__()
- self.tmp_root = tmp_root
-
- def Overwrite(self, filename, arcname, tmpdir):
- temp = tempfile.mktemp(dir=tmpdir)
- if arcname != 'test1':
- raise InvalidOverwriteFileException(arcname)
- fd = open(temp, 'w')
- fd.write(open(filename).read())
- fd.write('something extra.')
- fd.close()
- return temp
-
-
-class MockHttp(utils.Http):
- """Fake implementation of the utils.Http client. Used for metadata queries."""
- def __init__(self):
- self._instance_response = '{"hostname":"test"}'
-
- def Get(self, request, timeout=None):
- """Accepts an Http request and returns a precanned response."""
- url = request.get_full_url()
- if url == utils.METADATA_URL_PREFIX:
- return 'v1/'
- elif url.startswith(utils.METADATA_V1_URL_PREFIX):
- url = url.replace(utils.METADATA_V1_URL_PREFIX, '')
- if url == 'instance/?recursive=true':
- return self._instance_response
-    raise urllib2.HTTPError(url, 404, 'Not Found', None, None)
-
-class StatvfsResult:
-  """A struct for a partial os.statvfs result, used to mock the result."""
-
- def __init__(self, f_bsize, f_blocks, f_bfree):
- self.f_bsize = f_bsize
- self.f_blocks = f_blocks
- self.f_bfree = f_bfree
-
-class ImageBundleTest(unittest.TestCase):
- """ImageBundle Unit Test Base Class."""
-
- def setUp(self):
- self.tmp_root = tempfile.mkdtemp(dir='/tmp')
- self.tmp_path = tempfile.mkdtemp(dir=self.tmp_root)
- self._http = MockHttp()
- self._manifest = manifest.ImageManifest(http=self._http, is_gce_instance=True)
- self._SetupFilesystemToTar()
-
- def tearDown(self):
- self._CleanupFiles()
-
- def _SetupFilesystemToTar(self):
- """Creates some directory structure to tar."""
- if os.path.exists(self.tmp_path):
- shutil.rmtree(self.tmp_path)
- os.makedirs(self.tmp_path)
- with open(self.tmp_path + '/test1', 'w') as fd:
- print >> fd, 'some text'
- shutil.copyfile(self.tmp_path + '/test1', self.tmp_path + '/test2')
- os.makedirs(self.tmp_path + '/dir1')
- os.makedirs(self.tmp_path + '/dir1/dir11')
- os.makedirs(self.tmp_path + '/dir2')
- os.makedirs(self.tmp_path + '/dir2/dir1')
- os.symlink(self.tmp_path + '/test1', self.tmp_path + '/dir1/sl1')
- os.link(self.tmp_path + '/test2', self.tmp_path + '/dir1/hl2')
- os.symlink(self.tmp_path + '/test2', self.tmp_path + '/dir2/sl2')
- os.link(self.tmp_path + '/test1', self.tmp_path + '/dir2/hl1')
-
- def _CleanupFiles(self):
- """Removes the files under test directory."""
- if os.path.exists(self.tmp_root):
- shutil.rmtree(self.tmp_root)
-
- def _VerifyTarHas(self, tar, expected):
- p = subprocess.Popen(['tar -tf %s' % tar],
- stdout=subprocess.PIPE, shell=True)
- found = p.communicate()[0].split('\n')
- if './' in found:
- found.remove('./')
- if '' in found:
- found.remove('')
- self._AssertListEqual(expected, found)
-
- def _VerifyFileInTarEndsWith(self, tar, filename, text):
- tf = tarfile.open(tar, 'r:gz')
- fd = tf.extractfile(filename)
- file_content = fd.read()
- self.assertTrue(file_content.endswith(text))
-
- def _AssertListEqual(self, list1, list2):
- """Asserts that, when sorted, list1 and list2 are identical."""
- sorted_list1 = [re.sub(r'/$', '', x) for x in list1]
- sorted_list2 = [re.sub(r'/$', '', x) for x in list2]
- sorted_list1.sort()
- sorted_list2.sort()
- self.assertEqual(sorted_list1, sorted_list2)
diff --git a/legacy/gcimagebundle/gcimagebundlelib/tests/utils_test.py b/legacy/gcimagebundle/gcimagebundlelib/tests/utils_test.py
deleted file mode 100755
index dd7d2cd..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/tests/utils_test.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/python
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Unittest for utils.py module."""
-
-__pychecker__ = 'no-local' # for unittest
-
-import logging
-import subprocess
-import unittest
-import uuid
-
-from gcimagebundlelib import utils
-
-
-class ImageBundleTest(unittest.TestCase):
-
- def testRunCommand(self):
- """Run a simple command and verify it works."""
- utils.RunCommand(['ls', '/'])
-
- def testRunCommandThatFails(self):
- """Run a command that will fail and verify it raises the correct error."""
- def RunCommandUnderTest():
- non_existent_path = '/' + uuid.uuid4().hex
- utils.RunCommand(['mkfs', '-t', 'ext4', non_existent_path])
- self.assertRaises(subprocess.CalledProcessError, RunCommandUnderTest)
-
-
-def main():
- logging.basicConfig(level=logging.DEBUG)
- unittest.main()
-
-
-if __name__ == '__main__':
- main()
-
diff --git a/legacy/gcimagebundle/gcimagebundlelib/ubuntu.py b/legacy/gcimagebundle/gcimagebundlelib/ubuntu.py
deleted file mode 100644
index 8d68687..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/ubuntu.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Ubuntu specific platform info."""
-
-import csv
-import os
-from gcimagebundlelib import linux
-
-
-class Ubuntu(linux.LinuxPlatform):
- """Ubuntu specific information."""
-
- @staticmethod
- def IsThisPlatform(root='/'):
- release_file = root + '/etc/lsb-release'
- if os.path.exists(release_file):
- (_, _, flavor, _) = Ubuntu.ParseLsbRelease(release_file)
- if flavor and flavor.lower() == 'ubuntu':
- return True
- return False
-
- @staticmethod
- def ParseLsbRelease(release_file='/etc/lsb-release'):
-    """Parses the /etc/lsb-release file."""
- release_info = {}
- for line in csv.reader(open(release_file), delimiter='='):
- if len(line) > 1:
- release_info[line[0]] = line[1]
- if ('DISTRIB_CODENAME' not in release_info or
- 'DISTRIB_DESCRIPTION' not in release_info or
- 'DISTRIB_ID' not in release_info or
- 'DISTRIB_RELEASE' not in release_info):
- return (None, None, None, None)
- return (release_info['DISTRIB_CODENAME'],
- release_info['DISTRIB_DESCRIPTION'],
- release_info['DISTRIB_ID'],
- release_info['DISTRIB_RELEASE'])
-
- def __init__(self):
- super(Ubuntu, self).__init__()
- (self.distribution_codename, _, self.distribution,
- self.distribution_version) = Ubuntu.ParseLsbRelease()
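-
-
-# Illustrative sketch (not part of the original module): ParseLsbRelease
-# returns a 4-tuple, or (None, None, None, None) when a key is missing. The
-# helper name is hypothetical, and it assumes /etc/lsb-release exists.
-def _ExampleDescribeUbuntu():
-  """Prints the parsed /etc/lsb-release fields (sketch)."""
-  codename, description, flavor, release = Ubuntu.ParseLsbRelease()
-  if flavor is None:
-    print 'missing DISTRIB_* keys in /etc/lsb-release'
-  else:
-    print '%s %s (%s): %s' % (flavor, release, codename, description)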
diff --git a/legacy/gcimagebundle/gcimagebundlelib/utils.py b/legacy/gcimagebundle/gcimagebundlelib/utils.py
deleted file mode 100644
index a8fde40..0000000
--- a/legacy/gcimagebundle/gcimagebundlelib/utils.py
+++ /dev/null
@@ -1,455 +0,0 @@
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Utilities for image bundling tool."""
-
-import logging
-import os
-import subprocess
-import time
-import urllib2
-
-METADATA_URL_PREFIX = 'http://169.254.169.254/computeMetadata/'
-METADATA_V1_URL_PREFIX = METADATA_URL_PREFIX + 'v1/'
-
-
-class MakeFileSystemException(Exception):
- """Error occurred in file system creation."""
-
-
-class TarAndGzipFileException(Exception):
- """Error occurred in creating the tarball."""
-
-
-class LoadDiskImage(object):
- """Loads raw disk image using kpartx."""
-
- def __init__(self, file_path):
-    """Initializes LoadDiskImage object.
-
-    Args:
-      file_path: A path to a file containing a raw disk image.
-    """
- self._file_path = file_path
-
- def __enter__(self):
-    """Maps the disk image as a device.
-
-    Returns:
-      A list of device paths, one for every partition found in the image.
-    """
- SyncFileSystem()
- kpartx_cmd = ['kpartx', '-a', '-v', '-s', self._file_path]
- output = RunCommand(kpartx_cmd)
- devs = []
- for line in output.splitlines():
- split_line = line.split()
- if (len(split_line) > 2 and split_line[0] == 'add'
- and split_line[1] == 'map'):
- devs.append('/dev/mapper/' + split_line[2])
- time.sleep(2)
- return devs
-
- def __exit__(self, unused_exc_type, unused_exc_value, unused_exc_tb):
- """Unmap disk image as a device.
-
- Args:
- unused_exc_type: unused.
- unused_exc_value: unused.
- unused_exc_tb: unused.
- """
- SyncFileSystem()
- time.sleep(2)
- kpartx_cmd = ['kpartx', '-d', '-v', '-s', self._file_path]
- RunCommand(kpartx_cmd)
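-
-
-# Illustrative sketch (not part of the original module): LoadDiskImage is a
-# context manager, so the kpartx mappings are removed even on error. The
-# helper name is hypothetical; requires root and kpartx.
-def _ExamplePrintPartitions(disk_path):
-  """Prints the mapped device node of each partition (sketch)."""
-  with LoadDiskImage(disk_path) as devices:
-    for dev in devices:
-      print dev  # e.g. /dev/mapper/loop0p1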
-
-
-class MountFileSystem(object):
- """Mounts a file system."""
-
- def __init__(self, dev_path, dir_path, fs_type):
- """Initializes MountFileSystem object.
-
-    Args:
-      dev_path: A path to a device to mount.
-      dir_path: A path to a directory where a device is to be mounted.
-      fs_type: The type of the file system to be mounted, e.g. ext4 or xfs.
-    """
- self._dev_path = dev_path
- self._dir_path = dir_path
- self._fs_type = fs_type
-
-  def __enter__(self):
-    """Mounts a device."""
-    # Since the bundled image can have the same UUID as the root disk,
-    # we should prevent UUID conflicts for xfs mounts.
-    if self._fs_type == 'xfs':
- mount_cmd = ['mount', '-o', 'nouuid', self._dev_path, self._dir_path]
- else:
- mount_cmd = ['mount', self._dev_path, self._dir_path]
- RunCommand(mount_cmd)
-
- def __exit__(self, unused_exc_type, unused_exc_value, unused_exc_tb):
- """Unmounts a file system.
-
- Args:
- unused_exc_type: unused.
- unused_exc_value: unused.
- unused_exc_tb: unused.
- """
- umount_cmd = ['umount', self._dir_path]
- RunCommand(umount_cmd)
- SyncFileSystem()
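-
-
-# Illustrative sketch (not part of the original module): combining
-# LoadDiskImage and MountFileSystem to inspect an image's first partition.
-# The helper name is hypothetical; requires root.
-def _ExampleListImageRoot(disk_path, mnt_dir):
-  """Prints the top-level entries of the image's first partition (sketch)."""
-  with LoadDiskImage(disk_path) as devices:
-    with MountFileSystem(devices[0], mnt_dir, 'ext4'):
-      print os.listdir(mnt_dir)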
-
-
-def SyncFileSystem():
-  """Flushes file system buffers to disk."""
-  RunCommand(['sync'])
-
-def GetMounts(root='/'):
- """Find all mount points under the specified root.
-
- Args:
-    root: The path under which to look for mount points.
-
- Returns:
- A list of mount points.
- """
- output = RunCommand(['/bin/mount', '-l'])
- mounts = []
- for line in output.splitlines():
- split_line = line.split()
- mount_point = split_line[2]
- if mount_point == root:
- continue
-    # We are simply ignoring the fs_type for now, but we can use it later.
-    # Just verify that these are actually mount points.
- if os.path.ismount(mount_point) and mount_point.startswith(root):
- mounts.append(mount_point)
- return mounts
-
-
-def MakePartitionTable(file_path):
- """Create a partition table in a file.
-
- Args:
- file_path: A path to a file where a partition table will be created.
- """
- RunCommand(['parted', file_path, 'mklabel', 'msdos'])
-
-
-def MakePartition(file_path, partition_type, fs_type, start, end):
- """Create a partition in a file.
-
- Args:
- file_path: A path to a file where a partition will be created.
-    partition_type: The type of partition to create, for example 'primary'.
- fs_type: A type of a file system to be created. For example, ext2, ext3,
- etc.
- start: Start offset of a partition in bytes.
- end: End offset of a partition in bytes.
- """
- parted_cmd = ['parted', file_path, 'unit B', 'mkpart', partition_type,
- fs_type, str(start), str(end)]
- RunCommand(parted_cmd)
-
-
-def MakeFileSystem(dev_path, fs_type, uuid=None):
- """Create a file system in a device.
-
- Args:
- dev_path: A path to a device.
- fs_type: A type of a file system to be created. For example ext2, ext3, etc.
- uuid: The value to use as the UUID for the filesystem. If none, a random
- UUID will be generated and used.
-
- Returns:
- The uuid of the filesystem. This will be the same as the passed value if
- a value was specified. If no uuid was passed in, this will be the randomly
- generated uuid.
-
- Raises:
- MakeFileSystemException: If mkfs encounters an error.
- """
- if uuid is None:
- uuid = RunCommand(['uuidgen']).strip()
- if uuid is None:
- raise MakeFileSystemException(dev_path)
-
- mkfs_cmd = ['mkfs', '-t', fs_type, dev_path]
- RunCommand(mkfs_cmd)
-
-  if fs_type == 'xfs':
- set_uuid_cmd = ['xfs_admin', '-U', uuid, dev_path]
- else:
- set_uuid_cmd = ['tune2fs', '-U', uuid, dev_path]
- RunCommand(set_uuid_cmd)
-
- return uuid
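-
-
-# Illustrative sketch (not part of the original module): formatting a mapped
-# partition and keeping its UUID for later verification. The helper name is
-# hypothetical.
-def _ExampleFormatExt4(dev_path):
-  """Creates an ext4 file system with a random UUID and returns it (sketch)."""
-  fs_uuid = MakeFileSystem(dev_path, 'ext4')
-  logging.info('created ext4 file system with UUID %s', fs_uuid)
-  return fs_uuid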
-
-
-def Rsync(src, dest, exclude_file, ignore_hard_links, recursive, xattrs):
- """Copy files from specified directory using rsync.
-
- Args:
- src: Source location to copy.
- dest: Destination to copy files to.
- exclude_file: A path to a file which contains a list of exclude from copy
- filters.
-    ignore_hard_links: If True, hard links are copied as separate files.
-      If False, hard links are recreated in dest.
- recursive: Specifies if directories are copied recursively or not.
- xattrs: Specifies if extended attributes are preserved or not.
- """
- rsync_cmd = ['rsync', '--times', '--perms', '--owner', '--group', '--links',
- '--devices', '--acls', '--sparse']
- if not ignore_hard_links:
- rsync_cmd.append('--hard-links')
- if recursive:
- rsync_cmd.append('--recursive')
- else:
- rsync_cmd.append('--dirs')
- if xattrs:
- rsync_cmd.append('--xattrs')
- if exclude_file:
- rsync_cmd.append('--exclude-from=' + exclude_file)
- rsync_cmd.extend([src, dest])
-
- logging.debug('Calling: %s', repr(rsync_cmd))
- if exclude_file:
- logging.debug('Contents of exclude file %s:', exclude_file)
- with open(exclude_file, 'rb') as excludes:
- for line in excludes:
- logging.debug(' %s', line.rstrip())
-
- RunCommand(rsync_cmd)
-
-
-def GetUUID(partition_path):
- """Fetches the UUID of the filesystem on the specified partition.
-
- Args:
- partition_path: The path to the partition.
-
- Returns:
- The uuid of the filesystem.
- """
-  output = RunCommand(['blkid', partition_path])
-  uuid = None
-  for token in output.split():
-    if token.startswith('UUID='):
-      uuid = token.strip()[len('UUID="'):-1]
-
- logging.debug('found uuid = %s', uuid)
- return uuid
-
-
-def CopyBytes(src, dest, count):
- """Copies count bytes from the src to dest file.
-
- Args:
- src: The source to read bytes from.
- dest: The destination to copy bytes to.
- count: Number of bytes to copy.
- """
- block_size = 4096
- block_count = count / block_size
- dd_command = ['dd',
- 'if=%s' % src,
- 'of=%s' % dest,
- 'conv=notrunc',
- 'bs=%s' % block_size,
- 'count=%s' % block_count]
- RunCommand(dd_command)
- remaining_bytes = count - block_count * block_size
- if remaining_bytes:
- logging.debug('remaining bytes to copy = %s', remaining_bytes)
- dd_command = ['dd',
- 'if=%s' % src,
- 'of=%s' % dest,
- 'seek=%s' % block_count,
- 'skip=%s' % block_count,
- 'conv=notrunc',
- 'bs=1',
- 'count=%s' % remaining_bytes]
- RunCommand(dd_command)
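-
-# Worked example (added for clarity, values illustrative): for
-# count = 10000 and block_size = 4096, the fast pass copies
-# block_count = 10000 / 4096 = 2 blocks (8192 bytes), and the bs=1 pass
-# copies the remaining_bytes = 10000 - 8192 = 1808 bytes.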
-
-
-def GetPartitionStart(disk_path, partition_number):
- """Returns the starting position in bytes of the partition.
-
- Args:
- disk_path: The path to disk device.
- partition_number: The partition number to lookup. 1 based.
-
-  Returns:
-    The starting position of the requested partition in bytes.
-
- Raises:
- subprocess.CalledProcessError: If running parted fails.
- IndexError: If there is no partition at the given number.
- """
- parted_cmd = ['parted',
- disk_path,
- 'unit B',
- 'print']
-  # In case the device is not valid and parted shows the retry/cancel
-  # prompt, write 'c' (cancel) to stdin.
- output = RunCommand(parted_cmd, input_str='c')
- for line in output.splitlines():
- split_line = line.split()
- if len(split_line) > 4 and split_line[0] == str(partition_number):
- return int(split_line[1][:-1])
- raise IndexError()
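-
-# Note (added for clarity): with 'unit B', parted prints partition rows
-# roughly like
-#   1      1048576B  20971519B  19922944B  primary  ext4
-# (layout is an assumption based on parted's byte-unit output), so
-# split_line[1][:-1] strips the trailing 'B' from the start offset.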
-
-
-def RemovePartition(disk_path, partition_number):
- """Removes the partition number from the disk.
-
- Args:
- disk_path: The disk to remove the partition from.
- partition_number: The partition number to remove.
- """
- parted_cmd = ['parted',
- disk_path,
- 'rm',
- str(partition_number)]
-  # In case the device is not valid and parted shows the retry/cancel
-  # prompt, write 'c' (cancel) to stdin.
- RunCommand(parted_cmd, input_str='c')
-
-
-def GetDiskSize(disk_file):
- """Returns the size of the disk device in bytes.
-
- Args:
- disk_file: The full path to the disk device.
-
- Returns:
- The size of the disk device in bytes.
-
- Raises:
- subprocess.CalledProcessError: If fdisk command fails for the disk file.
- """
- output = RunCommand(['fdisk', '-s', disk_file])
- return int(output) * 1024
-
-
-def RunCommand(command, input_str=None):
- """Runs the command and returns the output printed on stdout.
-
- Args:
- command: The command to run.
- input_str: The input to pass to subprocess via stdin.
-
- Returns:
- The stdout from running the command.
-
- Raises:
- subprocess.CalledProcessError: if the command fails.
- """
- logging.debug('running %s with input=%s', command, input_str)
- p = subprocess.Popen(command, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- cmd_output = p.communicate(input_str)
- logging.debug('stdout %s', cmd_output[0])
- logging.debug('stderr %s', cmd_output[1])
- logging.debug('returncode %s', p.returncode)
- if p.returncode:
- logging.warning('Error while running %s return_code = %s\n'
- 'stdout=%s\nstderr=%s',
- command, p.returncode, cmd_output[0],
- cmd_output[1])
- raise subprocess.CalledProcessError(p.returncode,
- cmd=command)
- return cmd_output[0]
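-
-
-# Illustrative sketch (not part of the original module): callers rely on
-# subprocess.CalledProcessError for failure handling. The helper name and
-# command are hypothetical.
-def _ExampleRunBlkid(partition_path):
-  """Runs blkid and returns its output, or None on failure (sketch)."""
-  try:
-    return RunCommand(['blkid', partition_path])
-  except subprocess.CalledProcessError as e:
-    logging.error('blkid failed with return code %s', e.returncode)
-    return None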
-
-
-def TarAndGzipFile(src_paths, dest):
- """Pack file in tar archive and optionally gzip it.
-
- Args:
- src_paths: A list of files that will be archived.
- (Must be in the same directory.)
-    dest: The archive name. If the name ends with .gz or .tgz, the archive
-      is gzipped as well.
-
- Raises:
- TarAndGzipFileException: If tar encounters an error.
- """
- if dest.endswith('.gz') or dest.endswith('.tgz'):
- mode = 'czSf'
- else:
- mode = 'cSf'
- src_names = [os.path.basename(src_path) for src_path in src_paths]
- # Take the directory of the first file in the list, all files are expected
- # to be in the same directory.
- src_dir = os.path.dirname(src_paths[0])
- tar_cmd = ['tar', mode, dest, '-C', src_dir] + src_names
- retcode = subprocess.call(tar_cmd)
- if retcode:
- raise TarAndGzipFileException(','.join(src_paths))
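-
-
-# Illustrative sketch (not part of the original module): the destination name
-# selects compression. Paths and the helper name are hypothetical.
-def _ExampleArchiveDisk(scratch_dir):
-  """Packs disk.raw into a gzipped tarball (sketch)."""
-  disk = os.path.join(scratch_dir, 'disk.raw')
-  TarAndGzipFile([disk], os.path.join(scratch_dir, 'image.tar.gz'))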
-
-
-class Http(object):
- def Get(self, request, timeout=None):
- return urllib2.urlopen(request, timeout=timeout).read()
-
- def GetMetadata(self, url_path, recursive=False, timeout=None):
- """Retrieves instance metadata.
-
- Args:
- url_path: The path of the metadata url after the api version.
- http://169.254.169.254/computeMetadata/v1/url_path
- recursive: If set, returns the tree of metadata starting at url_path as
- a json string.
- timeout: How long to wait for blocking operations (in seconds).
- A value of None uses urllib2's default timeout.
- Returns:
- The metadata returned based on the url path.
-
- """
- # Use the latest version of the metadata.
- suffix = ''
- if recursive:
- suffix = '?recursive=true'
- url = '{0}{1}{2}'.format(METADATA_V1_URL_PREFIX, url_path, suffix)
- request = urllib2.Request(url)
- request.add_unredirected_header('Metadata-Flavor', 'Google')
- return self.Get(request, timeout=timeout)
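-
-
-# Illustrative sketch (not part of the original module): fetching the
-# recursive instance metadata tree, as the manifest code does. The helper
-# name is hypothetical; json is imported locally since this module does not
-# import it at the top level.
-def _ExampleFetchLicenseIds():
-  """Returns the license ids of the current instance (sketch)."""
-  import json
-  metadata = json.loads(Http().GetMetadata('instance/', recursive=True))
-  return [entry['id'] for entry in metadata.get('licenses', [])]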
-
-
-def IsRunningOnGCE():
- """Detect if we are running on GCE.
-
- Returns:
- True if we are running on GCE, False otherwise.
- """
-  # Try accessing DMI/SMBIOS information through dmidecode first.
- try:
- dmidecode_cmd = ['dmidecode', '-s', 'bios-vendor']
- output = RunCommand(dmidecode_cmd)
- return 'Google' in output
- except subprocess.CalledProcessError:
-    # This fails if dmidecode doesn't exist or we have insufficient privileges.
- pass
-
-  # If dmidecode is not working, fall back to contacting the metadata server.
- try:
- Http().GetMetadata('instance/id', timeout=1)
- return True
- except urllib2.HTTPError as e:
- logging.warning('HTTP error: %s (http status code=%s)' % (e.reason, e.code))
- except urllib2.URLError as e:
- logging.warning('Cannot reach metadata server: %s' % e.reason)
-
- return False
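-
-
-# Illustrative sketch (not part of the original module): gating license
-# lookup on the detection result. The helper name is hypothetical.
-def _ExampleDetectGce():
-  """Logs whether license lookup will be attempted (sketch)."""
-  if IsRunningOnGCE():
-    logging.info('running on GCE; instance licenses will be looked up')
-  else:
-    logging.info('not on GCE; skipping license lookup')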
diff --git a/legacy/gcimagebundle/setup.py b/legacy/gcimagebundle/setup.py
deleted file mode 100755
index 76ccd04..0000000
--- a/legacy/gcimagebundle/setup.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright 2013 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Setup installation module for Image Bundle."""
-
-import os
-import distribute_setup
-distribute_setup.use_setuptools()
-
-from setuptools import find_packages
-from setuptools import setup
-
-CURDIR = os.path.abspath(os.path.dirname(__file__))
-
-def Read(file_name):
- with open(os.path.join(CURDIR, file_name), 'r') as f:
- return f.read().strip()
-
-setup(
- name='gcimagebundle',
- version=Read('VERSION'),
- url='https://github.com/GoogleCloudPlatform/compute-image-packages/tree/master/image-bundle',
- download_url='https://github.com/GoogleCloudPlatform/compute-image-packages/releases',
- license='Apache 2.0',
- author='Google Inc.',
- author_email='gc-team@google.com',
- description=('Image bundling tool for root file system.'),
- long_description=Read('README.md'),
- zip_safe=False,
- classifiers=[
- 'Development Status :: 5 - Production/Stable',
- 'Environment :: Console',
- 'Intended Audience :: Developers',
- 'Intended Audience :: System Administrators',
- 'License :: OSI Approved :: Apache Software License',
- 'Natural Language :: English',
- 'Topic :: System :: Filesystems',
- 'Topic :: Utilities',
- ],
- platforms='any',
- include_package_data=True,
- packages=find_packages(exclude=['distribute_setup']),
- scripts=['gcimagebundle'],
- test_suite='gcimagebundlelib.tests',
-)
diff --git a/legacy/gcimagebundle/stdeb.cfg b/legacy/gcimagebundle/stdeb.cfg
deleted file mode 100644
index 09364a3..0000000
--- a/legacy/gcimagebundle/stdeb.cfg
+++ /dev/null
@@ -1,3 +0,0 @@
-[DEFAULT]
-Depends: kpartx, parted, rsync, uuid-runtime
-XS-Python-Version: >= 2.6
diff --git a/unit-tests/travis-run-tests.sh b/unit-tests/travis-run-tests.sh
deleted file mode 100644
index e8f3c52..0000000
--- a/unit-tests/travis-run-tests.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-# This is just a stub script for now. Unit tests will be placed in this directory and
-# run by this script.
-exit 0
\ No newline at end of file